metadata (dict) | text (string, lengths 60 to 3.49M)
---|---
{
"source": "642215734/pytorch_SSD",
"score": 3
} |
#### File: 642215734/pytorch_SSD/log_utils.py
```python
import os
import time
class TimeUtils():
def get_curr_date(self):
return time.strftime('%Y%m%d',time.localtime(time.time()))
def get_curr_time(self):
return time.strftime('%Y%m%d %H:%M:%S',time.localtime(time.time()))
class LogUtils():
def info(self,title,content):
self._log('info',title,content)
return None
def warn(self,title,content):
self._log('warning',title,content)
return None
def err(self,title,content):
self._log('error', title, content)
return None
def _log(self,level,title,content):
curr_date=TimeUtils().get_curr_date()
log_file_name=curr_date+level+'.txt'
        log_content='{} | title:{} | content:{}'.format(level,title,content)
print(log_content)
try:
with open(os.path.join('logs/',log_file_name),'a',encoding='utf8') as wf:
                wf.write(log_content+'|'+TimeUtils().get_curr_time()+'\n')
except Exception as err:
return None
return None
if __name__ == '__main__':
print('test')
```
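A minimal usage sketch of the logger above (illustrative only; it assumes the script sits next to an existing `logs/` directory, since `_log` silently swallows write failures):
```python
from log_utils import LogUtils, TimeUtils  # assumes this file is importable as log_utils

log = LogUtils()
log.info('train', 'epoch 1 started')         # appends to logs/<YYYYMMDD>info.txt
log.warn('data', 'annotation file missing')  # appends to logs/<YYYYMMDD>warning.txt
log.err('io', 'failed to open checkpoint')   # appends to logs/<YYYYMMDD>error.txt
print(TimeUtils().get_curr_time())           # e.g. '20240101 12:00:00'
```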
#### File: 642215734/pytorch_SSD/parse_VOC.py
```python
import sys
import os
import json
import xml.etree.ElementTree as ET
XML_DIR = '/home/zlatan/Database/VOCdevkit/VOC2007/Annotations'
START_BOUNDING_BOX_ID = 1
PRE_DEFINE_CATEGORIES = {}
# If necessary, pre-define category and its id
# PRE_DEFINE_CATEGORIES = {"aeroplane": 1, "bicycle": 2, "bird": 3, "boat": 4,
# "bottle":5, "bus": 6, "car": 7, "cat": 8, "chair": 9,
# "cow": 10, "diningtable": 11, "dog": 12, "horse": 13,
# "motorbike": 14, "person": 15, "pottedplant": 16,
# "sheep": 17, "sofa": 18, "train": 19, "tvmonitor": 20}
def get(root, name):
vars = root.findall(name)
return vars
def get_and_check(root, name, length):
vars = root.findall(name)
if len(vars) == 0:
raise NotImplementedError('Can not find %s in %s.' % (name, root.tag))
if length > 0 and len(vars) != length:
raise NotImplementedError('The size of %s is supposed to be %d, but is %d.' % (name, length, len(vars)))
if length == 1:
vars = vars[0]
return vars
def get_filename_as_int(filename):
try:
filename = os.path.splitext(filename)[0]
return int(filename)
except:
raise NotImplementedError('Filename %s is supposed to be an integer.' % (filename))
def VOC_get_categories(xml_dir=XML_DIR):
# print(os.path.exists(xml_dir))
categories = PRE_DEFINE_CATEGORIES
bnd_id = START_BOUNDING_BOX_ID
for root, dirs, files in os.walk(xml_dir):
for name in files:
# print(name)
# print(os.path.join(root, name))
xml_file = os.path.join(root, name)
xml_f = open(xml_file, 'r')
tree = ET.parse(xml_f)
tree_root = tree.getroot()
            ## Currently we do not support segmentation
# segmented = get_and_check(root, 'segmented', 1).text
# assert segmented == '0'
for obj in get(tree_root, 'object'):
category = get_and_check(obj, 'name', 1).text
if category not in categories:
new_id = len(categories) + 1
categories[category] = new_id
category_id = categories[category]
bndbox = get_and_check(obj, 'bndbox', 1)
bnd_id = bnd_id + 1
xml_f.close()
for cate, cid in categories.items():
cat = {'id': cid, 'name': cate}
# print(cat)
# print(len(categories))
return categories
if __name__ == '__main__':
# if len(sys.argv) < 1:
    # print('1 argument is needed.')
    # print('Usage: %s XML_LIST.txt XML_DIR OUTPUT_JSON.json' % (sys.argv[0]))
# exit(1)
categories = VOC_get_categories()
print(categories)
print([*categories])
``` |
{
"source": "644/save-golds",
"score": 2
} |
#### File: save-golds/Prototype/savegolds.py
```python
import os, sys, subprocess, time, socket, glob, re, select
from time import sleep, perf_counter
from shutil import copyfile
from importlib import util
from datetime import datetime
from moviepy.config import get_setting
from moviepy.tools import subprocess_call
from moviepy.editor import VideoFileClip
# location for recordings
dir = "D:/replays/"
bkpdir = "D:/replays/old/"
recfolder = "D:/replays/recordings/"
# livesplit.server methods
getindex = str("getsplitindex\r\n").encode()
getsplitname = str("getprevioussplitname\r\n").encode()
getsplittime = str("getlastsplittime\r\n").encode()
getcurtime = str("getcurrenttime\r\n").encode()
getcomp = str("getcomparisonsplittime\r\n").encode()
getstatus = str("getcurrenttimerphase\r\n").encode()
# defaults
previndex = -1
starttime = 0
enabled = True
debug_mode = True
lasttime = None
oldpath = None
outfile = None
totaltime = None
recs = None
def file_in_use(fpath):
if os.path.exists(fpath):
try:
os.rename(fpath, fpath)
return False
except:
return True
def ffmpeg_extract_subclip(filename, t1, t2, targetname=None):
sleep(1)
name, ext = os.path.splitext(filename)
if not targetname:
T1, T2 = [int(1000*t) for t in [t1, t2]]
targetname = "%sSUB%d_%d.%s" % (name, T1, T2, ext)
cmd = [get_setting("FFMPEG_BINARY"),"-y",
"-ss", "%0.2f"%t1,
"-i", filename,
"-t", "%0.2f"%(t2-t1),
"-vcodec", "copy", "-acodec", "copy", targetname]
if debug_mode:
subprocess_call(cmd)
else:
subprocess_call(cmd, None)
def cut_replay(seconds, new_path, outfile, remove, from_end, last_replay):
global debug_mode
if debug_mode:
print("[AR] save_replay")
print("[AR] seconds=" + str(seconds) )
print("[AR] path=" + new_path)
print("[AR] remove=%s" %(remove))
print("[AR] from_end=%s" %(from_end))
if not enabled:
return
if seconds > 0:
if last_replay is not None and len(last_replay) > 0:
if debug_mode:
print("[AR] last_replay=" + last_replay)
last_replay_folder = os.path.dirname(os.path.abspath(last_replay))
last_replay_name, last_replay_type = os.path.splitext(os.path.basename(last_replay))
if len(new_path) <= 0 or not os.path.exists(new_path):
new_path = last_replay_folder
if debug_mode:
print("[AR] last_replay_folder=" + last_replay_folder)
print("[AR] last_replay_name=" + last_replay_name)
print("[AR] last_replay_type=" + last_replay_type)
print("[AR] new_path=" + new_path)
new_replay = os.path.join(new_path, outfile)
if debug_mode:
print("[AR] last_replay=" + last_replay)
print("[AR] new_replay=" + new_replay)
clip = VideoFileClip(last_replay)
duration = clip.duration
if duration > seconds:
if from_end:
if debug_mode: print("[AR] from_end")
ffmpeg_extract_subclip(last_replay, duration - seconds, duration+7, targetname=new_replay)
else:
if debug_mode: print("[AR] from_begin")
ffmpeg_extract_subclip(last_replay, 0, seconds+7, targetname=new_replay)
else:
copyfile(last_replay, new_replay)
clip.reader.close()
if clip.audio and clip.audio.reader:
clip.audio.reader.close_proc()
del clip.reader
del clip
if remove and os.path.exists(new_replay):
try:
if debug_mode: print("[AR] try remove")
for x in range(10):
if not file_in_use(last_replay):
break
if debug_mode: print("[AR] file not writeable, wait 0.5 seconds")
sleep(0.5)
if debug_mode: print("[AR] delete file:" + last_replay)
os.remove(last_replay)
except:
print("[AR] error ", sys.exc_info()[0], " on remove : ", last_replay)
return duration
def getdata(query):
try:
s.send(query)
data = s.recv(256)
return data.decode().strip()
except:
return None
def ask_livesplit():
global enabled
if not enabled:
return
global previndex
global lasttime
global index
global splitname
global curtime
global totaltime
global filelist
global outfile
global filen
global oldpb
global starttime
global substart
global oldsubstart
global recs
global recfolder
global dir
status = getdata(getstatus)
if status != "Running":
return
index = getdata(getindex)
if index:
try:
index = int(index)
except:
return
if not recs:
files_path = os.path.join(recfolder, '*')
recs = sorted(glob.iglob(files_path), key=os.path.getctime, reverse=True)
if not recs:
print("Couldn't find any recordings in '{}'".format(recfolder))
return
if previndex != index:
previndex = index
index = getdata(getindex)
if index:
index = int(index)
splitname = getdata(getsplitname)
curtime = getdata(getcurtime)
if curtime:
ts, ms = curtime.split('.')
curtime = sum(int(x) * 60 ** i for i, x in enumerate(reversed(ts.split(':'))))
curtime = float(f"{curtime}.{ms}")
else:
return
if lasttime:
sleep(1)
totaltime = round(curtime - lasttime, 2) + 2
outfile = "{}_{}_{}.mkv".format(index, totaltime, splitname)
filelist = glob.glob(dir+"{}_*_*.mkv".format(index))
if filelist:
filen = os.path.basename(filelist[0])
oldpb = float(re.search('.+_(.+)_.+.mkv', filen).group(1))
print(oldpb)
print(totaltime)
if oldpb > totaltime:
os.rename(filelist[0], bkpdir+filen)
path = dir+"buffer.mkv"
if starttime < 0:
starttime = 0
stime = perf_counter()
cmd = [get_setting("FFMPEG_BINARY"), "-y", "-i", recs[0], "-ss", "%0.2f"%starttime, "-c", "copy", dir+"tmp.mkv"]
subprocess_call(cmd)
duration = cut_replay(totaltime, dir, outfile, True, True, dir+"tmp.mkv")
etime = perf_counter()
substart = float(f"{etime-stime:0.2f}")
if duration:
starttime = starttime + duration - 2
if substart > 0:
starttime = starttime - substart
else:
path = dir+"buffer.mkv"
if starttime < 0:
starttime = 0
stime = perf_counter()
cmd = [get_setting("FFMPEG_BINARY"), "-y", "-i", recs[0], "-ss", "%0.2f"%starttime, "-c", "copy", dir+"tmp.mkv"]
subprocess_call(cmd)
duration = cut_replay(totaltime, dir, outfile, True, True, dir+"tmp.mkv")
etime = perf_counter()
substart = float(f"{etime-stime:0.2f}")
if duration:
starttime = starttime + duration - 2
if substart > 0:
starttime = starttime - substart
else:
lasttime = getdata(getsplittime)
print(lasttime)
if lasttime:
ts, ms = lasttime.split('.')
lasttime = sum(int(x) * 60 ** i for i, x in enumerate(reversed(ts.split(':'))))
lasttime = float(f"{lasttime}.{ms}")
def main_script():
global debug_mode
if debug_mode: print("[AR] Updated properties.")
global enabled
global getindex
global getsplitname
global getsplittime
global getcurtime
global getcomp
global getstatus
global dir
global bkpdir
global s
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("localhost", 16834))
s.settimeout(0.1)
while(True):
sleep(0.5)
ask_livesplit()
main_script()
``` |
{
"source": "645692095/virus_Toad",
"score": 3
} |
#### File: 645692095/virus_Toad/System.py
```python
import frozen  # workaround for extra processes when packaging multiprocessing code into an exe with PyInstaller
import multiprocessing
import subprocess, time, sys, os
import win32con
import win32api
CMD = r"WinCoreManagement.exe"  # absolute path of the program to run
def run(cmd):
    # print('start OK!')
    # os.path.abspath(__file__): absolute path of the current file
    # os.path.dirname(): directory part of a path
    # os.chdir(xxx): change the working directory to xxx
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    # run the cmd program from the current directory (no shell window, shell=False)
    p = subprocess.Popen(cmd, shell=False)
    p.wait()  # similar to p.join(): wait for cmd to exit before continuing
    try:
        # kill the cmd process; start /b relaunches the program in the background afterwards
        subprocess.call('start /b taskkill /F /IM %s' % cmd)  # clean up leftovers
    except Exception as e:
        # print(e)
        pass
    # print('child process closed, restarting')
    # recursively call run() to restart the process indefinitely
    run(cmd)
if __name__ == '__main__':
    # multiprocessing has a quirk on Windows:
    # when subprocess is used, two child processes get started (only one is actually needed)
    # workaround: calling freeze_support() resolves it
    multiprocessing.freeze_support()  # PyInstaller multiprocessing exe workaround
    run(CMD)
``` |
{
"source": "646801992/zaobot",
"score": 3
} |
#### File: zaobot/bot/context.py
```python
import time
from numbers import Rational
class Context:
def __init__(self, payload):
self.message = payload['message'].strip()
self.message_id = payload.get('message_id')
if self.message.startswith('/'): # message[0] will cause error if message is ''
message = self.message[1:].split()
            # May raise AttributeError, since a Context doesn't always have these attributes
self.directive = message[0]
self.args = message[1:]
self.user_id = payload['user_id']
self.time = payload['time']
self.nickname = payload['sender'].get('nickname')
self.message_type = None
self.group_id = None
@classmethod
def build(cls, message, time_: Rational = None, user_id=None, nickname=None, group_id=None):
"""Secondary construct method, construct a context from messages sent by zaobot"""
return cls({
'message': message,
'message_id': -1,
'time': time_ if time_ is not None else time.time(),
'user_id': user_id if user_id is not None else 0,
'sender': {
'nickname': nickname if nickname is not None else 'zaobot'
}
})
@property
def name(self):
if hasattr(self, 'group_card') and self.group_card != '':
return self.group_card
else:
return self.nickname
class PrivateContext(Context):
def __init__(self, payload):
super().__init__(payload)
self.message_type = "private"
class GroupContext(Context):
def __init__(self, payload):
super().__init__(payload)
self.message_type = "group"
self.group_id = payload['group_id']
self.group_card = payload['sender'].get('card') # could be empty string
self.role = payload['sender'].get('role')
@classmethod
def build(cls, message="", time_: Rational = None, user_id=None, nickname=None, group_id=None):
return cls({
'message': message,
'message_id': -1,
'time': time_ if time_ is not None else time.time(),
'user_id': user_id if user_id is not None else 0,
'sender': {
'nickname': nickname if nickname is not None else 'zaobot'
},
'group_id': group_id
})
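
# Illustrative usage (not part of the original module):
#   ctx = GroupContext.build(message='/zao hello', group_id=12345)
#   ctx.directive -> 'zao'; ctx.args -> ['hello']
#   ctx.message_type -> 'group'; ctx.group_id -> 12345; ctx.nickname -> 'zaobot'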
``` |
{
"source": "647-coder/Thrall",
"score": 2
} |
#### File: tests/test_amap/test_adapters.py
```python
from __future__ import absolute_import
import pytest
from thrall.amap.adapters import (
AMapEncodeAdapter,
AMapJsonDecoderAdapter,
BaseEncoderAdapter,
BaseDecoderAdapter,
)
from thrall.amap.models import (
GeoCodeResponseData,
GeoCodeRequestParams,
PreparedGeoCodeRequestParams,
ReGeoCodeResponseData,
PreparedReGeoCodeRequestParams,
SearchResponseData,
PreparedSearchTextRequestParams,
PreparedSearchAroundRequestParams,
SuggestResponseData,
PreparedSuggestRequestParams,
DistanceResponseData,
PreparedDistanceRequestParams,
NaviRidingResponseData,
PreparedNaviRidingRequestParams,
BatchResponseData,
PreparedBatchParams,
NaviWalkingResponseData,
PreparedNaviWalkingRequestParams,
NaviDrivingResponseData,
PreparedNaviDrivingRequestParams,
DistrictResponseData,
PreparedDistrictRequestParams,
)
class TestAMapEncodeAdapter(object):
def test_init_ok(self, mocker):
mocker.spy(AMapEncodeAdapter, 'registry_encoders')
model = AMapEncodeAdapter()
assert model.all_registered_coders is not None
assert model.registry_encoders.call_count == 1
def test_registry_encoders_ok(self):
model = AMapEncodeAdapter()
assert model.all_registered_coders[
'encode_geo_code'] == GeoCodeRequestParams
def test_registry_ok(self, mocker):
model = AMapEncodeAdapter()
mocker.spy(BaseEncoderAdapter, 'registry')
model.registry(model.encode_geo_code, GeoCodeRequestParams)
assert BaseEncoderAdapter.registry.call_count == 1
def test_encoder_context(self, mocker):
model = AMapEncodeAdapter()
model.registry(model.encode_geo_code, GeoCodeRequestParams)
mocker.spy(GeoCodeRequestParams, 'prepare')
model.get_encoder('encode_geo_code', address='xx', key='')
assert GeoCodeRequestParams.prepare.call_count == 1
def test_encoder_context_err(self):
model = AMapEncodeAdapter()
model.registry(model.encode_geo_code, GeoCodeRequestParams)
with pytest.raises(KeyError):
model.get_encoder('encode_xgeo_code', address='', key='')
class TestAMapJsonDecoderAdapter(object):
def test_init_ok(self, mocker):
mocker.spy(AMapJsonDecoderAdapter, 'registry_decoders')
model = AMapJsonDecoderAdapter()
assert model.all_registered_coders is not None
assert model.registry_decoders.call_count == 1
def test_registry_decoders_ok(self):
model = AMapJsonDecoderAdapter()
assert model.all_registered_coders[
'decode_geo_code'] == GeoCodeResponseData
def test_registry_ok(self, mocker):
model = AMapJsonDecoderAdapter()
mocker.spy(BaseDecoderAdapter, 'registry')
        model.registry(model.decode_geo_code, GeoCodeResponseData)
assert BaseDecoderAdapter.registry.call_count == 1
def test_decoder_context(self, mocker):
model = AMapJsonDecoderAdapter()
model.registry(model.decode_geo_code, GeoCodeResponseData)
mocker.spy(GeoCodeResponseData, '__init__')
model.get_decoder('decode_geo_code', raw_data='{}')
assert GeoCodeResponseData.__init__.call_count == 1
def test_decoder_context_err(self):
model = AMapJsonDecoderAdapter()
model.registry(model.decode_geo_code, GeoCodeResponseData)
with pytest.raises(KeyError):
model.get_decoder('encode_xgeo_code', raw_data={})
@pytest.mark.parametrize('func, params, result, instance', [
('encode_geo_code',
dict(address='abc', key='def'),
dict(address=['abc'], key='def'),
PreparedGeoCodeRequestParams),
('encode_regeo_code',
dict(location='125,25', key='def'),
dict(location=[(125, 25)], key='def'),
PreparedReGeoCodeRequestParams),
('encode_search_text',
dict(keywords=u'北京大学|xxx', key='def'),
dict(keywords=[u'北京大学', 'xxx'], key='def'),
PreparedSearchTextRequestParams),
('encode_search_around',
dict(location='123,45|322,33', key='def'),
dict(location=(123, 45), key='def'),
PreparedSearchAroundRequestParams),
('encode_suggest',
dict(keyword=u'北京大学', key='def'),
dict(keyword=u'北京大学', key='def'),
PreparedSuggestRequestParams),
('encode_distance',
dict(origins='111,222.0', destination='111.1,333.0', type=0, key='xxx'),
dict(origins=[(111, 222.0)], destination=(111.1, 333.0), type=0,
key='xxx'),
PreparedDistanceRequestParams),
('encode_riding',
dict(origin='111.0,22', destination='1,2.0', key='xxx'),
dict(origin=(111.0, 22), destination=(1, 2.0), key='xxx'),
PreparedNaviRidingRequestParams),
('encode_walking',
dict(origin='111.0,22', destination='1,2.0', key='xxx'),
dict(origin=(111.0, 22), destination=(1, 2.0), key='xxx'),
PreparedNaviWalkingRequestParams),
('encode_driving',
dict(origin='111.0,22', destination='1,2.0', key='xxx'),
dict(origin=(111.0, 22), destination=(1, 2.0), key='xxx'),
PreparedNaviDrivingRequestParams),
('encode_batch',
dict(key='xxx'),
dict(key='xxx'),
PreparedBatchParams),
('encode_district',
dict(keyword='xxx', key='xx'),
dict(keyword='xxx', key='xx'),
PreparedDistrictRequestParams),
])
def test_amap_encode_adapter_func(func, params, result, instance):
model = AMapEncodeAdapter()
r = getattr(model, func)(**params)
for k, v in result.items():
assert getattr(r, k) == v
assert isinstance(r, instance)
@pytest.mark.parametrize("func, instance", [
('decode_geo_code', GeoCodeResponseData),
('decode_regeo_code', ReGeoCodeResponseData),
('decode_search_text', SearchResponseData),
('decode_search_around', SearchResponseData),
('decode_suggest', SuggestResponseData),
('decode_distance', DistanceResponseData),
('decode_riding', NaviRidingResponseData),
('decode_walking', NaviWalkingResponseData),
('decode_driving', NaviDrivingResponseData),
('decode_district', DistrictResponseData),
])
def test_amap_json_decode_adapter_func(func, instance):
model = AMapJsonDecoderAdapter()
r = getattr(model, func)(raw_data='{"status": "1"}')
assert r.status == 1
assert isinstance(r, instance)
def test_amap_json_batch_decode_adapter_func():
model = AMapJsonDecoderAdapter()
r = model.decode_batch('{"status": "1"}', None, None)
assert r.status == 1
assert isinstance(r, BatchResponseData)
```
#### File: tests/test_amap/test_suggest_model.py
```python
from __future__ import absolute_import
from six import iteritems
from thrall.compat import unicode
import pytest
from thrall.amap._models import _suggest_model
class TestSuggestRequestParams(object):
def test_init_ok(self):
model = _suggest_model.SuggestRequestParams(keyword='keyword',
key='key')
assert model.keyword == 'keyword'
assert model.key == 'key'
def test_prepare_data_ok(self):
model = _suggest_model.SuggestRequestParams(keyword='keyword',
key='xxx')
p = model.prepare()
assert isinstance(p, _suggest_model.PreparedSuggestRequestParams)
class TestPreparedSuggestRequestParams(object):
def test_init_ok(self):
model = _suggest_model.PreparedSuggestRequestParams()
assert (model.types == model.keyword == model.location == model.city
== model.city_limit == model.data_type is None)
def test_prepare(self, mocker):
model = _suggest_model.PreparedSuggestRequestParams()
model.prepare_keyword = lambda x: 'keyword'
model.prepare_types = lambda x: 'types'
model.prepare_location = lambda x: 'location'
model.prepare_city = lambda x: 'city'
model.prepare_city_limit = lambda x: 'city_limit'
model.prepare_data_type = lambda x: 'data_type'
mocker.spy(model, 'prepare_keyword')
mocker.spy(model, 'prepare_types')
mocker.spy(model, 'prepare_location')
mocker.spy(model, 'prepare_city')
mocker.spy(model, 'prepare_city_limit')
mocker.spy(model, 'prepare_data_type')
model.prepare(key='xxx', keyword='keyword', types='types',
location='location', city='city',
city_limit=True, data_type='data_type')
model.prepare_keyword.assert_called_once_with('keyword')
model.prepare_types.assert_called_once_with('types')
model.prepare_location.assert_called_once_with('location')
model.prepare_city.assert_called_once_with('city')
model.prepare_city_limit.assert_called_once_with(True)
model.prepare_data_type.assert_called_once_with('data_type')
@pytest.mark.parametrize('data, result', [
('test', 'test'), (u'中国', u'中国'), (123, u'123'), (None, None)
])
def test_prepare_keyword(self, data, result):
model = _suggest_model.PreparedSuggestRequestParams()
model.prepare_keyword(data)
assert model.keyword == result
assert model.prepared_keyword == result
if data:
assert isinstance(model.prepared_keyword, unicode)
def test_prepare_types(self, mocker):
model = _suggest_model.PreparedSuggestRequestParams()
mocker.spy(_suggest_model, 'prepare_multi_pois')
model.prepare_types('xxx')
assert model.types == ['xxx']
assert model.prepared_types == 'xxx'
_suggest_model.prepare_multi_pois.assert_called_once_with('xxx')
@pytest.mark.parametrize('data, result, p_result', [
('123,45', (123, 45), '123.000000,45.000000'),
('123,45|223,34', (123, 45), '123.000000,45.000000'),
(['123,45'], (123, 45), '123.000000,45.000000'),
([(123, 45)], (123, 45), '123.000000,45.000000'),
])
def test_prepare_location(self, mocker, data, result, p_result):
model = _suggest_model.PreparedSuggestRequestParams()
mocker.spy(_suggest_model, 'prepare_multi_locations')
model.prepare_location(data)
assert model.location == result
assert model.prepared_location == p_result
_suggest_model.prepare_multi_locations.assert_called_once_with(data)
@pytest.mark.parametrize('data, result', [
('xx', 'xx'), (u'上海', u'上海'), (123, u'123'), (None, None),
])
def test_prepare_city(self, data, result):
model = _suggest_model.PreparedSuggestRequestParams()
model.prepare_city(data)
assert model.city == result
assert model.prepared_city == result
if data:
assert isinstance(model.prepared_city, unicode)
@pytest.mark.parametrize('data, result, p_result', [
(True, _suggest_model.CityLimitFlag.ON, 'true'),
(False, _suggest_model.CityLimitFlag.OFF, 'false'),
(_suggest_model.CityLimitFlag.ON,
_suggest_model.CityLimitFlag.ON, 'true'),
(_suggest_model.CityLimitFlag.OFF,
_suggest_model.CityLimitFlag.OFF, 'false'),
])
def test_prepare_city_limit(self, data, result, p_result):
model = _suggest_model.PreparedSuggestRequestParams()
model.prepare_city_limit(data)
assert model.city_limit == result
assert model.prepared_city_limit == p_result
def test_prepare_data_type(self, mocker):
model = _suggest_model.PreparedSuggestRequestParams()
mocker.spy(model, 'prepare_multi_data_types')
model.prepare_data_type('alL')
assert model.data_type == [_suggest_model.DataType.ALL]
assert model.prepared_data_type == 'all'
model.prepare_multi_data_types.assert_called_once_with('alL')
@pytest.mark.parametrize('input, output', [
(dict(keyword='xxx', key='key'),
dict(keywords='xxx', key='key')),
(dict(keyword='xxx', key='key', data_type=['all', 'poi']),
dict(keywords='xxx', key='key', datatype='all|poi')),
(dict(keyword='xxx', key='key', city_limit=True),
dict(keywords='xxx', key='key', citylimit='true')),
])
def test_generate_params(self, input, output):
model = _suggest_model.PreparedSuggestRequestParams()
model.prepare(**input)
for k, v in iteritems(output):
assert model.params[k] == v
class TestSuggestResponseData(object):
RAW_DATA = """{"status":"1","count":"10","info":"OK","infocode":"10000",
"tips":[{"id":[],"name":"肯德基","district":[],"adcode":[],
"location":[],"address":[],"typecode":[]},
{"id":"B000A7BM4H","name":"肯德基(花家地店)","district":"北京市朝阳区",
"adcode":"110105","location":"116.469271,39.985568",
"address":"花家地小区1号商业楼","typecode":"050301"}]}"""
def test_data(self, mocker):
model = _suggest_model.SuggestResponseData(self.RAW_DATA)
mocker.spy(model, 'get_data')
assert isinstance(model.data, list)
assert isinstance(model.data[0], _suggest_model.SuggestData)
assert model.get_data.call_count == 2
for i, j in zip(model.data, model.data):
assert i is not j
def test_data_in_static(self, mocker):
model = _suggest_model.SuggestResponseData(self.RAW_DATA,
static_mode=True)
mocker.spy(model, 'get_data')
assert isinstance(model.data, list)
assert isinstance(model.data[0], _suggest_model.SuggestData)
assert model.get_data.call_count == 0
for i, j in zip(model.data, model.data):
assert i is j
assert model.data[1].typecode is not None
```
#### File: Thrall/tests/test_hooks.py
```python
from thrall.hooks import _SetDefault, _SetDefaultWithParams, SetD
class TestSetDefault(object):
@_SetDefault
def mock(self, **kwargs):
return kwargs
@_SetDefault
def mock2(self, a=1, b=2):
return a, b
def test_set_ok(self):
self.mock.set_default(a=1, b=2)
r = self.mock(c=3)
assert r['a'] == 1
assert r['b'] == 2
assert r['c'] == 3
def test_set_default_ok(self):
self.mock.set_default(a=1, b=3)
r = self.mock(a=3)
assert r['a'] == 3
assert r['b'] == 3
def test_series_ok(self):
self.mock.set_default(a=1)
r = self.mock(a=3)
assert r['a'] == 3
assert r.get('b') == 3
def test_kwargs_override_ok(self):
self.mock2.set_default(a=3)
r = self.mock2(b=2)
assert r == (3, 2)
def test_outside_ok():
@_SetDefault
def mock(a=1, b=2):
return a, b
mock.set_default(a=3)
r = mock(a=2, b=2)
assert r == (2, 2)
class TestSetDefaultWithParams(object):
@_SetDefaultWithParams(a=1)
def mock(self, **kwargs):
return kwargs
def test_set_ok(self):
r = self.mock(b=2)
assert r['a'] == 1
assert r['b'] == 2
def test_set_default_ok(self):
r = self.mock(a=2, b=2)
assert r['a'] == 2
assert r['b'] == 2
def test_tests(self):
r = self.mock()
assert r['a'] == 1
assert r.get('b') is None
class TestSetD(object):
def test_no_params(self, mocker):
mocker.spy(_SetDefault, '__init__')
mocker.spy(_SetDefault, '__call__')
mocker.spy(_SetDefaultWithParams, '__init__')
mocker.spy(_SetDefaultWithParams, '__call__')
@SetD
def mock(**kwargs): pass
mock(a=1)
assert _SetDefault.__init__.call_count == 1
assert _SetDefault.__call__.call_count == 1
assert _SetDefaultWithParams.__init__.call_count == 0
assert _SetDefaultWithParams.__call__.call_count == 0
def test_wth_params(self, mocker):
mocker.spy(_SetDefault, '__init__')
mocker.spy(_SetDefault, '__call__')
mocker.spy(_SetDefaultWithParams, '__init__')
mocker.spy(_SetDefaultWithParams, '__call__')
@SetD(a=1)
def mock(**kwargs): pass
mock(a=2)
assert _SetDefault.__init__.call_count == 0
assert _SetDefault.__call__.call_count == 0
assert _SetDefaultWithParams.__init__.call_count == 1
assert _SetDefaultWithParams.__call__.call_count == 1
```
#### File: thrall/amap/adapters.py
```python
from __future__ import absolute_import
from ..base import BaseDecoderAdapter, BaseEncoderAdapter
from .models import (
DistanceRequestParams,
DistanceResponseData,
GeoCodeRequestParams,
GeoCodeResponseData,
ReGeoCodeRequestParams,
ReGeoCodeResponseData,
SearchAroundRequestParams,
SearchResponseData,
SearchTextRequestParams,
SuggestRequestParams,
SuggestResponseData,
DistrictRequestParams,
DistrictResponseData,
NaviRidingRequestParams,
NaviRidingResponseData,
NaviWalkingRequestParams,
NaviWalkingResponseData,
NaviDrivingRequestParams,
NaviDrivingResponseData,
BatchRequestParams,
BatchResponseData,
)
class AMapEncodeAdapter(BaseEncoderAdapter):
def get_encoder(self, func_name, *args, **kwargs):
encoder = self.all_registered_coders[func_name]
p_encoder = encoder(*args, **kwargs).prepare()
return p_encoder
def registry_encoders(self):
self.registry(self.encode_geo_code, GeoCodeRequestParams)
self.registry(self.encode_regeo_code, ReGeoCodeRequestParams)
self.registry(self.encode_search_text, SearchTextRequestParams)
self.registry(self.encode_search_around, SearchAroundRequestParams)
self.registry(self.encode_suggest, SuggestRequestParams)
self.registry(self.encode_district, DistrictRequestParams)
self.registry(self.encode_distance, DistanceRequestParams)
self.registry(self.encode_riding, NaviRidingRequestParams)
self.registry(self.encode_walking, NaviWalkingRequestParams)
self.registry(self.encode_driving, NaviDrivingRequestParams)
self.registry(self.encode_batch, BatchRequestParams)
def registry(self, func, coder):
return super(AMapEncodeAdapter, self).registry(func, coder)
def encode_geo_code(self, *args, **kwargs):
return self.get_encoder('encode_geo_code', *args, **kwargs)
def encode_regeo_code(self, *args, **kwargs):
return self.get_encoder('encode_regeo_code', *args, **kwargs)
def encode_search_text(self, *args, **kwargs):
return self.get_encoder('encode_search_text', *args, **kwargs)
def encode_search_around(self, *args, **kwargs):
return self.get_encoder('encode_search_around', *args, **kwargs)
def encode_suggest(self, *args, **kwargs):
return self.get_encoder('encode_suggest', *args, **kwargs)
def encode_district(self, *args, **kwargs):
return self.get_encoder('encode_district', *args, **kwargs)
def encode_distance(self, *args, **kwargs):
return self.get_encoder('encode_distance', *args, **kwargs)
def encode_riding(self, *args, **kwargs):
return self.get_encoder('encode_riding', *args, **kwargs)
def encode_walking(self, *args, **kwargs):
return self.get_encoder('encode_walking', *args, **kwargs)
def encode_driving(self, *args, **kwargs):
return self.get_encoder('encode_driving', *args, **kwargs)
def encode_batch(self, *args, **kwargs):
return self.get_encoder('encode_batch', *args, **kwargs)
class AMapJsonDecoderAdapter(BaseDecoderAdapter):
def __init__(self, static_mode=False):
super(AMapJsonDecoderAdapter, self).__init__()
self._static = static_mode
def get_decoder(self, func_name, *args, **kwargs):
decoder = self.all_registered_coders[func_name]
if self._static:
kwargs['static_mode'] = True
p_decoder = decoder(*args, **kwargs)
return p_decoder
def registry_decoders(self):
self.registry(self.decode_geo_code, GeoCodeResponseData)
self.registry(self.decode_regeo_code, ReGeoCodeResponseData)
self.registry(self.decode_search_text, SearchResponseData)
self.registry(self.decode_search_around, SearchResponseData)
self.registry(self.decode_suggest, SuggestResponseData)
self.registry(self.decode_district, DistrictResponseData)
self.registry(self.decode_distance, DistanceResponseData)
self.registry(self.decode_riding, NaviRidingResponseData)
self.registry(self.decode_walking, NaviWalkingResponseData)
self.registry(self.decode_driving, NaviDrivingResponseData)
self.registry(self.decode_batch, BatchResponseData)
def registry(self, func, coder):
return super(AMapJsonDecoderAdapter, self).registry(func, coder)
def decode_geo_code(self, *args, **kwargs):
return self.get_decoder('decode_geo_code', *args, **kwargs)
def decode_regeo_code(self, *args, **kwargs):
return self.get_decoder('decode_regeo_code', *args, **kwargs)
def decode_search_text(self, *args, **kwargs):
return self.get_decoder('decode_search_text', *args, **kwargs)
def decode_search_around(self, *args, **kwargs):
return self.get_decoder('decode_search_around', *args, **kwargs)
def decode_suggest(self, *args, **kwargs):
return self.get_decoder('decode_suggest', *args, **kwargs)
def decode_district(self, *args, **kwargs):
return self.get_decoder('decode_district', *args, **kwargs)
def decode_distance(self, *args, **kwargs):
return self.get_decoder('decode_distance', *args, **kwargs)
def decode_riding(self, *args, **kwargs):
return self.get_decoder('decode_riding', *args, **kwargs)
def decode_walking(self, *args, **kwargs):
return self.get_decoder('decode_walking', *args, **kwargs)
def decode_driving(self, *args, **kwargs):
return self.get_decoder('decode_driving', *args, **kwargs)
def decode_batch(self, *args, **kwargs):
return self.get_decoder('decode_batch', *args, **kwargs)
```
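A minimal sketch of how these adapters are driven, mirroring the calls exercised in `tests/test_amap/test_adapters.py` above (the key and the raw response payload are placeholders):
```python
from thrall.amap.adapters import AMapEncodeAdapter, AMapJsonDecoderAdapter

encoder = AMapEncodeAdapter()
prepared = encoder.encode_geo_code(address='abc', key='def')  # -> PreparedGeoCodeRequestParams

decoder = AMapJsonDecoderAdapter()
response = decoder.decode_geo_code(raw_data='{"status": "1"}')  # -> GeoCodeResponseData
assert response.status == 1
```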
#### File: amap/_models/_base_model.py
```python
from __future__ import absolute_import
import logging
import contextlib
import functools
from hashlib import md5
from shapely.geometry import Polygon, MultiPolygon
from six import iteritems
from thrall.compat import unicode, urlparse
from thrall.consts import FORMAT_JSON, FORMAT_XML, RouteKey
from thrall.exceptions import VendorError, amap_status_exception
from thrall.utils import MapStatusMessage, required_params, repr_params
from ..common import json_load_and_fix_amap_empty, parse_location
from ..consts import AMapVersion, ExtensionFlag, OutputFmt, StatusFlag
_logger = logging.getLogger(__name__)
class Extensions(object):
""" AMap extension control class """
def __init__(self, flag=False, **opt_options):
""" get an instance of extensions.
        :param flag: extension flag, True to enable extensions.
:param opt_options: k, w pair of options.
"""
self._flag = flag
self._opt_options = opt_options
def __getattr__(self, prop):
return self._opt_options.get(prop)
@property
def status(self):
return ExtensionFlag.ALL if self._flag else ExtensionFlag.BASE
class Sig(object):
""" AMap sig generator """
def __init__(self, pkey, hash_fn=md5, kwargs=None):
""" get an instance of sig
        Note: sig must be enabled in the AMap online control panel; please
        enable sig and obtain your private key (pkey) first.
        Please see: http://lbs.amap.com/faq/account/key/72
:param pkey: amap private key.
        :param hash_fn: hash function, md5 by default; a custom hash function
            must provide a hexdigest() method.
:param kwargs: request params, same as request params.
"""
self.private_key = pkey
self.hash_func = hash_fn
self.kw = kwargs if kwargs is not None else {}
def __repr__(self):
return repr_params(['method', 'sig'],
"AMap{}".format(self.__class__.__name__),
self, default_value={
"method": self.hash_func.__name__.upper(),
"sig": u"'{}'".format(self.unhash_sig)})
@property
def hashed_sig(self):
sig = (self.unhash_sig.encode('utf-8') if isinstance(
self.unhash_sig, unicode) else self.unhash_sig)
return self.hash_func(sig).hexdigest()
@property
def unhash_sig(self):
kw_pairs = sorted(iteritems(self.kw), key=lambda d: d[0])
prepared_sig = u"{kw}{sig}".format(
sig=self.private_key,
kw=u"&".join([u"{k}={v}".format(k=k, v=v) for k, v in kw_pairs])
)
return prepared_sig
class BaseRequestParams(object):
ROUTE_KEY = RouteKey.UNKNOWN
@required_params('key')
def __init__(self, key=None, output=None, private_key=None, callback=None,
raw_params=None):
self.key = key
self.output = output
self.callback = callback
self.private_key = private_key
self._raw_params = raw_params
def prepare(self):
try:
return self.prepare_data()
except VendorError as err:
err.data = self
raise err
def prepare_data(self):
""" package request params
input --> Nothing
        output --> prepared object, a subclass of BasePreparedRequestParams
override example:
def prepare(self):
p = BasePreparedRequestParams()
p.prepare(**some_kwargs)
return p
        :raise NotImplementedError: this function needs to be implemented.
"""
raise NotImplementedError
@contextlib.contextmanager
def prepare_basic(self, p):
org_fun = p.prepare
new_fun = functools.partial(p.prepare, key=self.key,
pkey=self.private_key,
output=self.output,
callback=self.callback,
raw_params=self._raw_params)
p.prepare = new_fun
yield p
p.prepare = org_fun
class BasePreparedRequestParams(object):
DEFAULT_URL = None
ROUTE_KEY = RouteKey.UNKNOWN
def __init__(self):
self.key = None
self._pkey = None
self.output = None
self.callback = None
self._raw_params = None
def __unicode__(self):
params = [k for k, v in iteritems(self.__dict__) if
not hasattr(v, '__call__')]
params.append('sig')
return repr_params(params, self.__class__.__name__, self)
def __repr__(self):
return self.__unicode__()
def generate_params(self):
""" generate prepared params without sig
input --> self
output --> prepared params dict without `sig`.
override example:
def generate_params(self):
optional_params = {..}
with self.init_basic_params({}, optional_params) as p:
# add required params
return p
        :raise NotImplementedError: this function needs to be implemented.
"""
raise NotImplementedError
@property
def params(self):
p = self.generate_params()
p.update({'sig': self.prepared_sig} if self._pkey else {})
return p
def prepare(self, **kwargs):
""" called prepare data functions
input --> kwargs witch need be package to dict
output --> Any
override example:
def prepare(self, a=1, b=2, c=3, key='xx'):
# do custom prepare function
# self.prepare_something(a=a, b=b, c=c)
self.prepare_base(key=key)
        :raise NotImplementedError: this function needs to be implemented.
"""
raise NotImplementedError
def prepare_base(self, key=None, pkey=None, output=None, callback=None,
raw_params=None):
self._pkey = pkey
self._raw_params = raw_params
self.prepare_key(key)
self.prepare_output(output)
self.prepare_callback(callback)
def prepare_key(self, key):
self.key = key
def prepare_output(self, output):
if output is None:
return
def _show_warning():
if output == OutputFmt.XML or str(output).lower() == FORMAT_XML:
_logger.warning('XML support deprecated, Only support json.')
if isinstance(output, OutputFmt):
_show_warning()
self.output = OutputFmt.JSON
else:
if output.lower() == FORMAT_JSON:
self.output = OutputFmt.JSON
elif output.lower() == FORMAT_XML:
_show_warning()
# self.output = OutputFmt.XML
self.output = OutputFmt.JSON
def prepare_callback(self, callback):
if callback is None:
return
self.callback = urlparse(callback)
@property
def prepared_key(self):
return self.key
@property
def prepared_output(self):
if self.output == OutputFmt.JSON:
return FORMAT_JSON
        # elif self.output == OutputFmt.XML:
        #     return FORMAT_XML
@property
def prepared_callback(self):
if self.callback is not None:
return self.callback.geturl()
@property
def sig(self):
if self._pkey:
return Sig(pkey=self._pkey, kwargs=self.generate_params())
@property
def prepared_sig(self):
if self._pkey:
return self.sig.hashed_sig
@contextlib.contextmanager
def init_basic_params(self, params, optionals=None):
new_params = self._init_basic_params(params)
self._init_optional_params(params, optionals)
# init raw_params
self._init_optional_params(params, self._raw_params)
yield new_params
def _init_basic_params(self, params):
params['key'] = self.prepared_key
params.update(
{'output': self.prepared_output}
if self.prepared_output else {})
params.update(
{'callback': self.prepared_callback}
if self.prepared_callback else {})
return params
@staticmethod
def _init_optional_params(params, optionals):
if optionals:
for opt, opt_v in iteritems(optionals):
params.update({opt: opt_v} if opt_v is not None else {})
return params
class BaseResponseData(object):
ROUTE_KEY = RouteKey.UNKNOWN
def __init__(self, raw_data, version=AMapVersion.V3,
auto_version=False, static_mode=False, raw_mode=False):
if raw_mode:
self._raw_data = raw_data
else:
self._raw_data = json_load_and_fix_amap_empty(raw_data)
self.version = version
self._data = None
if auto_version:
self.version = self.auto_check_version(
self._raw_data, self.version)
if static_mode:
self._data = self._get_static_data()
def __unicode__(self):
return repr_params(('status', 'status_msg', 'count', 'version'),
self.__class__.__name__, self)
def __repr__(self):
return self.__unicode__()
@property
def status(self):
return self._get_status()
@property
def status_msg(self):
return self._get_status_msg()
@property
def count(self):
return self._get_count()
@property
def data(self):
return self._data or self._get_data()
@staticmethod
def auto_check_version(data, default_version=AMapVersion.V3):
if 'errcode' in data:
return AMapVersion.V4
else:
return default_version
def raise_for_status(self):
if self.status == StatusFlag.ERR:
err_status = self.status_msg
raise amap_status_exception(
err_code=err_status.code,
err_msg=err_status.msg or err_status.detail,
data=self)
def _get_status(self):
if self.version == AMapVersion.V3:
return self._get_status_v3(self._raw_data.get('status'))
elif self.version == AMapVersion.V4:
return self._get_status_v4(self._raw_data.get('errcode'))
@staticmethod
def _get_status_v3(status_code):
return StatusFlag.OK if status_code == '1' else StatusFlag.ERR
@staticmethod
def _get_status_v4(status_code):
return StatusFlag.OK if status_code == 0 else StatusFlag.ERR
def _get_status_msg(self):
if self.version == AMapVersion.V3:
return MapStatusMessage.from_args(
code=int(self._raw_data.get('infocode', -1)),
msg=self._raw_data.get('info'),
detail='')
elif self.version == AMapVersion.V4:
return MapStatusMessage.from_args(
code=self._raw_data.get('errcode'),
msg=self._raw_data.get('errmsg'),
detail=self._raw_data.get('errdetail'))
def _get_count(self):
if self.version == AMapVersion.V3:
return int(self._raw_data.get('count', 0))
elif self.version == AMapVersion.V4:
return 0
def _get_data(self):
return self.get_data(self._raw_data)
def _get_static_data(self):
return self.get_data(self._raw_data, static=True)
def get_data(self, raw_data, static=False):
raise NotImplementedError
class LocationMixin(object):
LOCATION_KEY = 'location'
@property
def latitude(self):
try:
return parse_location(getattr(self, self.LOCATION_KEY))[1]
except Exception:
return None
@property
def longitude(self):
try:
return parse_location(getattr(self, self.LOCATION_KEY))[0]
except Exception:
return None
class PolylineMixin(object):
POLYLINE_KEY = 'polyline'
@property
def geo_data(self):
def _decode_raw_polyline(raw_polygon):
return [parse_location(loc) for loc in raw_polygon.split(';')]
raw_data = getattr(self, 'polyline', None)
if raw_data:
raw_polygons = raw_data.split('|')
return MultiPolygon(
[Polygon(_decode_raw_polyline(i)) for i in
raw_polygons])
```
#### File: Thrall/thrall/base.py
```python
from __future__ import absolute_import
from contextlib import contextmanager
from requests.adapters import HTTPAdapter
from requests.sessions import Session
from requests.exceptions import (
RequestException,
ConnectionError,
Timeout,
HTTPError
)
from thrall.compat import basestring
from thrall.exceptions import (
VendorRequestError,
VendorConnectionError,
VendorHTTPError,
)
from .hooks import SetDefault
from .utils import builtin_names, is_func_bound, repr_params
set_default = SetDefault
class BaseRequest(object):
def __init__(self, session=None):
if not isinstance(session, Session):
self.session = Session()
            self.session.mount('http://', HTTPAdapter(max_retries=1, pool_maxsize=50))
            self.session.mount('https://', HTTPAdapter(max_retries=1, pool_maxsize=50))
else:
self.session = session
@set_default
def get(self, url, params, timeout=1, callback=None, **kwargs):
with self.catch_exception():
r = self._get_result(url, params, timeout, **kwargs)
if callable(callback):
callback(r)
return r
@set_default
def post(self, url, data, timeout=1, callback=None, **kwargs):
with self.catch_exception():
r = self._post_result(url, data, timeout, **kwargs)
if callable(callback):
callback(r)
return r
def _get_result(self, url, params, timeout, **kwargs):
r = self.session.get(url, params=params, timeout=timeout, **kwargs)
r.raise_for_status()
return r
def _post_result(self, url, data, timeout, **kwargs):
r = self.session.post(url, data, timeout=timeout, **kwargs)
r.raise_for_status()
return r
@contextmanager
def catch_exception(self):
try:
yield
except(ConnectionError, Timeout) as err:
raise VendorConnectionError(str(err), data=err)
except HTTPError as err:
raise VendorHTTPError(str(err), data=err)
except RequestException as err:
raise VendorRequestError(str(err), data=err)
class BaseData(object):
_properties = ()
def __init__(self, unpacked_data, static=False):
self._data = unpacked_data or {}
self._static = static
if self._static:
self._static_decode()
def __unicode__(self):
return repr_params(self._properties, self.__class__.__name__, self)
def __repr__(self):
return self.__unicode__()
def __getattr__(self, name):
try:
return self._decode(name)
except KeyError:
msg = "'{0}' object has no attribute '{1}'"
raise AttributeError(msg.format(type(self).__name__, name))
def __setattr__(self, name, value):
super(BaseData, self).__setattr__(name, value)
if name in ('_static', '_data'):
return
if name in builtin_names:
name += u'_'
if not self._static:
self._data[name] = value
def __delattr__(self, name):
_get_attr = False
if name in self.__dict__:
_get_attr = True
del self.__dict__[name]
if name in self._data:
_get_attr = True
del self._data[name]
if not _get_attr:
msg = "'{0}' object has no attribute '{1}'"
raise AttributeError(msg.format(type(self).__name__, name))
def _decode(self, p):
if p not in self._properties:
raise KeyError(p)
if p in builtin_names:
p += u'_'
r = self.decode_param(p, self._data)
return r if r is not None else self._data.get(p)
def _static_decode(self):
for i in self._properties:
setattr(self, i, self._decode(i))
def decode_param(self, p, data):
""" Decode data param.
override example:
def decode_param(self, p, data):
# do something from data
# raise KeyError when not found in data
# default handle:
return self._data.get(p)
:param p: data param
:param data: raw data pair
:return: data value
"""
pass
@property
def attrs(self):
return self._properties
class BaseAdapterMixin(object):
_TYPE_ENCODE = 'encode'
_TYPE_DECODE = 'decode'
def __init__(self):
self._registered_coders = {}
@property
def all_registered_coders(self):
return self._registered_coders
def query(self, q):
try:
return self._query(q)
except KeyError:
msg = "'{0}' has no registered function '{1}'"
raise AttributeError(msg.format(type(self).__name__, q))
def _query(self, func):
if is_func_bound(func, self):
return self._registered_coders[func.__name__]
else:
return self._registered_coders[func]
def registry(self, func, coder):
try:
if is_func_bound(func, self):
self._registered_coders[func.__name__] = coder
else:
raise KeyError
except Exception:
raise AttributeError(
'un-support registry function {}'.format(func))
def un_registry(self, func):
try:
if isinstance(func, basestring):
self._registered_coders.pop(func)
elif is_func_bound(func, self):
self._registered_coders.pop(func.__name__)
else:
raise KeyError
except Exception:
raise AttributeError(
"can't un-registry function {}".format(func))
class BaseAdapter(BaseAdapterMixin):
def __init__(self):
super(BaseAdapter, self).__init__()
self.registry_coders()
def registry_coders(self):
raise NotImplementedError
class BaseEncoderAdapter(BaseAdapter):
def registry_coders(self):
return self.registry_encoders()
def registry_encoders(self):
raise NotImplementedError
class BaseDecoderAdapter(BaseAdapter):
def registry_coders(self):
return self.registry_decoders()
def registry_decoders(self):
raise NotImplementedError
```
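A rough sketch of how the `SetDefault` hook is meant to be used with `BaseRequest` (the URL and parameters are placeholders; the network call itself is left commented out):
```python
from thrall.base import BaseRequest  # assumed import path

req = BaseRequest()
req.get.set_default(timeout=5)  # later get() calls fall back to timeout=5 unless overridden
# r = req.get('http://example.com/v3/geocode/geo', params={'key': 'xxx', 'address': 'abc'})
```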
#### File: Thrall/thrall/hooks.py
```python
from six import iteritems
from .utils import is_func_bound
try:
from functools32 import (
update_wrapper,
wraps,
)
except ImportError:
from functools import (
update_wrapper,
wraps,
)
__all__ = ['SetDefault']
class _SetDefault(object):
def __init__(self, func):
self.func = func
self.default_kwargs = {}
self._own_instance = None
update_wrapper(self, func)
if is_func_bound(func):
self.__self__ = func.__self__
def __call__(self, *args, **kwargs):
for k, v in iteritems(self.default_kwargs):
if k not in kwargs:
kwargs[k] = v
if self._own_instance:
return self.func(self._own_instance, *args, **kwargs)
else:
return self.func(*args, **kwargs)
def __get__(self, instance, owner):
self._own_instance = instance
return self
def set_default(self, **kwargs):
for k, v in iteritems(kwargs):
self.default_kwargs[k] = v
class _SetDefaultWithParams(object):
def __init__(self, **kwargs):
self.default_kwargs = kwargs
def __call__(self, fn):
@wraps(fn)
def __wrapper(*args, **kwargs):
for k, v in iteritems(self.default_kwargs):
if k not in kwargs:
kwargs[k] = v
return fn(*args, **kwargs)
return __wrapper
def set_default(self, **kwargs):
for k, v in iteritems(kwargs):
self.default_kwargs[k] = v
class SetD(_SetDefault, _SetDefaultWithParams):
def __init__(self, *args, **kwargs):
if self.is_func_direct(args):
_SetDefault.__init__(self, args[0])
self._d = True
else:
_SetDefaultWithParams.__init__(self, **kwargs)
self._d = False
def __call__(self, *args, **kwargs):
if self._d:
return _SetDefault.__call__(self, *args, **kwargs)
else:
return _SetDefaultWithParams.__call__(self, args[0])
@staticmethod
def is_func_direct(args):
if args and callable(args[0]):
return True
SetDefault = SetD
``` |
{
"source": "649453932/bert4keras",
"score": 2
} |
#### File: bert4keras/examples/task_seq2seq.py
```python
from __future__ import print_function
import glob, os, json
import numpy as np
from tqdm import tqdm
from bert4keras.backend import keras, K
from bert4keras.bert import build_bert_model
from bert4keras.tokenizer import Tokenizer, load_vocab
from bert4keras.optimizers import Adam
from bert4keras.snippets import parallel_apply, sequence_padding
from bert4keras.snippets import DataGenerator
from bert4keras.snippets import open
seq2seq_config = 'seq2seq_config.json'
min_count = 128
maxlen = 256
batch_size = 16
steps_per_epoch = 1000
epochs = 10000
# BERT configuration
config_path = '/root/kg/bert/chinese_wwm_L-12_H-768_A-12/bert_config.json'
checkpoint_path = '/root/kg/bert/chinese_wwm_L-12_H-768_A-12/bert_model.ckpt'
dict_path = '/root/kg/bert/chinese_wwm_L-12_H-768_A-12/vocab.txt'
# Training samples: the THUCNews dataset, one txt file per sample.
txts = glob.glob('/root/thuctc/THUCNews/*/*.txt')
_token_dict = load_vocab(dict_path)  # load the vocabulary
_tokenizer = Tokenizer(_token_dict, do_lower_case=True)  # build a temporary tokenizer
if os.path.exists(seq2seq_config):
tokens = json.load(open(seq2seq_config))
else:
def _batch_texts():
texts = []
for txt in txts:
text = open(txt, encoding='utf-8').read()
texts.append(text)
if len(texts) == 100:
yield texts
texts = []
if texts:
yield texts
def _tokenize_and_count(texts):
_tokens = {}
for text in texts:
for token in _tokenizer.tokenize(text):
_tokens[token] = _tokens.get(token, 0) + 1
return _tokens
tokens = {}
def _total_count(result):
for k, v in result.items():
tokens[k] = tokens.get(k, 0) + v
    # count token frequencies with 10 worker processes
parallel_apply(
func=_tokenize_and_count,
iterable=tqdm(_batch_texts(), desc=u'构建词汇表中'),
workers=10,
max_queue_size=100,
callback=_total_count,
        # dummy=True,  # if running on Windows, set dummy=True
)
tokens = [(i, j) for i, j in tokens.items() if j >= min_count]
tokens = sorted(tokens, key=lambda t: -t[1])
tokens = [t[0] for t in tokens]
json.dump(tokens,
open(seq2seq_config, 'w', encoding='utf-8'),
indent=4,
ensure_ascii=False)
token_dict, keep_words = {}, []  # keep_words is the subset of tokens retained from the BERT vocab
for t in ['[PAD]', '[UNK]', '[CLS]', '[SEP]']:
token_dict[t] = len(token_dict)
keep_words.append(_token_dict[t])
for t in tokens:
if t in _token_dict and t not in token_dict:
token_dict[t] = len(token_dict)
keep_words.append(_token_dict[t])
tokenizer = Tokenizer(token_dict, do_lower_case=True)  # build the tokenizer
class data_generator(DataGenerator):
"""数据生成器
"""
def __iter__(self, random=False):
idxs = list(range(len(self.data)))
if random:
np.random.shuffle(idxs)
batch_token_ids, batch_segment_ids = [], []
for i in idxs:
txt = self.data[i]
text = open(txt, encoding='utf-8').read()
text = text.split('\n')
if len(text) > 1:
title = text[0]
content = '\n'.join(text[1:])
token_ids, segment_ids = tokenizer.encode(content,
title,
max_length=maxlen)
batch_token_ids.append(token_ids)
batch_segment_ids.append(segment_ids)
if len(batch_token_ids) == self.batch_size or i == idxs[-1]:
batch_token_ids = sequence_padding(batch_token_ids)
batch_segment_ids = sequence_padding(batch_segment_ids)
yield [batch_token_ids, batch_segment_ids], None
batch_token_ids, batch_segment_ids = [], []
model = build_bert_model(
config_path,
checkpoint_path,
application='seq2seq',
    keep_words=keep_words,  # keep only the tokens in keep_words, trimming the original vocab
)
model.summary()
# cross-entropy as the loss, with the predictions over the input part masked out
y_in = model.input[0][:, 1:]  # target tokens
y_mask = model.input[1][:, 1:]
y = model.output[:, :-1]  # predicted tokens, shifted one position from the targets
cross_entropy = K.sparse_categorical_crossentropy(y_in, y)
cross_entropy = K.sum(cross_entropy * y_mask) / K.sum(y_mask)
model.add_loss(cross_entropy)
model.compile(optimizer=Adam(1e-5))
def gen_sent(s, topk=2, title_maxlen=32):
"""beam search解码
每次只保留topk个最优候选结果;如果topk=1,那么就是贪心搜索
"""
content_maxlen = maxlen - title_maxlen
token_ids, segment_ids = tokenizer.encode(s, max_length=content_maxlen)
    target_ids = [[] for _ in range(topk)]  # candidate answer ids
    target_scores = [0] * topk  # candidate answer scores
    for i in range(title_maxlen):  # cap the output at title_maxlen characters
_target_ids = [token_ids + t for t in target_ids]
_segment_ids = [segment_ids + [1] * len(t) for t in target_ids]
        _probas = model.predict([_target_ids, _segment_ids
                                 ])[:, -1, 3:]  # skip [PAD], [UNK], [CLS] outright
        _log_probas = np.log(_probas + 1e-6)  # take logs for easier accumulation
        _topk_arg = _log_probas.argsort(axis=1)[:, -topk:]  # top-k indices for each candidate
_candidate_ids, _candidate_scores = [], []
for j, (ids, sco) in enumerate(zip(target_ids, target_scores)):
            # When predicting the first token, the topk inputs are all identical,
            # so only the first one needs to be considered; skip the rest.
if i == 0 and j > 0:
continue
for k in _topk_arg[j]:
_candidate_ids.append(ids + [k + 3])
_candidate_scores.append(sco + _log_probas[j][k])
        _topk_arg = np.argsort(_candidate_scores)[-topk:]  # pick the new top-k among all candidates
target_ids = [_candidate_ids[k] for k in _topk_arg]
target_scores = [_candidate_scores[k] for k in _topk_arg]
best_one = np.argmax(target_scores)
if target_ids[best_one][-1] == 3:
return tokenizer.decode(target_ids[best_one])
    # if no end token is produced within title_maxlen characters, return the best candidate as-is
return tokenizer.decode(target_ids[np.argmax(target_scores)])
def just_show():
s1 = u'夏天来临,皮肤在强烈紫外线的照射下,晒伤不可避免,因此,晒后及时修复显得尤为重要,否则可能会造成长期伤害。专家表示,选择晒后护肤品要慎重,芦荟凝胶是最安全,有效的一种选择,晒伤严重者,还请及 时 就医 。'
s2 = u'8月28日,网络爆料称,华住集团旗下连锁酒店用户数据疑似发生泄露。从卖家发布的内容看,数据包含华住旗下汉庭、禧玥、桔子、宜必思等10余个品牌酒店的住客信息。泄露的信息包括华住官网注册资料、酒店入住登记的身份信息及酒店开房记录,住客姓名、手机号、邮箱、身份证号、登录账号密码等。卖家对这个约5亿条数据打包出售。第三方安全平台威胁猎人对信息出售者提供的三万条数据进行验证,认为数据真实性非常高。当天下午 ,华 住集 团发声明称,已在内部迅速开展核查,并第一时间报警。当晚,上海警方消息称,接到华住集团报案,警方已经介入调查。'
for s in [s1, s2]:
print(u'生成标题:', gen_sent(s))
print()
class Evaluate(keras.callbacks.Callback):
def __init__(self):
self.lowest = 1e10
def on_epoch_end(self, epoch, logs=None):
        # save the best model so far
if logs['loss'] <= self.lowest:
self.lowest = logs['loss']
model.save_weights('./best_model.weights')
        # demo the current generation quality
just_show()
if __name__ == '__main__':
evaluator = Evaluate()
train_generator = data_generator(txts, batch_size)
model.fit_generator(train_generator.forfit(),
steps_per_epoch=steps_per_epoch,
epochs=epochs,
callbacks=[evaluator])
else:
model.load_weights('./best_model.weights')
``` |
{
"source": "64-B1T/basic_robotics",
"score": 3
} |
#### File: basic_robotics/general/faser_general.py
```python
import math
from . import faser_high_performance as mr
import numpy as np
import scipy as sci
import scipy.linalg as ling
from .faser_transform import tm
#TRANSFORMATION MATRIX MANIPULATIONS
def TAAtoTM(taa_format):
"""
Converts Translation Axis Angle to Transformation Matrix
Args:
taa_format (ndarray): TAA representation of given transformation.
Returns:
transformation_matrix: 4x4 transformation matrix representation
"""
taa_format = taa_format.reshape((6))
mres = mr.MatrixExp3(mr.VecToso3(taa_format[3:6]))
#return mr.RpToTrans(mres, transaa[0:3])
taa_format = taa_format.reshape((6, 1))
    transformation_matrix = np.vstack((np.hstack((mres, taa_format[0:3])), np.array([0, 0, 0, 1])))
    #print(transformation_matrix)
    return transformation_matrix
def TMtoTAA(transformation_matrix):
"""
Converts a 4x4 transformation matrix to TAA representation
Args:
transformation_matrix: transformation matrix to be converted
Returns:
TAA representation
"""
rotation_matrix, position = mr.TransToRp(transformation_matrix)
rotation_array = mr.so3ToVec(mr.MatrixLog3(rotation_matrix))
return np.vstack((position.reshape((3, 1)), angleMod(rotation_array.reshape((3, 1)))))
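
# Round-trip sketch (illustrative): TMtoTAA(TAAtoTM(v)) recovers v up to the angle
# wrapping applied by angleMod, e.g. for v = np.array([1, 2, 3, 0.1, 0.2, 0.3]).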
#Change of Frames
def localToGlobal(reference, rel):
"""
Converts a transform in a local frame to the global frame
Args:
        reference (tm): Transform of frame A to frame B
rel (tm): Transform of object 1 in frame B
Returns:
tm: Transform of object 1 in frame A
"""
return tm(mr.LocalToGlobal(reference.gTAA(), rel.gTAA()))
def globalToLocal(reference, rel):
"""
Convert a transform in a global frame to a local frame
Args:
reference (tm): Transform of frame A to frame B
rel (tm): Transform of object 1 in frame A
Returns:
tm: Transform of object 1 in frame B
"""
return tm(mr.GlobalToLocal(reference.gTAA(), rel.gTAA()))
#Transformation Matrix Group Functions
def planeFromThreePoints(ref_point_1, ref_point_2, ref_point_3):
"""
Creates the equation of a plane from three points
Args:
ref_point_1: tm or vector for point 1
ref_point_2: tm or vector for point 2
ref_point_3: tm or vector for point 3
Returns:
        a, b, c, d: coefficients of the plane equation
"""
p1 = np.array(ref_point_1[0:3]).flatten()
p2 = np.array(ref_point_2[0:3]).flatten()
p3 = np.array(ref_point_3[0:3]).flatten()
v1 = p3 - p1
v2 = p2 - p1
# the cross product is a vector normal to the plane
cp = np.cross(v1, v2)
a, b, c = cp
# This evaluates a * x3 + b * y3 + c * z3 which equals d
d = np.dot(cp, p3)
return a, b, c, d
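
# Worked check (illustrative): for points (0,0,0), (1,0,0), (0,1,0) the edge vectors are
# v1 = (0,1,0) and v2 = (1,0,0), so cp = v1 x v2 = (0,0,-1) and d = cp . p3 = 0,
# giving the plane -z = 0, i.e. the z = 0 plane, as expected.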
def planePointsFromTransform(ref_point_1):
"""
Create plane TM points from one Transform (using unit vectors)
Args:
ref_point_1: transform to place plane on
Returns:
a, b, c: Plane basis points
"""
a, b, c = ref_point_1.tripleUnit()
return ref_point_1, b, c
def mirror(origin, mirror_plane):
"""
Mirrors a point about a plane
Args:
origin: tm defining the mirror plane (the plane through this transform's position)
mirror_plane: point to be mirrored about that plane
Returns:
mirrored Point
"""
t1, t2, t3 = planePointsFromTransform(origin)
a, b, c, d = planeFromThreePoints(t1, t2, t3)
x1 = mirror_plane[0]
y1 = mirror_plane[1]
z1 = mirror_plane[2]
k = (d - a * x1 - b * y1 - c * z1) / float(a * a + b * b + c * c)  # signed step along the normal for the plane a*x + b*y + c*z = d
x2 = a * k + x1
y2 = b * k + y1
z2 = c * k + z1
x3 = 2 * x2-x1
y3 = 2 * y2-y1
z3 = 2 * z2-z1
return tm([x3, y3, z3, 0, 0, 0])
def adjustRotationToMidpoint(active_point, ref_point_1, ref_point_2, mode = 0):
"""
Applies the midpoint transform of reference points 1 and 2 to an active point
Args:
active_point (tm): Point to be modified
ref_point_1 (tm): origin point for midpoint calculation
ref_point_2 (tm): goal point for midpoint calculation
mode (int): Mode of midpoint calculation. 0, TMMIDPOINT. 1, ROTFROMVEC
Returns:
tm: Modified version of active_point with orientation of vector 1 -> 2
"""
modified_point = active_point.copy()
if mode != 1:
t_mid = tmInterpMidpoint(ref_point_1, ref_point_2)
modified_point[3:6] = t_mid[3:6]
else:
modified_point[3:6] = rotationFromVector(ref_point_1, ref_point_2)[3:6]
return modified_point
def tmAvgMidpoint(ref_point_1, ref_point_2):
"""
Simplest version of a midpoint calculation. Simply the average of two positions
Args:
ref_point_1 (tm): position 1
ref_point_2 (tm): position 2
Returns:
tm: midpoint average of positions 1 and 2
"""
return (ref_point_1 + ref_point_2)/2
def tmInterpMidpoint(ref_point_1, ref_point_2):
"""
Better version of midpoint calculation
Position is stil average of positions 1 and 2
but rotation is calculated as a proper interpolation
Args:
ref_point_1 (tm): position 1
ref_point_2 (tm): position 2
Returns:
tm: midpoint of positions 1 and 2
"""
taar = np.zeros((6, 1))
taar[0:3] = (ref_point_1[0:3] + ref_point_2[0:3])/2
R1 = mr.MatrixExp3(mr.VecToso3(ref_point_1[3:6].reshape((3))))
R2 = mr.MatrixExp3(mr.VecToso3(ref_point_2[3:6].reshape((3))))
Re = (R1 @ (R2.conj().T)).conj().T
Re2 = mr.MatrixExp3(mr.VecToso3(mr.so3ToVec(mr.MatrixLog3((Re)/2))))
rmid = Re2 @ R1
taar[3:6] = mr.so3ToVec(mr.MatrixLog3((rmid))).reshape((3, 1))
return tm(taar)
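# Illustrative example (added sketch, not part of the original module): for two poses
# that differ in both position and rotation, tmAvgMidpoint naively averages the TAA
# vectors while tmInterpMidpoint averages position and interpolates rotation on SO(3).
def _example_midpoints():
    pose_a = tm([0, 0, 0, 0, 0, 0])
    pose_b = tm([2.0, 0, 0, 0, 0, np.pi / 2])
    return tmAvgMidpoint(pose_a, pose_b), tmInterpMidpoint(pose_a, pose_b)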
def get_surface_normal(tri):
u = tri[1] - tri[0]
v = tri[2] - tri[0]
x = u[1] * v[2] - u[2] * v[1]
y = u[2] * v[0] - u[0] * v[2]
z = u[0] * v[1] - u[1] * v[0]
center = tm([(tri[0][0] + tri[1][0] + tri[2][0])/3,
(tri[0][1] + tri[1][1] + tri[2][1])/3,
(tri[0][2] + tri[1][2] + tri[2][2])/3, 0, 0, 0])
nvec = np.array([x, y, z])
unit_vec = nvec/np.linalg.norm(nvec)/2
unit_out = tm([unit_vec[0], unit_vec[1], unit_vec[2], 0, 0, 0])
return center, unit_out
#Rotations/Viewers
def rotationFromVector(ref_point_1, ref_point_2):
"""
Reorients ref_point_1 such that its z axis is pointing towards ref_point_2
Args:
ref_point_1 (tm): position 1
ref_point_2 (tm): position 2
Returns:
tm: ref_point_1 where z points to position 2
"""
d = math.sqrt((ref_point_2[0] - ref_point_1[0])**2 + (ref_point_2[1] - ref_point_1[1])**2 + (ref_point_2[2] - ref_point_1[2])**2)
res = lambda x : distance(tm([ref_point_1[0], ref_point_1[1], ref_point_1[2], x[0], x[1], ref_point_1[5]]) @ tm([0, 0, d, 0, 0, 0]), ref_point_2)
x0 = np.array([ref_point_1[3], ref_point_1[4]])
xs = sci.optimize.fmin(res, x0, disp=False)
ref_point_1[3:5] = xs.reshape((2))
return ref_point_1
def lookAt(ref_point_1, ref_point_2):
"""
Alternate version of rotationFromVector; however, the rotation *about* the z axis may be arbitrary
Does not depend on scipy.optimize.fmin
Args:
ref_point_1 (tm): position 1
ref_point_2 (tm): position 2
Returns:
tm: point at ref_point_1 where z points to position 2
"""
upa = (ref_point_1 @ tm([-1, 0, 0, 0, 0, 0]))
up = upa[0:3].flatten()
va = ref_point_1[0:3].flatten()
vb = ref_point_2[0:3].flatten()
zax = mr.mr.Normalize(vb-va)
xax = mr.mr.Normalize(np.cross(up, zax))
yax = np.cross(zax, xax)
R2 = np.eye(4)
R2[0:3, 0:3] = np.array([xax, yax, zax]).T
R2[0:3, 3] = va
ttm = tm(R2)
return ttm
#Error and Distance Functions
def poseError(ref_point_1, ref_point_2):
"""
Provides absolute error between two transformations
Args:
ref_point_1 (tm): Reference point 1
ref_point_2 (tm): Reference point 2
Returns:
tm: absolute error
"""
return abs(ref_point_1 - ref_point_2)
def geometricError(ref_point_1, ref_point_2):
"""
Provides geometric error between two points
Args:
ref_point_1 (tm): Reference point 1
ref_point_2 (tm): Reference point 2
Returns:
tm: geometric error
"""
return globalToLocal(ref_point_2, ref_point_1)
def distance(ref_point_1, ref_point_2):
"""
Calculates straight line distance between two points (2d or 3d (tm))
Args:
ref_point_1 (tm): Reference point 1
ref_point_2 (tm): Reference point 2
Returns:
float: distance between points 1 and 2
"""
try:
d = math.sqrt((ref_point_2[0] - ref_point_1[0])**2 + (ref_point_2[1] - ref_point_1[1])**2 + (ref_point_2[2] - ref_point_1[2])**2)
except:
d = math.sqrt((ref_point_2[0] - ref_point_1[0])**2 + (ref_point_2[1] - ref_point_1[1])**2)
return d
def arcDistance(ref_point_1, ref_point_2):
"""
Calculates the arc distance between two points
(magnitude average of geometric error)
Args:
ref_point_1 (tm): Reference point 1
ref_point_2 (tm): Reference point 2
Returns:
float: arc distance between two points
"""
geo_error = globalToLocal(ref_point_1, ref_point_2)
d = math.sqrt(geo_error[0]**2 + geo_error[1]**2 + geo_error[2]**2 + geo_error[3]**2 +geo_error[4]**2 + geo_error[5]**2)
return d
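# Illustrative example (added sketch, not part of the original module): distance()
# measures the straight-line translation between two poses and ignores orientation,
# while arcDistance() also folds in the orientation error via the geometric error.
def _example_distance_measures():
    pose_a = tm([0, 0, 0, 0, 0, 0])
    pose_b = tm([1.0, 0, 0, 0, 0, np.pi / 4])
    return distance(pose_a, pose_b), arcDistance(pose_a, pose_b)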
#Gap Closure
def closeLinearGap(origin_point, goal_point, delta):
"""
Close linear gap between two points by delta amount
Args:
origin_point (tm): Current point in trajectory
goal_point (tm): Goal to advance towards
delta (float): Amount to advance
Returns:
tm: new, closer position to goal
"""
origin_to_goal = goal_point - origin_point
#normalize
return_transform = np.zeros((6, 1))
var = math.sqrt(origin_to_goal[0]**2 + origin_to_goal[1]**2 + origin_to_goal[2]**2)
#print(var, "var")
if var == 0:
var = 1  # guard against division by zero when start and goal positions coincide
for i in range(6):
return_transform[i] = origin_point.TAA[i] + (origin_to_goal[i] / var) * delta
#xf = origin_point @ TAAtoTM(return_transform)
return tm(return_transform)
def closeArcGap(origin_point, goal_point, delta):
"""
Closes gap to goal using arc method instead of linear
Args:
origin_point (tm): Current point in trajectory
goal_point (tm): Goal to advance towards
delta (float): Amount to advance
Returns:
tm: new, closer position to goal
"""
origin_to_goal = goal_point - origin_point
#normalize
return_transform = np.zeros((6, 1))
var = math.sqrt(origin_to_goal[0]**2 + origin_to_goal[1]**2 + origin_to_goal[2]**2)
#print(var, "var")
if var == 0:
var = 1  # guard against division by zero when start and goal positions coincide
for i in range(6):
return_transform[i] = (origin_to_goal[i] / var) * delta
xf = origin_point @ TAAtoTM(return_transform)
return xf
def IKPath(initial, goal, steps):
"""
Creates a simple path from origin to goal with a given number of steps
Args:
initial (tm): Initial Position
goal (tm): Goal position
steps (int): number of steps to take
Returns:
[tm]: list of transforms representing trajectory
"""
delta = (goal.gTAA() - initial.gTAA())/steps
pose_list = []
for i in range(steps):
pos = tm(initial.gTAA() + delta * i)
pose_list.append(pos)
return pose_list
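# Illustrative example (added sketch, not part of the original module): interpolates
# ten intermediate poses by linearly stepping the TAA vector from start towards goal
# (the final waypoint stops one increment short of the goal).
def _example_ik_path():
    start = tm([0, 0, 0, 0, 0, 0])
    goal = tm([1.0, 0.5, 0.25, 0, 0, 0])
    return IKPath(start, goal, 10)  # list of 10 tm waypoints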
#ANGLE HELPERS
def deg2Rad(deg):
"""
Convert degrees to radians
Args:
deg (float): measure of angles in degrees
Returns:
float: measure of angles in radians
"""
return deg * np.pi / 180
def rad2Deg(rad):
"""
Converts radians to degrees
Args:
rad (float): measure of angles in radians
Returns:
float: measure of angles in degrees
"""
return rad * 180 / np.pi
def angleMod(rad):
"""
Cuts angles in radians such that they don't exceed 2pi absolute
Args:
rad (float): angle or angles
Returns:
float: cut down angle or angles
"""
if isinstance(rad, tm):
return rad.AngleMod();
if np.size(rad) == 1:
if abs(rad) > 2 * np.pi:
rad = rad % (2 * np.pi)
return rad
if np.size(rad) == 6:
for i in range(3, 6):
if abs(rad[i]) > 2 * np.pi:
rad[i] = rad[i] % (2 * np.pi)
return rad
for i in range(np.size(rad)):
if abs(rad[i]) > 2 * np.pi:
rad[i] = rad[i] % (2 * np.pi)
return rad
def angleBetween(ref_point_1, ref_point_2, ref_point_3):
"""
Calculates the interior angle between points 1, 2, and 3
Args:
ref_point_1 (tm): Reference point 1
ref_point_2 (tm): Reference point 2
ref_point_3 (tm): Reference point 3
Returns:
float: Angle between points 1, 2, and 3 in 3D-Space
"""
v1 = np.array([ref_point_1[0]-ref_point_2[0], ref_point_1[1]-ref_point_2[1], ref_point_1[2] - ref_point_2[2]])
#v1n = mr.mr.Normalize(v1)
v1n = np.linalg.norm(v1)
v2 = np.array([ref_point_3[0]-ref_point_2[0], ref_point_3[1]-ref_point_2[1], ref_point_3[2] - ref_point_2[2]])
#v2n = mr.mr.Normalize(v2)
v2n = np.linalg.norm(v2)
res = np.clip(np.dot(v1, v2)/(v1n*v2n), -1, 1)
#res = np.clip(np.dot(np.squeeze(v1n), np.squeeze(v2n)), -1, 1)
res = angleMod(math.acos(res))
return res
#Wrench Operations
def makeWrench(position_applied, force, force_direction_vector):
"""
Generates a new wrench
Args:
position_applied: position at which the wrench is applied
force: magnitude of force applied (or mass if force_direction_vector is a gravity vector)
force_direction_vector: unit vector to apply force (or gravity)
Returns:
wrench
"""
forcev = np.array(force_direction_vector) * force #Force vector (negative Z)
t_wren = np.cross(position_applied[0:3].reshape((3)), forcev) #Calculate moment based on position and action
wrench = np.array([t_wren[0], t_wren[1], t_wren[2], forcev[0], forcev[1], forcev[2]]).reshape((6, 1)) #Create Complete Wrench
return wrench
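# Illustrative example (added sketch, not part of the original module): a 2 kg load
# acting at [0.5, 0, 1.0] under gravity, passing the mass as 'force' and the gravity
# vector as the direction, as the docstring above suggests.
def _example_gravity_wrench():
    application_point = tm([0.5, 0, 1.0, 0, 0, 0])
    mass_kg = 2.0
    gravity_vector = np.array([0, 0, -9.81])
    return makeWrench(application_point, mass_kg, gravity_vector)  # 6x1 wrench: [moments; forces]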
def transformWrenchFrame(wrench, old_wrench_frame, new_wrench_frame):
"""
Translates one wrench frame to another
Args:
wrench: original wrench to be translated
old_wrench_frame: the original frame that the wrench was in (tm)
new_wrench_frame: the new frame that the wrench *should* be in (tm)
Returns:
new Wrench in the frame of new_wrench_frame
"""
ref = globalToLocal(old_wrench_frame, new_wrench_frame)
return ref.adjoint().T @ wrench
#Twists
def twistToScrew(input_twist):
"""
Converts a twist to a screw
Args:
input_twist (ndarray): Twist
Returns:
ndarray: Screw representing twist
"""
if (mr.Norm(input_twist[0:3])) == 0:
w = mr.Normalize(input_twist[0:6])
th = mr.Norm(input_twist[3:6])
q = np.array([0, 0, 0]).reshape((3, 1))
h = np.inf
else:
unit_twist = input_twist/mr.Norm(input_twist[0:3])
w = unit_twist[0:3].reshape((3))
v = unit_twist[3:6].reshape((3))
th = mr.Norm(input_twist[0:3])
q = np.cross(w, v)
h = (v.reshape((3, 1)) @ w.reshape((1, 3)))
return (w, th, q, h)
def normalizeTwist(twist):
"""
Normalize a Twist
Args:
tw (ndarray): Input twist
Returns:
ndarray: Normalized Twist
"""
if mr.Norm(twist[0:3]) > 0:
twist_norm = twist/mr.Norm(twist[0:3])
else:
twist_norm = twist/mr.Norm(twist[3:6])
return twist_norm
def twistFromTransform(input_transform):
"""
Creates twist from transform (tm)
Args:
input_transform (tm): input transform
Returns:
ndarray: twist representing transform
"""
transform_skew = mr.MatrixLog6(input_transform.TM)
return mr.se3ToVec(transform_skew)
def transformFromTwist(input_twist):
"""
Converts a twist to a transformation matrix
Args:
input_twist (ndarray): Input twist to be transformed
Returns:
tm: Transform represented by twist
"""
input_twist = input_twist.reshape((6))
#print(tw)
tms = mr.VecTose3(input_twist)
tms = delMini(tms)
tmr = mr.MatrixExp6(tms)
return tm(tmr)
def transformByVector(transform, vec):
"""
Performs tv = TM*vec and removes the 1
Args:
transform (tm): transform to operate on
vec (ndarray): vector to multipy
Returns:
ndarray: vector product
"""
transform_matrix = transform.TM
b = np.array([1.0])
n = np.concatenate((vec, b))
trvh = transform_matrix @ n
return trvh[0:3]
#def RotationAroundVector(w, theta):
# r = np.identity(3)+math.sin(theta) * rp.skew(w)+(1-math.cos(theta)) * rp.skew(w) @ rp.skew(w)
# return r
# Unit Vectors
def fiboSphere(num_points):
"""
Create Fibonacci points on the surface of a sphere
#https://stackoverflow.com/questions/9600801/evenly-distributing-n-points-on-a-sphere
Args:
num_points: number of points
Returns:
xyzcoords: points in cartesian coordinates
"""
indices = np.arange(0, num_points, dtype=float) + 0.5
phi = np.arccos(1 - 2*indices/num_points)
theta = np.pi * (1 + 5**0.5) * indices
x, y, z = np.cos(theta) * np.sin(phi), np.sin(theta) * np.sin(phi), np.cos(phi);
xyzcoords = np.array([x, y, z]).T
return xyzcoords
def unitSphere(num_points):
"""
Generates a "unit sphere" with an approximate number of points
the actual count generated is (round(sqrt(num_points)) + 1)**2
Args:
num_points: Approximate number of points to collect
Returns:
xyzcoords: points in cartesian coordinates
azel: coords in azimuth/elevation notation
"""
xyzcoords = []
azel = []
azr = round(math.sqrt(num_points))
elr = round(math.sqrt(num_points))
inc = np.pi * 2 / azr
incb = 2 / elr
a = 0
e = -1
for i in range(azr + 1):
arccos_e = np.arccos(np.clip(e, -1.0, 1.0))
sin_arccos_e = np.sin(arccos_e)
for j in range(elr + 1):
x = np.cos(a) * sin_arccos_e
y = np.sin(a) * sin_arccos_e
z = np.cos(arccos_e)
xyzcoords.append([x, y, z])
azel.append([a, e])
a = a + inc
e = e + incb
a = -1
return xyzcoords, azel
def getUnitVec(ref_point_1, ref_point_2, distance = 1.0):
"""
Returns a vector of a given length pointed from point 1 to point 2
Args:
ref_point_1 (tm): Reference point 1
ref_point_2 (tm): Reference point 2
distance (Float): length of the returned vector. Defaults to 1
Returns:
tm: transform representing vector
"""
v1 = np.array([ref_point_1[0], ref_point_1[1], ref_point_1[2]])
unit_b = (np.array([ref_point_2[0], ref_point_2[1], ref_point_2[2]]) - v1)
unit = unit_b / ling.norm(unit_b)
pos = v1 + (unit * distance)
return tm([pos[0], pos[1], pos[2], 0, 0, 0])
#Jacobians
def chainJacobian(screws, theta):
"""
Chain Jacobian
Args:
Screws: screw list
theta: theta to evaluate at
Returns:
jac: chain jacobian
"""
jac = np.zeros((6, np.size(theta)))
T = np.eye(4)
jac[0:6, 0] = screws[0:6, 0]
for i in range(1, np.size(theta)):
T = T @ transformFromTwist(theta[i-1] * screws[0:6, i-1]).gTM()  # accumulate exp([S_j] theta_j) for joints before i
jac[0:6, i] = mr.Adjoint(T) @ screws[0:6, i]
return jac
def numericalJacobian(f, x0, h):
"""
Calculates a numerical jacobian
Args:
f: function handle (FK)
x0: initial value
h: delta value
Returns:
dfdx: numerical Jacobian
"""
x0p = np.copy(x0)
x0p[0] = x0p[0] + h
x0m = np.copy(x0)
x0m[0] = x0m[0] - h
dfdx = (f(x0p)-f(x0m))/(2*h)
for i in range(1, x0.size):
x0p = np.copy(x0)
x0p[i] = x0p[i] + h
x0m = np.copy(x0)
x0m[i] = x0m[i] - h
#Conversion paused here. continue evaluation
dfdx=np.concatenate((dfdx,(f(x0p)-f(x0m))/(2*h)), axis = 0)
dfdx=dfdx.conj().T
f(x0)
return dfdx
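# Illustrative example (added sketch, not part of the original module): numericalJacobian
# expects f to return a 2-D row vector, so f(x) = [x0**2, x0*x1] is written with shape (1, 2).
def _example_numerical_jacobian():
    f = lambda x: np.array([[x[0]**2, x[0] * x[1]]])
    x0 = np.array([1.0, 2.0])
    return numericalJacobian(f, x0, 1e-6)  # approximately [[2, 0], [2, 1]]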
#Misc
def boxSpatialInertia(m, l, w, h):
"""
Calculates spatial inertial properties of a box
Args:
m (float): mass of box
l (float): length of box
w (float): width of box
h (float): height of box
Returns:
ndarray: spatial inertia matrix of box
"""
Ixx = m*(w*w+h*h)/12
Iyy = m*(l*l+h*h)/12
Izz = m*(w*w+l*l)/12
Ib = np.diag((Ixx, Iyy, Izz))
Gbox = np.vstack((np.hstack((Ib, np.zeros((3, 3)))), np.hstack((np.zeros((3, 3)), m*np.identity((3))))))
return Gbox
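# Illustrative example (added sketch, not part of the original module): spatial inertia
# of a 1 kg box, 0.2 m long, 0.1 m wide and 0.05 m tall, expressed at its center of mass.
def _example_box_inertia():
    return boxSpatialInertia(1.0, 0.2, 0.1, 0.05)  # 6x6 matrix: rotational inertia block and mass block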
def delMini(arr):
"""
Deletes subarrays of dimension 1
Requires 2d array
Args:
arr: array to prune
Returns:
newarr: pruned array
"""
s = arr.shape
newarr = np.zeros((s))
for i in range(s[0]):
for j in range(s[1]):
newarr[i, j] = arr[i, j]
return newarr
def setElements(data, inds, vals):
"""
Sets the elements in data specified by inds with the values in vals
Args:
data (ndarray): data to edit
inds (ndarray): indexes of data to access
vals (ndarray): new values to insert into the data
Returns:
ndarray: modified data
"""
res = data.copy()
for i in range(len(inds)):
res[inds[i]] = vals[i]
#res[inds] = vals
#for i in range (0,(inds.size-1)):
# res[inds[i]] = vals[i]
return res
# DEPRECATED FUNCTION HANDLES
import traceback
def LocalToGlobal(reference, rel):
"""Deprecation notice function. Please use indicated correct function"""
print(LocalToGlobal.__name__ + ' is deprecated, use ' + localToGlobal.__name__ + ' instead')
traceback.print_stack(limit=2)
return localToGlobal(reference, rel)
def GlobalToLocal(reference, rel):
"""Deprecation notice function. Please use indicated correct function"""
print(GlobalToLocal.__name__ + ' is deprecated, use ' + globalToLocal.__name__ + ' instead')
traceback.print_stack(limit=2)
return globalToLocal(reference, rel)
def PlaneFrom3Tms(ref_point_1, ref_point_2, ref_point_3):
"""Deprecation notice function. Please use indicated correct function"""
print(PlaneFrom3Tms.__name__ + ' is deprecated, use ' + planeFromThreePoints.__name__ + ' instead')
traceback.print_stack(limit=2)
return planeFromThreePoints(ref_point_1, ref_point_2, ref_point_3)
def PlaneTMSFromOne(ref_point_1):
"""Deprecation notice function. Please use indicated correct function"""
print(PlaneTMSFromOne.__name__ + ' is deprecated, use ' + planePointsFromTransform.__name__ + ' instead')
traceback.print_stack(limit=2)
return planePointsFromTransform(ref_point_1)
def Mirror(origin, mirror_plane):
"""Deprecation notice function. Please use indicated correct function"""
print(Mirror.__name__ + ' is deprecated, use ' + mirror.__name__ + ' instead')
traceback.print_stack(limit=2)
return mirror(origin, mirror_plane)
def TMMidRotAdjust(active_point, ref_point_1, ref_point_2, mode = 0):
"""Deprecation notice function. Please use indicated correct function"""
print(TMMidRotAdjust.__name__ + ' is deprecated, use ' + adjustRotationToMidpoint.__name__ + ' instead')
traceback.print_stack(limit=2)
return adjustRotationToMidpoint(active_point, ref_point_1, ref_point_2, mode = 0)
def TMMidPointEx(ref_point_1, ref_point_2):
"""Deprecation notice function. Please use indicated correct function"""
print(TMMidPointEx.__name__ + ' is deprecated, use ' + tmAvgMidpoint.__name__ + ' instead')
traceback.print_stack(limit=2)
return tmAvgMidpoint(ref_point_1, ref_point_2)
def TMMidPoint(ref_point_1, ref_point_2):
"""Deprecation notice function. Please use indicated correct function"""
print(TMMidPoint.__name__ + ' is deprecated, use ' + tmInterpMidpoint.__name__ + ' instead')
traceback.print_stack(limit=2)
return tmInterpMidpoint(ref_point_1, ref_point_2)
def RotFromVec(ref_point_1, ref_point_2):
"""Deprecation notice function. Please use indicated correct function"""
print(RotFromVec.__name__ + ' is deprecated, use ' + rotationFromVector.__name__ + ' instead')
traceback.print_stack(limit=2)
return rotationFromVector(ref_point_1, ref_point_2)
def lookat(ref_point_1, ref_point_2):
"""Deprecation notice function. Please use indicated correct function"""
print(lookat.__name__ + ' is deprecated, use ' + lookAt.__name__ + ' instead')
traceback.print_stack(limit=2)
return lookAt(ref_point_1, ref_point_2)
def Error(ref_point_1, ref_point_2):
"""Deprecation notice function. Please use indicated correct function"""
print(Error.__name__ + ' is deprecated, use ' + poseError.__name__ + ' instead')
traceback.print_stack(limit=2)
return poseError(ref_point_1, ref_point_2)
def GeometricError(ref_point_1, ref_point_2):
"""Deprecation notice function. Please use indicated correct function"""
print(GeometricError.__name__ + ' is deprecated, use ' + geometricError.__name__ + ' instead')
traceback.print_stack(limit=2)
return geometricError(ref_point_1, ref_point_2)
def Distance(ref_point_1, ref_point_2):
"""Deprecation notice function. Please use indicated correct function"""
print(Distance.__name__ + ' is deprecated, use ' + distance.__name__ + ' instead')
traceback.print_stack(limit=2)
return distance(ref_point_1, ref_point_2)
def ArcDistance(ref_point_1, ref_point_2):
"""Deprecation notice function. Please use indicated correct function"""
print(ArcDistance.__name__ + ' is deprecated, use ' + arcDistance.__name__ + ' instead')
traceback.print_stack(limit=2)
return arcDistance(ref_point_1, ref_point_2)
def CloseGap(origin_point, goal_point, delta):
"""Deprecation notice function. Please use indicated correct function"""
print(CloseGap.__name__ + ' is deprecated, use ' + closeLinearGap.__name__ + ' instead')
traceback.print_stack(limit=2)
return closeLinearGap(origin_point, goal_point, delta)
def ArcGap(origin_point, goal_point, delta):
"""Deprecation notice function. Please use indicated correct function"""
print(ArcGap.__name__ + ' is deprecated, use ' + closeArcGap.__name__ + ' instead')
traceback.print_stack(limit=2)
return closeArcGap(origin_point, goal_point, delta)
def Deg2Rad(deg):
"""Deprecation notice function. Please use indicated correct function"""
print(Deg2Rad.__name__ + ' is deprecated, use ' + deg2Rad.__name__ + ' instead')
traceback.print_stack(limit=2)
return deg2Rad(deg)
def Rad2Deg(rad):
"""Deprecation notice function. Please use indicated correct function"""
print(Rad2Deg.__name__ + ' is deprecated, use ' + rad2Deg.__name__ + ' instead')
traceback.print_stack(limit=2)
return rad2Deg(rad)
def AngleMod(rad):
"""Deprecation notice function. Please use indicated correct function"""
print(AngleMod.__name__ + ' is deprecated, use ' + angleMod.__name__ + ' instead')
traceback.print_stack(limit=2)
return angleMod(rad)
def AngleBetween(ref_point_1, ref_point_2, ref_point_3):
"""Deprecation notice function. Please use indicated correct function"""
print(AngleBetween.__name__ + ' is deprecated, use ' + angleBetween.__name__ + ' instead')
traceback.print_stack(limit=2)
return angleBetween(ref_point_1, ref_point_2, ref_point_3)
def GenForceWrench(position_applied, force, force_direction_vector):
"""Deprecation notice function. Please use indicated correct function"""
print(GenForceWrench.__name__ + ' is deprecated, use ' + makeWrench.__name__ + ' instead')
traceback.print_stack(limit=2)
return makeWrench(position_applied, force, force_direction_vector)
def TransformWrenchFrame(wrench, old_wrench_frame, new_wrench_frame):
"""Deprecation notice function. Please use indicated correct function"""
print(TransformWrenchFrame.__name__ + ' is deprecated, use ' + transformWrenchFrame.__name__ + ' instead')
traceback.print_stack(limit=2)
return transformWrenchFrame(wrench, old_wrench_frame, new_wrench_frame)
def TwistToScrew(input_twist):
"""Deprecation notice function. Please use indicated correct function"""
print(TwistToScrew.__name__ + ' is deprecated, use ' + twistToScrew.__name__ + ' instead')
traceback.print_stack(limit=2)
return twistToScrew(input_twist)
def NormalizeTwist(twist):
"""Deprecation notice function. Please use indicated correct function"""
print(NormalizeTwist.__name__ + ' is deprecated, use ' + normalizeTwist.__name__ + ' instead')
traceback.print_stack(limit=2)
return normalizeTwist(twist)
def TwistFromTransform(input_transform):
"""Deprecation notice function. Please use indicated correct function"""
print(TwistFromTransform.__name__ + ' is deprecated, use ' + twistFromTransform.__name__ + ' instead')
traceback.print_stack(limit=2)
return twistFromTransform(input_transform)
def TransformFromTwist(input_twist):
"""Deprecation notice function. Please use indicated correct function"""
print(TransformFromTwist.__name__ + ' is deprecated, use ' + transformFromTwist.__name__ + ' instead')
traceback.print_stack(limit=2)
return transformFromTwist(input_twist)
def TrVec(transform, vec):
"""Deprecation notice function. Please use indicated correct function"""
print(TrVec.__name__ + ' is deprecated, use ' + transformByVector.__name__ + ' instead')
traceback.print_stack(limit=2)
return transformByVector(transform, vec)
def ChainJacobian(screws, theta):
"""Deprecation notice function. Please use indicated correct function"""
print(ChainJacobian.__name__ + ' is deprecated, use ' + chainJacobian.__name__ + ' instead')
traceback.print_stack(limit=2)
return chainJacobian(screws, theta)
def NumJac(f, x0, h):
"""Deprecation notice function. Please use indicated correct function"""
print(NumJac.__name__ + ' is deprecated, use ' + numericalJacobian.__name__ + ' instead')
traceback.print_stack(limit=2)
return numericalJacobian(f, x0, h)
def BoxSpatialInertia(m, l, w, h):
"""Deprecation notice function. Please use indicated correct function"""
print(BoxSpatialInertia.__name__ + ' is deprecated, use ' + boxSpatialInertia.__name__ + ' instead')
traceback.print_stack(limit=2)
return boxSpatialInertia(m, l, w, h)
def SetElements(data, inds, vals):
"""Deprecation notice function. Please use indicated correct function"""
print(SetElements.__name__ + ' is deprecated, use ' + setElements.__name__ + ' instead')
traceback.print_stack(limit=2)
return setElements(data, inds, vals)
```
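A brief usage sketch for the module above (an illustrative addition, assuming the package is importable as `basic_robotics` and that `tm` accepts a 6-element `[x, y, z, rx, ry, rz]` list; the import path mirrors the relative import used in `arm_model.py` below):
```python
import numpy as np
from basic_robotics.general import tm, fsr  # assumed import path

frame_ab = tm([1.0, 0.0, 0.0, 0, 0, np.pi / 2])   # frame B expressed in frame A
obj_in_b = tm([0.5, 0.2, 0.0, 0, 0, 0])           # an object pose known in frame B

obj_in_a = fsr.localToGlobal(frame_ab, obj_in_b)  # re-express the object in frame A
obj_back = fsr.globalToLocal(frame_ab, obj_in_a)  # round-trip back to frame B
print(obj_in_a, obj_back)
```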
#### File: basic_robotics/kinematics/arm_model.py
```python
from ..general import tm, fmr, fsr
from ..plotting.Draw import DrawArm, DrawRectangle
from ..utilities.disp import disp
import numpy as np
import scipy as sci
import scipy.linalg as ling
import random
import xml.etree.ElementTree as ET
import os
import json
from os.path import dirname, basename, isfile
class Arm:
#Conventions:
#Filenames: snake_case
#Variables: snake_case
#Functions: camelCase
#ClassNames: CapsCase
#Docstring: Google
#Converted to python - Liam
def __init__(self, base_pos_global, screw_list, end_effector_home,
joint_poses_home, joint_axes = np.array([0])):
""""
Create a serial arm
Args:
base_pos_global: Base transform of Arm (tm object)
screw_list: Screw list of arm. Nx6 matrix.
end_effector_home: Initial end effector of arm.
joint_poses_home: joint_poses_home list for arm
joint_axes: joint_axes list for arm
Returns:
Arm: arm object
"""
#Configure the arm given the base screws, the base transform.
self.cameras = []
self.num_dof = np.shape(screw_list)[1]
self._theta = np.zeros(self.num_dof)
self.vis_props = None
self.col_props = None
self.link_names = []
for i in range(self.num_dof):
self.link_names.append('link' + str(i))
self.link_names.append('end_effector')
self.eef_transform = None
self.link_home_positions = None
self.joint_origins = self.link_home_positions
self.initialize(base_pos_global, screw_list, end_effector_home, joint_poses_home)
self.link_mass_transforms = 0
self.box_spatial_links = 0
self.link_dimensions = None
self.grav = np.array([0, 0, -9.81])
self.fail_count = 0
for i in range(0, self.num_dof):
self.screw_list_body[:, i] = (fmr.Adjoint(self.end_effector_home.inv().gTM()) @
self.screw_list[:, i])
self.reversable = False
if joint_axes.shape[0] != 1:
self.reversable = True
self.reversed = False
self.joint_axes = joint_axes
self.original_joint_axes = joint_axes
self.original_screw_list = self.screw_list
self.joint_mins = np.ones(self.num_dof) * np.pi * -1
self.joint_maxs = np.ones(self.num_dof)* np.pi
self.max_vels = np.ones(self.num_dof) * np.Inf
self.max_effort = np.ones(self.num_dof) * np.inf
self.FK(np.zeros((self.num_dof)))
def initialize(self, base_pos_global, screw_list, end_effector_home, joint_poses_home):
"""
Helper for Serial Arm. Should be called internally
Args:
base_pos_global: Base transform of Arm (tm object)
screw_list: Screw list of arm. Nx6 matrix.
end_effector_home: Initial end effector of arm.
joint_poses_home: joint_poses_home list for arm
"""
self.screw_list = screw_list
self.original_screw_list_body = np.copy(screw_list)
if self.link_home_positions is not None:
for i in range((self.num_dof)):
base_to_link = fsr.globalToLocal(self.base_pos_global, self.link_home_positions[i])
new_global = fsr.localToGlobal(base_pos_global, base_to_link)
self.link_home_positions[i] = new_global
self.base_pos_global = base_pos_global
self.original_joint_poses_home = joint_poses_home
self.joint_poses_home = np.zeros((3, self.num_dof))
self.screw_list_body = np.zeros((6, self.num_dof))
if joint_poses_home.size > 1:
for i in range(0, self.num_dof):
self.joint_poses_home[0:3, i] = fsr.transformByVector(
base_pos_global, joint_poses_home[0:3, i])
#Convert transformByVector
for i in range(0, self.num_dof):
self.screw_list[:, i] = fmr.Adjoint(base_pos_global.gTM()) @ screw_list[:, i]
if joint_poses_home.size <= 1:
[w, th, joint_pose_temp, h] = fmr.TwistToScrew(self.screw_list[:, i])
#Convert TwistToScrew
self.joint_poses_home[0:3, i] = joint_pose_temp; # For plotting purposes
self.end_effector_home_local = end_effector_home
self.end_effector_home = base_pos_global @ end_effector_home
self.end_effector_pos_global = self.end_effector_home.copy()
self.original_end_effector_home = self.end_effector_home.copy()
"""
Compatibility, for those to be deprecated
"""
def printOutOfDateFunction(self, old_name, use_name):
print(old_name + ' is deprecated. Please use ' + use_name + ' instead.')
def RandomPos(self):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction('RandomPos', 'randomPos')
return self.randomPos()
def Reverse(self):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction('Reverse', 'reverse')
self.reverse()
def vServoSP(self, target, tol = 2, ax = 0, plt = 0, fig = 0):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction('vServoSP', 'visualServoToTarget')
return self.visualServoToTarget(target, tol, ax, plt, fig)
def SetDynamicsProperties(self, _Mlinks = None, _Mhome = None, _Glinks = None, _Dims = None):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction('SetDynamicsProperties', 'setDynamicsProperties')
return self.setDynamicsProperties(_Mlinks, _Mhome, _Glinks, _Dims)
def SetMasses(self, mass):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction('SetMasses', 'setMasses')
return self.setMasses(mass)
def TestArmValues(self):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction('TestArmValues', 'testArmValues')
return self.testArmValues()
def SetArbitraryHome(self, theta,T):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction('SetArbitraryHome', 'setArbitraryHome')
return self.setArbitraryHome(theta, T)
def RestoreOriginalEE(self):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction('RestoreOriginalEE', 'restoreOriginalEE')
return self.restoreOriginalEE()
def StaticForces(self, theta, wrenchEE):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction('StaticForces', 'staticForces')
return self.staticForces(theta, wrenchEE)
def StaticForcesInv(self, theta, tau):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction('StaticForcesInv', 'staticForcesInv')
return self.staticForcesInv(theta, tau)
def InverseDynamics(self, theta, thetadot, thetadotdot, grav, wrenchEE):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction('InverseDynamics', 'inverseDynamics')
return self.inverseDynamics(theta, thetadot, thetadotdot, grav, wrenchEE)
def InverseDynamicsEMR(self, theta, thetadot, thetadotdot, grav, wrenchEE):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction('InverseDynamicsEMR', 'inverseDynamicsEMR')
return self.inverseDynamicsEMR(theta, thetadot, thetadotdot, grav, wrenchEE)
def InverseDynamicsE(self, theta, thetadot, thetadotdot, grav, wrenchEE):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction('InverseDynamicsE', 'inverseDynamicsE')
return self.inverseDynamicsE(theta, thetadot, thetadotdot, grav, wrenchEE)
def InverseDynamicsC(self, theta, thetadot, thetadotdot, grav, wrenchEE):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction('InverseDynamicsC', 'inverseDynamicsC')
return self.inverseDynamicsC(theta, thetadot, thetadotdot, grav, wrenchEE)
def ForwardDynamicsE(self, theta, thetadot, tau, grav, wrenchEE):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction('ForwardDynamicsE', 'forwardDynamicsE')
return self.forwardDynamicsE(theta, thetadot, tau, grav, wrenchEE)
def ForwardDynamics(self, theta, thetadot, tau, grav, wrenchEE):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction('ForwardDynamics', 'forwardDynamics')
return self.forwardDynamics(theta, thetadot, tau, grav, wrenchEE)
def MassMatrix(self, theta):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction('MassMatrix', 'massMatrix')
return self.massMatrix(theta)
def CoriolisGravity(self, theta, thetadot, grav):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction('CoriolisGravity', 'coriolisGravity')
return self.coriolisGravity(theta, thetadot, grav)
def EndEffectorForces(self, theta, wrenchEE):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction('EndEffectorForces', 'endEffectorForces')
return self.endEffectorForces(theta, wrenchEE)
def Jacobian(self, theta):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction('Jacobian', 'jacobian')
return self.jacobian(theta)
def JacobianBody(self, theta):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction('JacobianBody', 'jacobianBody')
return self.jacobianBody(theta)
def JacobianLink(self, theta, i):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction('JacobianLink', 'jacobianLink')
return self.jacobianLink(theta, i)
def JacobianEE(self, theta):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction('JacobianEE', 'jacobianEE')
return self.jacobianEE(theta)
def JacobianEEtrans(self, theta):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction('JacobianEEtrans', 'jacobianEEtrans')
return self.jacobianEEtrans(theta)
def NumericalJacobian(self, theta):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction('NumericalJacobian', 'numericalJacobian')
return self.numericalJacobian(theta)
def GetManipulability(self, theta = None):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction('GetManipulability', 'getManipulability')
return self.getManipulability(theta)
def Draw(self, ax):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction('Draw', 'draw')
return self.draw(ax)
"""
_ ___ _ _
| |/ (_) | | (_)
| ' / _ _ __ ___ _ __ ___ __ _| |_ _ ___ ___
| < | | '_ \ / _ \ '_ ` _ \ / _` | __| |/ __/ __|
| . \| | | | | __/ | | | | | (_| | |_| | (__\__ \
|_|\_\_|_| |_|\___|_| |_| |_|\__,_|\__|_|\___|___/
"""
def thetaProtector(self, theta):
"""
Properly bounds theta values
Args:
theta: joint angles to be tested and reset
Returns:
newtheta: corrected joint angles
"""
theta[np.where(theta<self.joint_mins)] = self.joint_mins[np.where(theta<self.joint_mins)]
theta[np.where(theta>self.joint_maxs)] = self.joint_maxs[np.where(theta>self.joint_maxs)]
return theta
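# Note (illustrative addition): thetaProtector clamps theta element-wise to the configured
# joint limits; the same effect could be had with np.clip(theta, self.joint_mins, self.joint_maxs).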
#Converted to python -Liam
def FK(self, theta, protect = False):
"""
Calculates the end effector position of the serial arm given thetas
params:
theta: input joint array
protect: whether or not to validate action
returns:
end_effector_transform: End effector tm
"""
if not protect and (np.any(theta < self.joint_mins) or np.any(theta > self.joint_maxs)):
theta = self.thetaProtector(theta)
self._theta = fsr.angleMod(theta.reshape(len(theta)))
end_effector_transform = tm(fmr.FKinSpace(
self.end_effector_home.gTM(), self.screw_list, theta))
self.end_effector_pos_global = end_effector_transform
return end_effector_transform
#Converted to python - Liam
def FKLink(self, theta, i, protect = False):
"""
Calculates the position of a given joint provided a theta list
Args:
theta: The array of theta values for each joint
i: The index of the joint desired, from 0
"""
# Returns the TM of link i
# Lynch 4.1
if not protect and (np.any(theta < self.joint_mins) or np.any(theta > self.joint_maxs)):
print('Unsuitable Thetas')
theta = self.thetaProtector(theta)
end_effector_pos = tm(fmr.FKinSpace(self.link_home_positions[i].TM,
self.screw_list[0:6, 0:i], theta[0:i]))
return end_effector_pos
#Converted to python - Liam
def IK(self,T, theta_init = np.zeros(1), check = 1, level = 6, protect = False):
"""
Calculates joint positions of a serial arm. All parameters are
optional except the desired end effector position
Args:
T: Desired end effector position to calculate for
theta_init: Initial theta guess for desired end effector position.
Set to 0s if not provided.
check: Whether or not the program should retry if the position finding fails
level: number of recursive calls allowed if check is enabled
Returns:
List of thetas, success boolean
"""
if theta_init.size == 1:
theta_init = fsr.angleMod(self._theta.reshape(len(self._theta)))
if not protect:
return self.constrainedIK(T, theta_init, check, level)
theta, success = fmr.IKinSpace(self.screw_list, self.end_effector_home.gTM(),
T.gTM(), theta_init, 0.00000001, 0.00000001)
theta = fsr.angleMod(theta)
self._theta = theta
if success:
self.end_effector_pos_global = T
else:
if check == 1:
i = 0
while i < level and success == 0:
theta_temp = np.zeros((len(self._theta)))
for j in range(len(theta_temp)):
theta_temp[j] = random.uniform(-np.pi, np.pi)
theta, success = fmr.IKinSpace(self.screw_list, self.end_effector_home.gTM(),
T.gTM(), theta_temp, 0.00000001, 0.00000001)  # retry from the freshly randomized seed
i = i + 1
if success:
self.end_effector_pos_global = T
return theta, success
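# Illustrative usage (added note, assuming 'arm' is a constructed Arm and 'goal' a reachable tm pose):
#   thetas, ok = arm.IK(goal)   # constrained solve with random restarts on failure
#   pose = arm.FK(thetas)       # forward-check the solution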
def constrainedIK(self, T, theta_init, check = 1, level = 6):
"""
Calculates joint positions of a serial arm, provided rotational constraints on the Joints
All parameters are optional except the desired end effector position
Joint constraints are set through the joint_maxs and joint_mins properties, and should be
arrays the same size as the number of DOFS
Args:
T: Desired end effector position to calculate for
theta_init: Initial theta guess for desired end effector position.
Set to 0s if not provided.
check: Whether or not the program should retry if the position finding fails
level: number of recursive calls allowed if check is enabled
Returns:
List of thetas, success boolean
"""
if not isinstance(T, tm):
print(T)
print('Attempted pass ^')
return self._theta
screw_list = self.screw_list.copy()
if check == 1:
self.fail_count = 0
M = self.end_effector_home.copy()
pos_tolerance = .001
rot_tolerance = .0001
theta_list = self._theta.copy()
i = 0
max_iterations = 30
try:
theta_list, success = fmr.IKinSpaceConstrained(screw_list, M.gTM(),
T.gTM(), theta_list, pos_tolerance, rot_tolerance,
self.joint_mins, self.joint_maxs, max_iterations)
except:
theta_list, success = self.constrainedIKNoFMR(screw_list, M, T, theta_list,
pos_tolerance, rot_tolerance, max_iterations)
if success:
self.end_effector_pos_global = T
else:
if check == 1:
i = 0
while i < level and success == 0:
theta_temp = np.zeros((len(self._theta)))
for j in range(len(theta_temp)):
theta_temp[j] = random.uniform(self.joint_mins[j], self.joint_maxs[j])
try:
theta_list, success = fmr.IKinSpaceConstrained(screw_list, M.gTM(),
T.gTM(), theta_temp, pos_tolerance, rot_tolerance,
self.joint_mins, self.joint_maxs, max_iterations)
except Exception as e:
theta_list, success = self.constrainedIK(T, theta_temp, check = 0)
disp('FMR Failure: ' + str(e))
i = i + 1
if success:
self.end_effector_pos_global = T
if not success:
if check == 0:
self.fail_count += 1
else:
#print('Total Cycle Failure')
self.FK(np.zeros(len(self._theta)))
else:
if self.fail_count != 0:
print('Success + ' + str(self.fail_count) + ' failures')
self.FK(theta_list)
return theta_list, success
def constrainedIKNoFMR(self,
screw_list, M, T, theta_list, pos_tolerance, rot_tolerance, max_iterations):
"""
Used as a backup function for the standard constrained IK
Args:
screw_list: screw list
M: home end effector position
T: Goal Position
theta_list: Initial thetas
pos_tolerance: Positional tolerance
rot_tolerance: Rotational tolerance
max_iterations: Maximum Iterations before failure
Returns:
theta_list: list of solved thetas
success: boolean of success
"""
end_effector_pos_temp = fmr.FKinSpace(M.gTM(), screw_list, theta_list)
error_vec = np.dot(fmr.Adjoint(end_effector_pos_temp), fmr.se3ToVec(
fmr.MatrixLog6(np.dot(fmr.TransInv(end_effector_pos_temp), T.gTM()))))
#print(fmr.MatrixLog6(np.dot(fmr.TransInv(end_effector_pos_temp), T)), 'Test')
err = np.linalg.norm([error_vec[0], error_vec[1], error_vec[2]]) > pos_tolerance \
or np.linalg.norm([error_vec[3], error_vec[4], error_vec[5]]) > rot_tolerance
if np.isnan(error_vec).any():
err = True
i = 0
while err and i < max_iterations:
theta_list = theta_list \
+ np.dot(np.linalg.pinv(fmr.JacobianSpace(
screw_list, theta_list)), error_vec)
for j in range(len(theta_list)):
if theta_list[j] < self.joint_mins[j]:
theta_list[j] = self.joint_mins[j]
if theta_list[j] > self.joint_maxs[j]:
theta_list[j] = self.joint_maxs[j];
i = i + 1
end_effector_pos_temp = fmr.FKinSpace(M.gTM(), screw_list, theta_list)
error_vec = np.dot(fmr.Adjoint(end_effector_pos_temp), \
fmr.se3ToVec(fmr.MatrixLog6(np.dot(
fmr.TransInv(end_effector_pos_temp), T.gTM()))))
err = np.linalg.norm([error_vec[0], error_vec[1], error_vec[2]]) > pos_tolerance \
or np.linalg.norm([error_vec[3], error_vec[4], error_vec[5]]) > rot_tolerance
if np.isnan(error_vec).any():
err = True
success = not err
return theta_list, success
def IKForceOptimal(self, T, theta_init, forcev, random_sample = 1000, mode = 'MAX'):
"""
Early attempt at creating a force optimization package for a serial arm.
Absolutely NOT the optimial way to do this. Only works for overactuated arms.
Args:
T: Desired end effector position to calculate for
theta_init: Initial theta guess for desired end effector position.
Set to 0s if not provided.
forcev: Force applied to the end effector of the arm. Wrench.
random_sample: number of samples to test to look for the most optimal solution
mode: Set Mode to reduce. Max: Max Force. Sum: Sum of forces. Mean: Mean Force
Returns:
List of thetas
"""
thetas = []
for i in range(random_sample):
theta_temp = np.zeros((len(self._theta)))
for j in range(len(theta_init)):
theta_temp[j] = random.uniform(-np.pi, np.pi)
thetas.append(theta_temp)
force_thetas = []
temp_moment = np.cross(T[0:3].reshape((3)), forcev)
wrench = np.array([temp_moment[0], temp_moment[1],
temp_moment[2], forcev[0], forcev[1], forcev[2]]).reshape((6, 1))
for i in range(len(thetas)):
candidate_theta, success =self.IK(T, thetas[i])
if success and sum(abs(fsr.poseError(self.FK(candidate_theta), T))) < .0001:
force_thetas.append(candidate_theta)
max_force = []
for i in range(len(force_thetas)):
if mode == 'MAX':
max_force.append(max(abs(self.staticForces(force_thetas[i], wrench))))
elif mode == 'SUM':
max_force.append(sum(abs(self.staticForces(force_thetas[i], wrench))))
elif mode == 'MEAN':
max_force.append(sum(abs(
self.staticForces(force_thetas[i], wrench))) / len(force_thetas))
index = max_force.index(min(max_force))
self._theta = force_thetas[index]
return force_thetas[index]
def IKMotion(self, T, theta_init):
"""
This calculates IK by numerically moving the end effector
from the pose defined by theta_init in the direction of the desired
pose T. If the pose cannot be reached, it gets as close as
it can. This can sometimes return a better result than IK.
An approximate explanation can be found in Lynch 9.2
Args:
T: Desired end effector position to calculate for
theta_init: Initial theta guess for desired end effector position.
Returns:
theta: list of theta lists
success: boolean for success
t: goal
thall: integration results
"""
start_transform = self.FK(theta_init)
start_direction = T @ start_transform.inv()
twist_direction = fsr.TwistFromTransform(start_direction)
#[t, thall] = ode15s(@(t, x)(pinv(self.jacobian(x))*twist_direction),[0 1], theta_init);
res = lambda t, x: np.linalg.pinv(self.jacobian(x))*twist_direction
t = sci.integrate.ode(res).set_integrator('vode', method='bdf', order=15)
thall = t.integrate(1)
theta = thall[-1,:].conj().T
if fsr.Norm(T-self.FK(theta)) < 0.001:
success = 1
else:
success = 0
return theta, success, t, thall
def IKFree(self,T, theta_init, inds):
"""
Only allow theta_init(freeinds) to be varied
Method not covered in Lynch.
SetElements inserts the variable vector x into the positions
indicated by freeinds in theta_init. The remaining elements are
unchanged.
Args:
T: Desired end effector position to calculate for
theta_init: Initial theta guess for desired end effector position.
inds: Free indexes to move
Returns:
theta: list of theta lists
success: boolean for success
t: goal
thall: integration results
"""
#free_thetas = fsolve(@(x)(obj.FK(SetElements(theta_init,
#freeinds, x))-T), theta_init(freeinds))
res = lambda x : fsr.TAAtoTM(self.FK(fsr.setElements(theta_init, inds, x))-T)
#Use newton_krylov instead of fsolve
free_thetas = sci.optimize.fsolve(res, theta_init[inds])
# free_thetas = fsolve(@(x)(self.FK(SetElements(theta_init,
#freeinds, x))-T), theta_init(freeinds));
theta = np.squeeze(theta_init);
theta[inds] = np.squeeze(free_thetas);
if fsr.Norm(T-self.FK(theta)) < 0.001:
success = 1;
else:
success = 0;
return (theta, success)
"""
_ ___ _ _ _ _ _
| |/ (_) | | (_) | | | | | |
| ' / _ _ __ ___ _ __ ___ __ _| |_ _ ___ ___ | |__| | ___| |_ __ ___ _ __ ___
| < | | '_ \ / _ \ '_ ` _ \ / _` | __| |/ __/ __| | __ |/ _ \ | '_ \ / _ \ '__/ __|
| . \| | | | | __/ | | | | | (_| | |_| | (__\__ \ | | | | __/ | |_) | __/ | \__ \
|_|\_\_|_| |_|\___|_| |_| |_|\__,_|\__|_|\___|___/ |_| |_|\___|_| .__/ \___|_| |___/
| |
|_|
"""
def randomPos(self):
"""
Create a random position, return the end effector TF
Returns:
random pos
"""
theta_temp = np.zeros((len(self._theta)))
for j in range(len(theta_temp)):
theta_temp[j] = random.uniform(self.joint_mins[j], self.joint_maxs[j])
pos = self.FK(theta_temp)
return pos
def reverse(self):
"""
Flip the serial arm around so that the end effector becomes the base and vice versa,
keeping the same end pose
"""
if not self.reversable:
return
old_thetas = np.copy(self._theta)
new_theta = np.zeros((len(self._theta)))
for i in range(self.num_dof):
new_theta[i] = old_thetas[len(old_thetas) - 1 - i]
new_screw_list = np.copy(self.original_screw_list)
new_end_effector_home = self.end_effector_home.copy()
new_thetas = self.FK(self._theta)
new_joint_axes = np.copy(self.joint_axes)
new_joint_poses_home = np.copy(self.original_joint_poses_home)
for i in range(new_joint_axes.shape[1]):
new_joint_axes[0:3, i] = self.joint_axes[0:3, new_joint_axes.shape[1] - 1 - i]
differences = np.zeros((3, new_joint_poses_home.shape[1]-1))
for i in range(new_joint_poses_home.shape[1]-1):
differences[0:3, i] = (self.original_joint_poses_home[0:3,(
self.original_joint_poses_home.shape[1] - 1 - i)] -
self.original_joint_poses_home[0:3,(
self.original_joint_poses_home.shape[1] - 2 - i)])
#print(differences, 'differences')
for i in range(new_joint_poses_home.shape[1]):
if i == 0:
new_joint_poses_home[0:3, i] = (self.original_joint_poses_home[0:3, (
self.original_joint_poses_home.shape[1] - 1)] - np.sum(differences, axis = 1))
else:
new_joint_poses_home[0:3, i] = (new_joint_poses_home[0:3, i -1] +
differences[0:3, i - 1])
for i in range(self.num_dof):
new_screw_list[0:6, i] = np.hstack((new_joint_axes[0:3, i],
np.cross(new_joint_poses_home[0:3, i], new_joint_axes[0:3, i])))
new_thetas = (new_thetas @
tm([0, 0, 0, 0, np.pi, 0]) @ tm([0, 0, 0, 0, 0, np.pi]))
if np.size(self.link_dimensions) != 1:
new_link_dimensions = np.zeros((self.link_dimensions.shape))
for i in range(self.link_dimensions.shape[1]):
new_link_dimensions[0:3, i] = (
self.link_dimensions[0:3,(self.link_dimensions.shape[1] - i -1)])
self.link_dimensions = new_link_dimensions
if len(self.link_home_positions) != 1:
new_link_home_positions = [None] * len(self.link_home_positions)
for i in range(len(new_link_home_positions)):
new_link_home_positions[i] = (
self.link_home_positions[len(new_link_home_positions) - i -1])
self.link_home_positions = new_link_home_positions
self.screw_list = new_screw_list
self.original_screw_list = np.copy(new_screw_list)
#print(self.base_pos_global, '')
new_end_effector_home = new_thetas @ self.end_effector_home_local
self.base_pos_global = new_thetas
self.original_joint_poses_home = new_joint_poses_home
self.joint_poses_home = np.zeros((3, self.num_dof))
self.screw_list_body = np.zeros((6, self.num_dof))
if new_joint_poses_home.size > 1:
for i in range(0, self.num_dof):
self.joint_poses_home[0:3, i] = fsr.transformByVector(new_thetas,
new_joint_poses_home[0:3, i])
#Convert transformByVector
for i in range(0, self.num_dof):
self.screw_list[:, i] = fmr.Adjoint(new_thetas.gTM()) @ new_screw_list[:, i]
if new_joint_poses_home.size <= 1:
[w, th, joint_pose_temp, h] = fmr.TwistToScrew(self.screw_list[:, i])
#Convert TwistToScrew
self.joint_poses_home[0:3, i] = joint_pose_temp; # For plotting purposes
self.end_effector_home = new_end_effector_home
self.original_end_effector_home = self.end_effector_home.copy()
if len(self.link_home_positions) != 1:
new_link_mass_transforms = [None] * len(self.link_home_positions)
new_link_mass_transforms[0] = self.link_home_positions[0];
for i in range(1, 6):
new_link_mass_transforms[i] = (
self.link_home_positions[i-1].inv() @ self.link_home_positions[i])
new_link_mass_transforms[len(self.link_home_positions) -1] = (
self.link_home_positions[5].inv() @ self.end_effector_home)
self.link_mass_transforms = new_link_mass_transforms
self.box_spatial_links = 0
for i in range(0, self.num_dof):
self.screw_list_body[:, i] = (
fmr.Adjoint(self.end_effector_home.inv().gTM()) @ self.screw_list[:, i])
#print(new_theta)
self.FK(new_theta)
"""
__ __ _ _ _____ _ _
| \/ | | | (_) | __ \| | (_)
| \ / | ___ | |_ _ ___ _ __ | |__) | | __ _ _ __ _ __ _ _ __ __ _
| |\/| |/ _ \| __| |/ _ \| '_ \ | ___/| |/ _` | '_ \| '_ \| | '_ \ / _` |
| | | | (_) | |_| | (_) | | | | | | | | (_| | | | | | | | | | | | (_| |
|_| |_|\___/ \__|_|\___/|_| |_| |_| |_|\__,_|_| |_|_| |_|_|_| |_|\__, |
__/ |
|___/
"""
def lineTrajectory(self, target, initial = 0, execute = True,
tol = np.array([.05, .05, .05, .05, .05, .05]), delt = .01):
"""
Move the arm end effector in a straight line towards the target
Args:
target: Target pose to reach
initial: Starting pose. If set to 0 (the default), uses the current position
execute: Execute the desired motion after calculation
tol: tolerances on motion
delt: delta in meters to be calculated for each step
Returns:
theta_list: list of theta configurations
"""
if initial == 0:
initial = self.end_effector_pos_global.copy()
satisfied = False
init_theta = np.copy(self._theta)
theta_list = []
count = 0
while not satisfied and count < 2500:
count+=1
error = fsr.poseError(target, initial).gTAA().flatten()
satisfied = True
for i in range(6):
if abs(error[i]) > tol[i]:
satisfied = False
initial = fsr.closeLinearGap(initial, target, delt)
theta_list.append(np.copy(self._theta))
self.IK(initial, self._theta)
self.IK(target, self._theta)
theta_list.append(self._theta)
if (execute == False):
self.FK(init_theta)
return theta_list
def visualServoToTarget(self, target, tol = 2, ax = 0, plt = 0, fig = 0):
"""
Use a virtual camera to perform visual servoing to target
Args:
target: Object to move to
tol: pixel tolerance
ax: matplotlib object to draw to
plt: matplotlib plot
fig: whether or not to draw
Returns: Thetalist for arm, figure object
Returns:
theta: thetas at goal
fig: figure
"""
if (len(self.cameras) == 0):
print('NO CAMERA CONNECTED')
return
at_target = False
done = False
start_pos = self.FK(self._theta)
theta = 0
j = 0
plt.ion()
images = []
while not (at_target and done):
for i in range(len(self.cameras)):
pose_adjust = tm()
at_target = True
done = True
img, q, suc = self.cameras[i][0].getPhoto(target)
if not suc:
print('Failed to locate Target')
return self._theta
if img[0] < self.cameras[i][2][0] - tol:
pose_adjust[0] = -.01
at_target = False
if img[0] > self.cameras[i][2][0] + tol:
pose_adjust[0] = .01
at_target = False
if img[1] < self.cameras[i][2][1] - tol:
pose_adjust[1] = -.01
at_target = False
if img[1] > self.cameras[i][2][1] + tol:
pose_adjust[1] = .01
at_target = False
if at_target:
d = fsr.distance(self.end_effector_pos_global, target)
print(d)
if d < .985:
done = False
pose_adjust[2] = -.01
if d > 1.015:
done = False
pose_adjust[2] = .01
start_pos =start_pos @ pose_adjust
theta = self.IK(start_pos, self._theta)
self.updateCams()
if fig != 0:
ax = plt.axes(projection = '3d')
ax.set_xlim3d(-7, 7)
ax.set_ylim3d(-7, 7)
ax.set_zlim3d(0, 8)
DrawArm(self, ax)
DrawRectangle(target, [.2, .2, .2], ax)
print('Animating')
plt.show()
plt.savefig('VideoTemp' + '/file%03d.png' % j)
ax.clear()
j = j + 1
return theta, fig
def PDControlToGoalEE(self, theta, goal_position, Kp, Kd, prevtheta, max_theta_dot):
"""
Uses PD Control to Maneuver to an end effector goal
Args:
theta: start theta
goal_position: goal position
Kp: P parameter
Kd: D parameter
prevtheta: prev_theta parameter
max_theta_dot: maximum joint velocities
Returns:
scaled_theta_dot: scaled velocities
"""
current_end_effector_pos = self.FK(theta)
previous_end_effector_pos = self.FK(prevtheta)
error_ee_to_goal = fsr.Norm(current_end_effector_pos[0:3, 3]-goal_position[0:3, 3])
delt_distance_to_goal = (error_ee_to_goal-
fsr.Norm(previous_end_effector_pos[0:3, 3]-goal_position[0:3, 3]))
scale = Kp @ error_ee_to_goal + Kd @ min(0, delt_distance_to_goal)
twist = self.TwistSpaceToGoalEE(theta, goal_position)
twist_norm = fsr.NormalizeTwist(twist)
normalized_twist = twist/twist_norm
theta_dot = self.ThetadotSpace(theta, normalized_twist)
scaled_theta_dot = max_theta_dot/max(abs(theta_dot)) @ theta_dot @ scale
return scaled_theta_dot
"""
_____ _ _ _ _____ _ _
/ ____| | | | | /\ | | / ____| | | | |
| | __ ___| |_| |_ ___ _ __ ___ / \ _ __ __| | | (___ ___| |_| |_ ___ _ __ ___
| | |_ |/ _ \ __| __/ _ \ '__/ __| / /\ \ | '_ \ / _` | \___ \ / _ \ __| __/ _ \ '__/ __|
| |__| | __/ |_| || __/ | \__ \ / ____ \| | | | (_| | ____) | __/ |_| || __/ | \__ \
\_____|\___|\__|\__\___|_| |___/ /_/ \_\_| |_|\__,_| |_____/ \___|\__|\__\___|_| |___/
"""
def setDynamicsProperties(self, link_mass_transforms = None,
link_home_positions = None, box_spatial_links = None, link_dimensions = None):
"""
Set dynamics properties of the arm
At minimum, dimensions are a required parameter for drawing the arm.
Args:
link_mass_transforms: The mass matrices of links
link_home_positions: List of Home Positions
box_spatial_links: Mass Matrices (Inertia)
link_dimensions: Dimensions of links
"""
self.link_mass_transforms = link_mass_transforms
self.link_home_positions = link_home_positions
self.box_spatial_links = box_spatial_links
self.link_dimensions = link_dimensions
def setMasses(self, mass):
"""
set Masses
Args:
mass: mass
"""
self.masses = mass
def testArmValues(self):
"""
prints a bunch of arm values
"""
np.set_printoptions(precision=4)
np.set_printoptions(suppress=True)
print('screw_list')
print(self.screw_list)
print('screw_list_body')
print(self.screw_list_body)
print('joint_poses_home')
print(self.joint_poses_home)
print('end_effector_home')
print(self.end_effector_home)
print('original_end_effector_home')
print(self.original_end_effector_home)
print('link_mass_transforms')
print(self.link_mass_transforms)
print('link_home_positions')
print(self.link_home_positions)
print('box_spatial_links')
print(self.box_spatial_links)
print('link_dimensions')
print(self.link_dimensions)
def getJointTransforms(self):
"""
returns tm objects for each link in a serial arm
Returns:
tmlist
"""
dimensions = np.copy(self.link_dimensions).conj().T
joint_pose_list = [None] * dimensions.shape[0]
end_effector_home = self.base_pos_global
end_effector_transform = tm(fmr.FKinSpace(end_effector_home.gTM(),
self.screw_list[0:6, 0:0], self._theta[0:0]))
#print(end_effector_transform, 'EEPOS')
joint_pose_list[0] = end_effector_transform
for i in range((self.num_dof)):
if self.link_home_positions == None:
temp_tm = tm()
temp_tm[0:3, 0] = self.original_joint_poses_home[0:3, i]
end_effector_home = self.base_pos_global @ temp_tm
else:
end_effector_home = self.link_home_positions[i]
#print(end_effector_home, 'end_effector_home' + str(i + 1))
#print(self._theta[0:i+1])
end_effector_transform = tm(fmr.FKinSpace(end_effector_home.gTM(),
self.screw_list[0:6, 0:i], self._theta[0:i]))
#print(end_effector_transform, 'EEPOS')
joint_pose_list[i] = end_effector_transform
if dimensions.shape[0] > self.num_dof:
#Fix handling of dims
#print(fsr.TAAtoTM(np.array([0.0, 0.0, self.link_dimensions[-1, 2], 0.0 , 0.0, 0.0])))
joint_pose_list[len(joint_pose_list) - 1] = self.FK(self._theta)
#if self.eef_transform is not None:
# joint_pose_list.append(joint_pose_list[-1] @ self.eef_transform)
return joint_pose_list
def setArbitraryHome(self, theta,T):
"""
# Given a pose and some T in the space frame, find out where
# that T is in the EE frame, then find the home pose for
# that arbitrary pose
Args:
theta: theta configuration
T: new transform
"""
end_effector_temp = self.FK(theta)
ee_to_new = end_effector_temp.inv() @ T
self.end_effector_home = self.end_effector_home @ ee_to_new
#Converted to Python - Joshua
def restoreOriginalEE(self):
"""
Restore the original end effector of the Arm
"""
self.end_effector_home = self.original_end_effector_home
def getEEPos(self):
"""
Gets End Effector Position
"""
#if self.eef_transform is not None:
# return self.end_effector_pos_global.copy() @ self.eef_transform
return self.end_effector_pos_global.copy()
def getScrewList(self):
"""
Returns screw list in space
Return:
screw list
"""
return self.screw_list.copy()
def getLinkDimensions(self):
"""
Returns link dimensions
Return:
link dimensions
"""
return self.link_dimensions.copy()
"""
______ _ _____ _
| ____| | | | __ \ (_)
| |__ ___ _ __ ___ ___ ___ __ _ _ __ __| | | | | |_ _ _ __ __ _ _ __ ___ _ ___ ___
| __/ _ \| '__/ __/ _ \/ __| / _` | '_ \ / _` | | | | | | | | '_ \ / _` | '_ ` _ \| |/ __/ __|
| | | (_) | | | (_| __/\__ \ | (_| | | | | (_| | | |__| | |_| | | | | (_| | | | | | | | (__\__ \
|_| \___/|_| \___\___||___/ \__,_|_| |_|\__,_| |_____/ \__, |_| |_|\__,_|_| |_| |_|_|\___|___/
__/ |
|___/
"""
def velocityAtEndEffector(self, joint_velocities, theta = None):
"""
Calculate velocity at end effector based on joint velocities
Args:
joint_velocities: joint velocity vector to calculate based on
theta: [Optional] theta value to set position
Returns:
ndarray: end effector velocities
"""
if theta is not None:
self.FK(theta)
else:
theta = self._theta
end_effector_vels = self.jacobian(theta) @ joint_velocities.reshape((-1, 1))
return end_effector_vels
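#Usage sketch (not from the library's docs): computes the space-frame twist for a
#hypothetical 6-DOF arm `arm`; the joint-rate values below are illustrative only.
#   q_dot = np.array([0.1, 0.0, 0.0, 0.0, 0.0, 0.0])  # rad/s
#   twist = arm.velocityAtEndEffector(q_dot, arm._theta)  # 6x1 twist [w; v]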
def staticForces(self, theta, end_effector_wrench):
"""
Calculate forces on each link of the serial arm
Args:
theta: Current position of the arm
end_effector_wrench: end effector wrench (space frame)
Returns:
forces in newtons on each joint
"""
end_effector_temp = self.FK(theta) #Space Frame
tau = self.jacobian(theta).conj().T @ end_effector_wrench
return tau
#def staticForces(self, theta, end_effector_wrench):
# end_effector_temp = self.FK(theta)
# wrenchS = fmr.Adjoint(ling.inv(end_effector_temp)).conj().T @ end_effector_wrench
# return self.jacobian(theta).conj().T @ wrenchS
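#Usage sketch (assumed names, illustrative load): joint torques needed to resist a
#50 N downward force applied at the end effector, expressed as a space-frame wrench.
#   tip_wrench = np.array([0, 0, 0, 0, 0, -50.0])
#   joint_torques = arm.staticForces(arm._theta, tip_wrench)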
def staticForcesInv(self, theta, tau):
"""
Given a position on the arm and forces for each joint,
calculate the wrench on the end effector
Args:
theta: current joint positions of the arm
tau: forces on the joints of the arm in Newtons
Returns:
wrench on the end effector of the arm
"""
x_init = np.zeros((6))
temp = lambda x : (self.staticForces(theta, x[0:6])-tau)
end_effector_wrench = sci.optimize.fsolve(temp, x_init)
return end_effector_wrench[0:6]
def staticForceWithLinkMasses(self, theta, end_effector_wrench):
"""
Calculate Static Forces with Link Masses. Dependent on Loading URDF Prior
Args:
theta: joint configuration to analyze
end_effector_wrench: wrench at the end effector (can be zeros)
Returns:
tau: joint torques of the robot
"""
end_effector_temp = self.FK(theta)
jacobian = self.jacobian(theta)
tau_init = jacobian.T @ end_effector_wrench
carry_wrench = end_effector_wrench
joint_poses = self.getJointTransforms()
for i in range(self.num_dof, 0, -1):
link_mass_cg = self.masses_cg[i]
link_mass = self.masses[i]
applied_pos_global = joint_poses[i] @ link_mass_cg
carry_wrench = carry_wrench + fsr.makeWrench(applied_pos_global, link_mass, self.grav)
tau = jacobian[0:6, 0:i].T @ carry_wrench
tau_init[i-1] = tau[-1]
return tau_init
def inverseDynamics(self, theta, theta_dot, theta_dot_dot, grav, end_effector_wrench):
"""
Inverse dynamics
Args:
theta: joint positions
theta_dot: joint velocities (1st derivative)
theta_dot_dot: joint accelerations (2nd derivative)
grav: gravity
end_effector_wrench: end effector wrench
Returns:
tau: joint torques
A: joint screw axes expressed in the local link frames
V: link twists
vel_dot: link accelerations
F: link wrenches
"""
return self.inverseDynamicsE(theta, theta_dot, theta_dot_dot, grav, end_effector_wrench)
def inverseDynamicsEMR(self, theta, theta_dot, theta_dot_dot, grav, end_effector_wrench):
"""
Inverse dynamics
Args:
theta: joint positions
theta_dot: joint velocities (1st derivative)
theta_dot_dot: joint accelerations (2nd derivative)
grav: gravity
end_effector_wrench: end effector wrench
Returns:
tau: joint torques
A: joint screw axes expressed in the local link frames
V: link twists
vel_dot: link accelerations
F: link wrenches
"""
return fmr.inverseDynamics(theta, theta_dot, theta_dot_dot, grav, end_effector_wrench,
self.link_mass_transforms, self.box_spatial_links, self.screw_list)
def inverseDynamicsE(self, theta, theta_dot, theta_dot_dot, grav, end_effector_wrench):
"""
Inverse dynamics
Args:
theta: joint positions
theta_dot: joint velocities (1st derivative)
theta_dot_dot: joint accelerations (2nd derivative)
grav: gravity
end_effector_wrench: end effector wrench
Returns:
tau: joint torques
A: joint screw axes expressed in the local link frames
V: link twists
vel_dot: link accelerations
F: link wrenches
"""
#Multiple Bugs Fixed - Liam Aug 4 2019
A = np.zeros((self.screw_list.shape))
V = np.zeros((self.screw_list.shape))
vel_dot = np.zeros((self.screw_list.shape))
for i in range(self.num_dof):
#A[0:6, i] =(fmr.Adjoint(ling.inv(self.link_home_positions[i,:,:].reshape((4, 4)))) @
# self.screw_list[0:6, i].reshape((6, 1))).reshape((6))
A[0:6, i] = (self.link_home_positions[i].inv().Adjoint() @
self.screw_list[0:6, i]).reshape((6))
#Ti_im1 =
# (fmr.MatrixExp6(fmr.VecTose3(A[0:6, i]) * theta[i]) @
# ling.inv(self.link_mass_transforms[i,:,:])
Ti_im1 = (fmr.MatrixExp6(fmr.VecTose3(A[0:6, i]) * theta[i]) @
self.link_mass_transforms[i].inv().TM)
if i > 0:
V[0:6, i] = (A[0:6, i].reshape((6, 1)) * theta_dot[i] +
fmr.Adjoint(Ti_im1) @ V[0:6, i-1].reshape((6, 1))).reshape((6))
#print((((A[0:6, i] * theta_dot_dot[i]).reshape((6, 1)) + (fmr.Adjoint(Ti_im1) @
# vel_dot[0:6, i-1]).reshape((6, 1)) + (fmr.ad(V[0:6, i]) @ A[0:6, i] *
# theta_dot[i]).reshape((6, 1))).reshape((6, 1)), 'vcomp'))
vel_dot[0:6, i] = (((A[0:6, i] * theta_dot_dot[i]).reshape((6, 1)) +
(fmr.Adjoint(Ti_im1) @ vel_dot[0:6, i-1]).reshape((6, 1)) +
(fmr.ad(V[0:6, i]) @ A[0:6, i] * theta_dot[i]).reshape((6, 1))).reshape((6)))
else:
V[0:6, i] = ((A[0:6, i].reshape((6, 1)) * theta_dot[i] +
fmr.Adjoint(Ti_im1) @ np.zeros((6, 1))).reshape((6)))
vel_dot[0:6, i] = (((A[0:6, i] * theta_dot_dot[i]).reshape((6, 1)) +
(fmr.Adjoint(Ti_im1) @ np.vstack((np.array([[0],[0],[0]]),
grav))).reshape((6, 1)) +
(fmr.ad(V[0:6, i]) @ A[0:6, i] * theta_dot[i]).reshape((6, 1))).reshape((6)))
F = np.zeros((self.screw_list.shape))
tau = np.zeros((theta.size, 1))
for i in range(self.num_dof-1, -1, -1):
if i == self.num_dof-1:
#continue
Tip1_i = self.link_mass_transforms[i+1].inv().TM
F[0:6, i] = (fmr.Adjoint(Tip1_i).conj().T @ end_effector_wrench +
self.box_spatial_links[i,:,:] @ vel_dot[0:6, i] - fmr.ad(V[0:6, i]).conj().T @
self.box_spatial_links[i,:,:] @ V[0:6, i])
else:
#print(( fmr.MatrixExp6(-fmr.VecTose3((A[0:6, i+1].reshape((6, 1))) *
# theta(i + 1))) @ ling.inv(self.link_mass_transforms[i+1,:,:]), 'problem'))
Tip1_i = (fmr.MatrixExp6(-fmr.VecTose3(A[0:6, i+1]) * theta[i + 1]) @
self.link_mass_transforms[i+1].inv().TM)
F[0:6, i] = (fmr.Adjoint(Tip1_i).conj().T @ F[0:6, i+1] +
self.box_spatial_links[i,:,:] @ vel_dot[0:6, i] -
fmr.ad(V[0:6, i]).conj().T @ self.box_spatial_links[i,:,:] @ V[0:6, i])
tau[i] = F[0:6, i].conj().T @ A[0:6, i]
return tau, A, V, vel_dot, F
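#Usage sketch (placeholder values for a hypothetical 6-DOF arm): gravity-only torques
#at rest with no tip wrench. Assumes dynamics properties were already set.
#   grav = np.array([[0.0], [0.0], [-9.81]])
#   tau, A, V, vel_dot, F = arm.inverseDynamicsE(
#       arm._theta, np.zeros(6), np.zeros(6), grav, np.zeros((6, 1)))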
def inverseDynamicsC(self, theta, theta_dot, theta_dot_dot, grav, end_effector_wrench):
"""
Inverse dynamics Implementation of algorithm in Lynch 8.4
Args:
theta: joint positions
theta_dot: joint velocities (1st derivative)
theta_dot_dot: joint accelerations (2nd derivative)
grav: gravity
end_effector_wrench: end effector wrench
Returns:
tau: joint torques
M: mass matrix
G: block-diagonal spatial inertia matrix
"""
n = theta.size
A = np.zeros((6*n, n))
G = np.zeros((6*n, n))
for i in range (n):
A[(i-1)*6+1:(i-1)*6+6, i] = (
fmr.Adjoint(ling.inv(self.link_home_positions[i,:,:])) @ self.screw_list[0:6, i])
G[(i-1)*6+1:(i-1)*6+6,(i-1)*6+1:(i-1)*6+7] = self.box_spatial_links[i,:,:]
joint_axes = np.zeros((6*n, 6*n))
Vbase = np.zeros((6*n, 1))
T10 = ling.inv(self.FKLink(theta, 1))
vel_dot_base = (
np.hstack((fmr.Adjoint(T10) @ np.array([[0],[0],[0],[-grav]]), np.zeros((5*n, 1)))))
Ttipend = ling.inv(self.FK(theta)) @ self.FKLink(theta, n)
Ftip = np.vstack((np.zeros((5*n, 1)), fmr.Adjoint(Ttipend).conj().T @ end_effector_wrench))
for i in range (1, n):
Ti_im1 = ling.inv(self.FKLink(theta, i)) @ self.FKLink(theta, i-1)
joint_axes[(i-1) * 6 + 1:(i-1) *6 + 6, (i-2)*6+1:(i-2)*6+6] = fmr.Adjoint(Ti_im1)
L = ling.inv(np.identity((6*n))-joint_axes)
V = L @ (A @ theta_dot + Vbase)
adV = np.zeros((6*n, 6*n))
adAthd = np.zeros((6*n, 6*n))
for i in range(1, n):
adV[(i-1) * 6 + 1:(i-1) * 6+6,(i-1)*6+1:(i-1)*6+6] = fmr.ad(V[(i-1)*6+1:(i-1)*6+6, 0])
adAthd[(i-1)*6+1:(i-1) * 6 + 6, (i - 1) * 6 + 1 : (i - 1) * 6 + 6] = (
fmr.ad(theta_dot[i] @ A[(i - 1) * 6 + 1 : (i - 1)* 6 + 6, i]))
vel_dot = L @ (A @ theta_dot_dot - adAthd @ joint_axes @ V - adAthd @ Vbase @vel_dot_base)
F = L.conj().T @ (G @ vel_dot - adV.conj().T @ G @ V + Ftip)
tau = A.conj().T @ F
M = A.conj().T @ L.conj().T @ G @ L @ A
return tau, M, G
def forwardDynamicsE(self, theta, theta_dot, tau, grav, end_effector_wrench):
"""
Forward dynamics
Args:
theta: joint positions
theta_dot: joint velocities (1st derivative)
tau: joint torques
grav: gravity
end_effector_wrench: end effector wrench
Returns:
theta_dot_dot: joint accelerations
M: mass matrix
h: Coriolis and gravity terms
ee: end effector force contribution
"""
M = self.massMatrix(theta)
h = self.coriolisGravity(theta, theta_dot, grav)
ee = self.endEffectorForces(theta, end_effector_wrench)
theta_dot_dot = ling.inv(M) @ (tau-h-ee)
return theta_dot_dot, M, h, ee
def forwardDynamics(self, theta, theta_dot, tau, grav, end_effector_wrench):
"""
Forward dynamics
Args:
theta: joint positions
theta_dot: joint velocities (1st derivative)
tau: joint torques
grav: gravity
end_effector_wrench: end effector wrench
Returns:
theta_dot_dot: joint accelerations
"""
theta_dot_dot = fmr.forwardDynamics(theta, theta_dot, tau, grav,
end_effector_wrench, self.link_mass_transforms,
self.box_spatial_links, self.screw_list)
return theta_dot_dot
def massMatrix(self, theta):
"""
calculates mass matrix for configuration
Args:
theta: theta for configuration
Returns:
M: mass matrix
"""
#Debugged - Liam 8/4/19
M = np.zeros((theta.size, theta.size))
for i in range(theta.size):
Ji = self.jacobianLink(theta, i)
M = M + Ji.conj().T @ self.box_spatial_links[i,:,:] @ Ji
return M
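#Usage sketch (illustrative): the joint-space inertia matrix at the current pose.
#Assumes box_spatial_links was populated by setDynamicsProperties or a loader.
#   M = arm.massMatrix(arm._theta)  # (n x n)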
def coriolisGravity(self, theta, theta_dot, grav):
"""
Implements Coriolis Gravity from dynamics
Args:
theta: theta config
theta_dot: theta deriv
grav: gravity
Returns:
coriolisGravity
"""
h = self.inverseDynamicsE(theta, theta_dot, 0*theta, grav, np.zeros((6, 1)))[0]
return h
def endEffectorForces(self, theta, end_effector_wrench):
"""
Calculates forces at the end effector
Args:
theta: joint configuration
end_effector_wrench: wrench at the end effector
Returns:
forces at the end effector
"""
return self.inverseDynamicsE(theta, 0*theta, 0*theta,
np.zeros((3, 1)), end_effector_wrench)[0]
"""
_ _ _ _____ _ _ _ _
| | | | (_) / ____| | | | | | | (_)
| | __ _ ___ ___ | |__ _ __ _ _ __ | | __ _| | ___ _ _| | __ _| |_ _ ___ _ __ ___
_ | |/ _` |/ __/ _ \| '_ \| |/ _` | '_ \ | | / _` | |/ __| | | | |/ _` | __| |/ _ \| '_ \/ __|
| |__| | (_| | (_| (_) | |_) | | (_| | | | | | |___| (_| | | (__| |_| | | (_| | |_| | (_) | | | \__ \
\____/ \__,_|\___\___/|_.__/|_|\__,_|_| |_| \_____\__,_|_|\___|\__,_|_|\__,_|\__|_|\___/|_| |_|___/
"""
#Converted to Python - Joshua
def jacobian(self, theta):
"""
Calculates Space Jacobian for given configuration
Args:
theta: joint configuration
Returns:
jacobian
"""
return fmr.JacobianSpace(self.screw_list, theta)
#Converted to Python - Joshua
def jacobianBody(self, theta):
"""
Calculates Body Jacobian for given configuration
Args:
theta: joint configuration
Returns:
jacobian
"""
return fmr.JacobianBody(self.screw_list_body, theta)
#Converted to Python - Joshua
#Fixed Bugs - Liam
def jacobianLink(self, theta, i):
"""
Calculates Space Jacobian for given configuration link
Args:
theta: joint configuration
i: joint index
Returns:
jacobian
"""
t_ad = self.FKLink(theta, i).inv().Adjoint()
t_js = fmr.JacobianSpace(self.screw_list[0:6, 0:i], theta[0:i])
t_z = np.zeros((6, len(theta) - i))
t_mt = t_ad @ t_js
return np.hstack((t_mt, t_z))
def jacobianEE(self, theta):
"""
Calculates End Effector Jacobian for given configuration
Args:
theta: joint configuration
Returns:
jacobian
"""
jacobian = self.jacobian(theta)
return self.FK(theta).inv().Adjoint() @ jacobian
#return fmr.Adjoint()
def jacobianEEtrans(self, theta):
"""
Calculates Jacobian for given configuration
Args:
theta: joint configuration
Returns:
jacobian
"""
end_effector_temp = self.FK(theta)
end_effector_temp[0:3, 0:3] = np.identity((3))
jacobian = self.jacobian(theta)
return fmr.Adjoint(ling.inv(end_effector_temp)) @ jacobian
def numericalJacobian(self, theta):
"""
Calculates numerical Jacobian for given configuration
Args:
theta: joint configuration
Returns:
jacobian
"""
jacobian = np.zeros((6, theta.size))
temp = lambda x : np.reshape(self.FK(x),((1, 16)))
numerical_jacobian = fsr.numericalJacobian(temp, theta, 0.006)
for i in range(0, np.size(theta)):
jacobian[0:6, i] = (fmr.se3ToVec(ling.inv(self.FK(theta).conj().T) @
np.reshape(numerical_jacobian[:, i],((4, 4))).conj().T))
return jacobian
def getManipulability(self, theta = None):
"""
Calculates Manipulability at a given configuration
Args:
theta: configuration
Returns:
Manipulability parameters
"""
if theta is None:
theta = self._theta.copy()
Jb = self.jacobianBody(theta)
Jw = Jb[0:3,:] #Angular
Jv = Jb[3:6,:] #Linear
Aw = Jw @ Jw.T
Av = Jv @ Jv.T
AwEig, AwEigVec = np.linalg.eig(Aw)
AvEig, AvEigVec = np.linalg.eig(Av)
uAw = 1/(np.sqrt(max(AwEig))/np.sqrt(min(AwEig)))
uAv = 1/(np.sqrt(max(AvEig))/np.sqrt(min(AvEig)))
return AwEig, AwEigVec, uAw, AvEig, AvEigVec, uAv
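#Usage sketch (illustrative): uAw and uAv are inverse condition numbers of the angular
#and linear manipulability ellipsoids; values near 1 indicate isotropic motion and
#values near 0 indicate proximity to a singularity.
#   AwEig, AwVec, uAw, AvEig, AvVec, uAv = arm.getManipulability()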
"""
_____
/ ____|
| | __ _ _ __ ___ ___ _ __ __ _
| | / _` | '_ ` _ \ / _ \ '__/ _` |
| |___| (_| | | | | | | __/ | | (_| |
\_____\__,_|_| |_| |_|\___|_| \__,_|
"""
def addCamera(self, cam, end_effector_to_cam):
"""
adds a camera to the arm
Args:
cam: camera object
end_effector_to_cam: end effector to camera transform
"""
cam.moveCamera(self.end_effector_pos_global @ end_effector_to_cam)
img, joint_poses_home, suc = cam.getPhoto(self.end_effector_pos_global @
tm([0, 0, 1, 0, 0, 0]))
camL = [cam, end_effector_to_cam, img]
self.cameras.append(camL)
print(self.cameras)
def updateCams(self):
"""
Updates camera locations
"""
for i in range(len(self.cameras)):
self.cameras[i][0].moveCamera(self.end_effector_pos_global @ self.cameras[i][1])
"""
_____ _ __ __ _ _ _
/ ____| | | \/ | | | | | | |
| | | | __ _ ___ ___ | \ / | ___| |_| |__ ___ __| |___
| | | |/ _` / __/ __| | |\/| |/ _ \ __| '_ \ / _ \ / _` / __|
| |____| | (_| \__ \__ \ | | | | __/ |_| | | | (_) | (_| \__ \
\_____|_|\__,_|___/___/ |_| |_|\___|\__|_| |_|\___/ \__,_|___/
"""
def move(self, T, stationary = False):
"""
Moves the arm to another location
Args:
T: new base location
stationary: boolean for keeping the end effector in its original location while
moving the base separately
"""
curpos = self.end_effector_pos_global.copy()
curth = self._theta.copy()
self.initialize(T, self.original_screw_list_body,
self.end_effector_home_local, self.original_joint_poses_home)
if not stationary:
self.FK(self._theta)
else:
self.IK(curpos, curth)
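#Usage sketch (pose is illustrative): slide the arm base 0.5 m along x while keeping
#the end effector fixed in space.
#   arm.move(tm([0.5, 0, 0, 0, 0, 0]), stationary=True)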
def draw(self, ax):
"""
Draws the arm using the faser_plot library
"""
DrawArm(self, ax)
class URDFLoader:
def __init__(self):
self.type = 'LINK'
self.sub_type = None
self.axis = None
self.xyz_origin = None #Use as CG for Joints
self.mass = None
self.inertia = None
self.id = None
self.name = ''
self.parent = None
self.children = []
self.num_children = 0
#Link Visuals
self.vis_type = None
self.vis_origin = None
self.vis_properties = []
self.col_type = ''
self.col_origin = ''
self.col_properties = []
#Joint Specific
self.joint_limits = np.array([-2*np.pi, 2*np.pi])
self.max_effort = np.inf
self.max_velocity = np.inf
def display(self):
"""
Displays properties of calculated object
"""
if self.type == 'link':
print('link: ' + self.name + ' (' + str(self.id) + ')')
else:
print(self.sub_type + ' joint: ' + self.name + ' (' + str(self.id) + ')')
if self.parent is not None:
print('\tparent: ' + self.parent.name)
else:
print('\tHas no parent')
print('\tchildren:')
for child in self.children:
print('\t\t' + child.name)
print('\tOrigin: ' + str(self.xyz_origin))
if self.type == 'link':
print('\tMass: ' + str(self.mass))
print('\tVisType: ' + self.vis_type)
print('\tColType: ' + self.col_type)
print('\tVisProperties: ' + str(self.vis_properties))
print('\tColProperties: ' + str(self.col_properties))
else:
print('\tJoint Limits: ' + str(self.joint_limits))
print('\tMax Effort: ' + str(self.max_effort))
print('\tMax Velocity: ' + str(self.max_velocity))
def load_urdf_spec_file(urdf_fname, package_fname):
"""
Return a file path from a urdf specified file.
Args:
urdf_fname: urdf file name
package_fname: file path, possibly using the 'package://' prefix
Returns:
string of the absolute path
"""
if 'package://' in package_fname:
return find_package_dir(urdf_fname, package_fname)
elif package_fname[0:3] == '../':
return os.path.abspath(package_fname)
else:
return package_fname
def find_package_dir(urdf_fname, package_rel_dir):
"""
Attempts to find a directory specified by a ros package macro without ROS
Args:
urdf_fname: urdf file name/path *must be absolute
package_rel_dir: relative package directory
Returns:
string of the absolute file path
"""
real_path = os.path.abspath(urdf_fname)
real_split_path = real_path.split('/')
package_name = '/'.join(package_rel_dir[9:].split('/')[1:])
found_path = False
i = len(real_split_path) - 1
while not found_path and i > 0:
test_path_prefix = '/'.join(real_split_path[0:i])
test_path = test_path_prefix + '/' + package_name
if os.path.isfile(test_path):
#print(test_path)
return test_path
i -= 1
#print(package_name)
def loadArmFromURDF(file_name):
"""
Load an arm from a URDF File
Args:
file_name: file name of urdf object
Returns:
Arm object
"""
#Joints connect parent and child links
#Each joint has an origin frame that defines the position
# and orientation of the child link frame relative to the
# parent link frame when the joint variable is zero.
# The origin is on the joint's axis.
# Each joint has an axis 3-vector, a unit vector
# expressed in the child link's frame,
# in the direction of positive rotation
# for a revolute joint or positive translation
# for a prismatic joint.
try:
tree = ET.parse(file_name)
except:
if os.path.exists(file_name):
print('Malformed URDF or unrecognizable format')
else:
print('File not Found')
return
root = tree.getroot()
link_count = 0
joint_count = 0
elements = []
def extractOrigin(x_obj):
"""
Shortcut for pulling from xml.
Args:
x_obj: xml object root
Returns:
origin of root
"""
return x_obj.get('xyz').split(), x_obj.get('rpy').split()
def completeInertiaExtraction(child):
"""
Extracts inertial properties from child
Args:
child: child object
"""
ixx = child.find('inertia').get('ixx')
ixy = child.find('inertia').get('ixy')
ixz = child.find('inertia').get('ixz')
iyy = child.find('inertia').get('iyy')
iyz = child.find('inertia').get('iyz')
izz = child.find('inertia').get('izz')
inertia_matrix = np.array([
[ixx, ixy, ixz],
[ixy, iyy, iyz],
[ixz, iyz, izz]], dtype=float)
return inertia_matrix
def completeGeometryParse(child):
"""
complete Geometry parsing for children
Args:
child: child xml object to be parsed
"""
type = 'box'
origin = tm()
properties = []
for grand_child in child:
#print(grand_child.tag)
if grand_child.tag == 'origin':
origin = extractOrigin(grand_child)
elif grand_child.tag == 'geometry':
geometry_parent = child.find('geometry')
for geometry in geometry_parent:
if geometry.tag == 'box':
type = 'box'
properties = geometry.get('size').split()
elif geometry.tag == 'cylinder':
type = 'cyl'
properties.append(geometry.get('radius'))
properties.append(geometry.get('length'))
elif geometry.tag == 'sphere':
type = 'spr'
properties = geometry.get('radius')
elif geometry.tag == 'mesh':
type = 'msh'
properties = []
properties.append(load_urdf_spec_file(file_name, geometry.get('filename')))
properties.append(geometry.get('scale'))
if properties[1] is None:
properties[1] = 1.0
return type, origin, properties
def completeLinkParse(new_element, parent):
#print(new_element.name)
for child in parent:
if child.tag == 'inertial':
cg_xyz_raw, cg_rpy_raw = extractOrigin(child.find('origin'))
cg_origin_xyz = np.array(cg_xyz_raw, dtype=float)
cg_origin_rpy = np.array(cg_rpy_raw, dtype=float)
cg_origin_tm = tm([cg_origin_xyz[0], cg_origin_xyz[1], cg_origin_xyz[2],
cg_origin_rpy[0], cg_origin_rpy[1], cg_origin_rpy[2]])
new_element.xyz_origin = cg_origin_tm
new_element.inertia = completeInertiaExtraction(child)
new_element.mass = float(child.find('mass').get('value'))
elif child.tag == 'visual':
new_element.vis_type, new_element.vis_origin, new_element.vis_properties = \
completeGeometryParse(child)
elif child.tag == 'collision':
new_element.col_type, new_element.col_origin, new_element.col_properties = \
completeGeometryParse(child)
def completeJointParse(new_element, parent):
#print(new_element.name)
for child in parent:
if child.tag == 'axis':
axis = np.array(child.get('xyz').split(), dtype=float)
new_element.axis = axis
if child.tag == 'origin':
cg_xyz_raw, cg_rpy_raw = extractOrigin(child)
cg_origin_xyz = np.array(cg_xyz_raw, dtype=float)
cg_origin_rpy = np.array(cg_rpy_raw, dtype=float)
cg_origin_tm = tm([cg_origin_xyz[0], cg_origin_xyz[1], cg_origin_xyz[2],
0, 0, 0])
cg_origin_tm = cg_origin_tm @ tm([0, 0, 0, 0, 0, cg_origin_rpy[2]])
cg_origin_tm = cg_origin_tm @ tm([0, 0, 0, 0, cg_origin_rpy[1], 0])
#cg_origin_rpy[0], cg_origin_rpy[1], cg_origin_rpy[2]
cg_origin_tm = cg_origin_tm @ tm([0, 0, 0, cg_origin_rpy[0], 0, 0])
new_element.xyz_origin = cg_origin_tm
if child.tag == 'limit':
new_element.joint_limits[0] = child.get('lower')
new_element.joint_limits[1] = child.get('upper')
new_element.max_effort = child.get('effort')
new_element.max_velocity = child.get('velocity')
def findNamedElement(named_element, type='all'):
for element in elements:
if element.name == named_element:
if type == 'all':
return element
if type == 'link' and element.type == 'link':
return element
if type == 'joint' and element.type == 'joint':
return element
def totalChildren(element):
if element.num_children == 0:
return 1
else:
sum_children = 0
for child in element.children:
sum_children += totalChildren(child)
return sum_children
def mostChildren(element):
most_children = totalChildren(element.children[0])
max_ind = 0
for i in range(element.num_children):
child_qty = totalChildren(element.children[i])
if child_qty > most_children:
max_ind = i
most_children = child_qty
return element.children[max_ind]
def determineAxis(joint_location, axis):
joint_rotation = tm([joint_location[3], joint_location[4], joint_location[5]])
axis_unit = tm([axis[0], axis[1], axis[2], 0, 0, 0])
axis_new = (joint_rotation @ axis_unit)[0:3]
#if sum(abs(axis)) > 0:
# axis_new = abs(axis_new)
#else:
# axis_new = abs(axis_new) * -1
return axis_new.flatten()
#Perform First Pass Parse
for child in root:
new_element = URDFLoader()
new_element.type = child.tag
new_element.name = child.get('name')
if new_element.type == 'link':
completeLinkParse(new_element, child)
elif new_element.type == 'joint':
new_element.sub_type = child.get('type')
completeJointParse(new_element, child)
elements.append(new_element)
world_link = URDFLoader()
world_link.type = 'link'
world_link.sub_type = 'fixed'
elements.append(world_link)
#Assign Parents and Children to complete chain
for child in root:
if child.tag == 'joint':
this_element = findNamedElement(child.get('name'), 'joint')
parent_name = 'world'
child_name = ''
for sub_child in child:
if sub_child.tag == 'parent':
parent_name = sub_child.get('link')
elif sub_child.tag == 'child':
child_name = sub_child.get('link')
parent_element = findNamedElement(parent_name)
child_element = findNamedElement(child_name)
this_element.parent = parent_element
parent_element.children.append(this_element)
parent_element.num_children += 1
child_element.parent = this_element
this_element.children.append(child_element)
this_element.num_children += 1
#Account for cases that don't use world
if world_link.num_children == 0:
elements.remove(world_link)
for element in elements:
if element.type == 'link' and element.parent is None and element.num_children > 0:
world_link = element
break
elif (element.type == 'joint' and element.sub_type == 'fixed' and
element.parent is None and element.num_children > 0):
world_link = element
break
num_dof = 0
#Count the number of degrees of freedom along longest kinematic chain
temp_element = world_link
while temp_element.num_children > 0:
if temp_element.type == 'joint' and temp_element.sub_type != 'fixed':
num_dof += 1
temp_element = mostChildren(temp_element)
home = tm()
joint_poses = [home]
joint_axes = np.zeros((3, num_dof))
joint_homes = np.zeros((3, num_dof))
arrind = 0
#Figure out the link home poses
temp_element = world_link
masses = []
masses_cg = []
link_names = []
joint_names = []
vis_props = []
col_props = []
joint_mins = []
joint_maxs = []
joint_vel_limits = []
joint_effort_limits = []
joint_origins = []
eef_transform = tm()
while temp_element.num_children > 0:
#temp_element.display()
if temp_element.type == 'link' or temp_element.sub_type == 'fixed': #If a Link or a Fixed Joint
if temp_element.type == 'link':
if temp_element.mass is not None:
masses.append(temp_element.mass)
masses_cg.append(temp_element.xyz_origin)
link_names.append(temp_element.name)
vis_props.append([temp_element.vis_type,
temp_element.vis_origin, temp_element.vis_properties])
col_props.append([temp_element.col_type,
temp_element.col_origin, temp_element.col_properties])
if temp_element.sub_type == 'fixed': #If it's a fixed joint, it's a pseudo link
eef_transform = eef_transform @ temp_element.xyz_origin
joint_origins.append(temp_element.xyz_origin) # Fixed Joints are still origins
temp_element = mostChildren(temp_element)
continue
joint_origins.append(temp_element.xyz_origin)
joint_poses.append(joint_poses[-1] @ temp_element.xyz_origin)
joint_names.append(temp_element.name)
joint_mins.append(temp_element.joint_limits[0])
joint_maxs.append(temp_element.joint_limits[1])
joint_vel_limits.append(temp_element.max_velocity)
joint_effort_limits.append(temp_element.max_effort)
joint_axes[0:3, arrind] = determineAxis(joint_poses[-1], temp_element.axis)
joint_homes[0:3, arrind] = joint_poses[-1][0:3].flatten()
temp_element = mostChildren(temp_element)
arrind+=1
#disp(joint_poses, 'Joint poses')
#Build the screw list
screw_list = np.zeros((6, num_dof))
for i in range(num_dof):
screw_list[0:6, i] = np.hstack((
joint_axes[0:3, i],
np.cross(joint_homes[0:3, i], joint_axes[0:3, i])))
arm = Arm(tm(), screw_list, joint_poses[-1], joint_homes, joint_axes)
arm.link_home_positions = joint_poses
arm.masses = np.array(masses)
arm.masses_cg = masses_cg
arm.link_names = link_names
arm.joint_names = joint_names
arm.joint_mins = np.array(joint_mins)
arm.joint_maxs = np.array(joint_maxs)
arm.max_vels = np.array(joint_vel_limits)
arm.max_effort = np.array(joint_effort_limits)
arm.eef_transform = eef_transform
arm.vis_props = vis_props
arm.col_props = col_props
arm.joint_origins = joint_origins
#disp(joint_poses[1:], 'intended')
#Placeholder Dimensions
dims = np.zeros((3, num_dof + 1))
for i in range(num_dof + 1):
dims[0:3,
i] = np.array([.1, .1, .1])
arm.link_dimensions = dims
return arm
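#Usage sketch ('my_robot.urdf' is a placeholder, not a file shipped with this module):
#   arm = loadArmFromURDF('my_robot.urdf')
#   print(arm.joint_names)
#   print(arm.FK(np.zeros(arm.num_dof)))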
def loadArmFromJSON(file_name):
"""
Load Arm From a JSON File
Args:
file_name: filename of the json to be loaded
Returns:
Arm object
"""
with open(file_name, 'r') as arm_file:
arm_data = json.load(arm_file)
num_dof = arm_data["NumDof"]
end_effector_home = tm(arm_data["EndEffectorLocation"])
base_location = tm(arm_data["BaseLocation"])
link_mass_centers_raw = []
joint_centers_raw = []
link_masses_raw = arm_data["LinkMasses"]
joint_home_positions_raw = []
box_dimensions_raw = []
for i in range(num_dof+1):
ii = str(i)
box_dimensions_raw.append(arm_data["LinkBoxDimensions"][ii])
joint_home_positions_raw.append(arm_data["JointHomePositions"][ii])
if i == num_dof:
continue
link_mass_centers_raw.append(tm(arm_data["LinkCentersOfMass"][ii]))
joint_centers_raw.append(arm_data["JointAxes"][ii])
joint_axes = np.array(joint_centers_raw).T
joint_home_positions = np.array(joint_home_positions_raw).T
disp(joint_axes)
screw_list = np.zeros((6, num_dof))
for i in range(0, num_dof):
screw_list[0:6, i] = np.hstack((joint_axes[0:3, i],
np.cross(joint_home_positions[0:3, i], joint_axes[0:3, i])))
dimensions = np.array(box_dimensions_raw).T
Mi = [None] * (num_dof + 1)
Mi[0] = link_mass_centers_raw[0]
for i in range(1, num_dof):
Mi[i] = link_mass_centers_raw[i-1].inv() @ link_mass_centers_raw[i]
Mi[num_dof] = link_mass_centers_raw[num_dof - 1].inv() @ end_effector_home
masses = np.array(link_masses_raw)
box_spatial = np.zeros((num_dof, 6, 6))
for i in range(num_dof):
box_spatial[i,:,:] = fsr.boxSpatialInertia(
masses[i], dimensions[0, i], dimensions[1, i], dimensions[2, i])
arm = Arm(base_location, screw_list,
end_effector_home, joint_home_positions, joint_axes)
disp(end_effector_home, 'EEPOS')
home_poses = []
for pose in joint_home_positions_raw:
print(pose)
home_poses.append(tm([pose[0], pose[1], pose[2], 0, 0, 0]))
#arm.setDynamicsProperties(Mi, link_mass_centers_raw, box_spatial, dimensions)
arm.setDynamicsProperties(Mi, home_poses, box_spatial, dimensions)
return arm
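#Usage sketch ('arm6dof.json' is a placeholder): the JSON must supply NumDof,
#EndEffectorLocation, BaseLocation, LinkMasses, LinkBoxDimensions, JointHomePositions,
#LinkCentersOfMass, and JointAxes keyed as read above.
#   arm = loadArmFromJSON('arm6dof.json')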
```
#### File: basic_robotics/kinematics/sp_model.py
```python
from ..general import tm, fmr, fsr
from ..utilities.disp import disp
import numpy as np
import scipy as sci
import scipy.linalg as ling
import copy
import json
class SP:
#Conventions:
#Filenames: snake_case
#Variables: snake_case
#Functions: camelCase
#ClassNames: CapsCase
#Docstring: Google
def __init__(self, bottom_joints, top_joints, bT, tT, leg_ext_min,
leg_ext_max, bottom_plate_thickness, top_plate_thickness, name):
"""
Initializes a new Stewart Platform Object
Args:
bottom_joints (ndarray): Bottom joint positions of the stewart platform
top_joints (ndarray): Top joint positions of the stewart platform
bT (tm): bottom plate position
tT (tm): top plate position
leg_ext_min (float): minimum leg ext limit
leg_ext_max (float): maximum leg ext limit
bottom_plate_thickness (float): bottom plate thickness
top_plate_thickness (float): top plate thickness
name (string): name of the sp
Returns:
SP: sp model object
"""
self.bottom_joints = np.copy(bottom_joints)
self.top_joints = np.copy(top_joints)
self.bottom_joints_init = self.bottom_joints.conj().transpose()
self.top_joints_init = self.top_joints.conj().transpose()
self.bottom_plate_pos = bT.copy()
self.top_plate_pos = tT.copy()
self.bottom_joints_space = np.zeros((3, 6))
self.top_joints_space = np.zeros((3, 6))
self.current_plate_transform_local = tm()
#Debug
self.leg_ext_safety = .001
self.debug = 0
#Physical Parameters
self.bottom_plate_thickness = bottom_plate_thickness
self.top_plate_thickness = top_plate_thickness
if leg_ext_min == 0:
self.leg_ext_min = 0
self.leg_ext_max = 2
else:
self.leg_ext_min = leg_ext_min
self.leg_ext_max = leg_ext_max
#Reserve Val
self.nominal_height = fsr.distance(bT, tT)
self.nominal_plate_transform = tm([0, 0, self.nominal_height, 0, 0, 0])
#Drawing Characteristics
self.outer_top_radius = 0
self.outer_bottom_radius = 0
self.act_shaft_radius = 0
self.act_motor_radius = 0
#Empty array indicates these values haven't been populated yet
self.leg_forces = np.zeros(1)
self.top_plate_wrench = np.zeros(1)
self.bottom_plate_wrench = np.zeros(1)
#Mass values from bottom mass, top mass, and actuator portion masses can be set directly.
self.bottom_plate_mass = 0
self.top_plate_mass = 0
self.act_shaft_mass = 0
self.act_motor_mass = 0
self.act_shaft_newton_force = 0
self.act_motor_newton_force = 0
self.top_plate_newton_force = 0
self.bottom_plate_newton_force = 0
self.grav = 9.81
self.dir = np.array([0, 0, -1])
self.act_shaft_grav_center = 0
self.act_motor_grav_center = 0
self.force_limit= 0
#Tolerances and Limits
self.joint_deflection_max = 140/2*np.pi/180  # 70 degrees, in radians
self.plate_rotation_limit = np.cos(60*np.pi/180)
#Newton Settings
self.tol_f = 1e-5/2
self.tol_a = 1e-5/2
self.max_iterations = 1e4
#Errors and Counts
self.fail_count = 0
self.validation_settings = [1, 0, 0, 1]
self.fk_mode = 1
self.validation_error = ""
self.IK(bT, tT, protect = True)
self.bottom_joint_angles_init = [None] * 6
self.bottom_joint_angles = [None] * 6
for i in range(6):
self.bottom_joint_angles_init[i] = fsr.globalToLocal(self.getBottomT(),
tm([self.top_joints_space.T[i][0], self.top_joints_space.T[i][1],
self.top_joints_space.T[i][2], 0, 0, 0]))
self.bottom_joint_angles[i] = fsr.globalToLocal(self.getTopT(),
tm([self.bottom_joints_space.T[i][0], self.bottom_joints_space.T[i][1],
self.bottom_joints_space.T[i][2], 0, 0, 0]))
t1 = fsr.globalToLocal(self.getTopT() @ tm([0, 0, -self.top_plate_thickness, 0, 0, 0]),
tm([self.top_joints_space[0, 0],
self.top_joints_space[1, 0],
self.top_joints_space[2, 0], 0, 0, 0]))
t2 = fsr.globalToLocal(self.getTopT() @ tm([0, 0, -self.top_plate_thickness, 0, 0, 0]),
tm([self.top_joints_space[0, 2],
self.top_joints_space[1, 2],
self.top_joints_space[2, 2], 0, 0, 0]))
t3 = fsr.globalToLocal(self.getTopT() @ tm([0, 0, -self.top_plate_thickness, 0, 0, 0]),
tm([self.top_joints_space[0, 4],
self.top_joints_space[1, 4],
self.top_joints_space[2, 4], 0, 0, 0]))
self.reorients = [t1, t2, t3]
#Compatibility
self.plate_thickness_avg = (self.top_plate_thickness + self.bottom_plate_thickness) / 2
self.nominal_plate_transform = tm([0, 0, self.plate_thickness_avg, 0, 0, 0])
#Validation Settings
"""
_____ _ _ _ _____ _ _
/ ____| | | | | /\ | | / ____| | | | |
| | __ ___| |_| |_ ___ _ __ ___ / \ _ __ __| | | (___ ___| |_| |_ ___ _ __ ___
| | |_ |/ _ \ __| __/ _ \ '__/ __| / /\ \ | '_ \ / _` | \___ \ / _ \ __| __/ _ \ '__/ __|
| |__| | __/ |_| || __/ | \__ \ / ____ \| | | | (_| | ____) | __/ |_| || __/ | \__ \
\_____|\___|\__|\__\___|_| |___/ /_/ \_\_| |_|\__,_| |_____/ \___|\__|\__\___|_| |___/
"""
def setMasses(self, plate_mass_general, act_shaft_mass,
act_motor_mass, grav=9.81, top_plate_mass=0):
"""
Set masses for each SP in the Assembler, note that because central platforms
share plates, these weights are halved with respect to end plates
Args:
plate_mass_general (float): mass of bottom plate (both if top is not specified) (kg)
act_shaft_mass (float): mass of actuator shaft (kg)
act_motor_mass (float): mass of actuator motor (kg)
grav (float): [Optional, default 9.81] acceleration due to gravity
top_plate_mass (float): [Optional, default 0] top plate mass (kg)
"""
self.bottom_plate_mass = plate_mass_general
if top_plate_mass != 0:
self.top_plate_mass = top_plate_mass
else:
self.top_plate_mass = plate_mass_general
self.setGrav(grav)
self.act_shaft_mass = act_shaft_mass
self.act_motor_mass = act_motor_mass
self.act_motor_newton_force = self.act_motor_mass * self.grav
self.act_shaft_newton_force = self.act_shaft_mass * self.grav
self.top_plate_newton_force = self.top_plate_mass * self.grav
self.bottom_plate_newton_force = self.bottom_plate_mass * self.grav
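#Usage sketch (masses in kg are placeholders):
#   sp.setMasses(plate_mass_general=2.0, act_shaft_mass=0.3,
#       act_motor_mass=0.5, top_plate_mass=1.5)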
def setGrav(self, grav=9.81):
"""
Sets Gravity
Args:
grav (float): Acceleration due to gravity
Returns:
None: None
"""
self.grav = grav
def setCOG(self, motor_grav_center, shaft_grav_center):
"""
Sets the centers of gravity for actuator components
Args:
motor_grav_center (float): distance from the bottom of the actuator to the motor COG
shaft_grav_center (float): distance from the top of the actuator to the shaft COG
"""
self.act_shaft_grav_center = shaft_grav_center
self.act_motor_grav_center = motor_grav_center
def setMaxAngleDev(self, max_angle_dev=55):
"""
Set the maximum angle joints can deflect before failure
Args:
max_angle_dev (float): maximum deflection angle (degrees)
"""
self.joint_deflection_max = max_angle_dev*np.pi/180
def setMaxPlateRotation(self, max_plate_rotation=60):
"""
Set the maximum angle the plate can rotate before failure
Args:
max_plate_rotation (Float): Maximum angle before plate rotation failure (degrees)
"""
self.plate_rotation_limit = np.cos(max_plate_rotation * np.pi / 180)
def setDrawingDimensions(self, outer_top_radius,
outer_bottom_radius, act_shaft_radius, act_motor_radius):
"""
Set Drawing Dimensions
Args:
outer_top_radius (Float): outer radius of the top plate
outer_bottom_radius (Float): outer radius of the bottom plate
act_shaft_radius (Float): drawing radius of the actuator shaft
act_motor_radius (Float): drawing radius of the actuator motor
"""
self.outer_top_radius = outer_top_radius
self.outer_bottom_radius = outer_bottom_radius
self.act_shaft_radius = act_shaft_radius
self.act_motor_radius = act_motor_radius
def setPlatePos(self, bottom_plate_pos, top_plate_pos):
"""
Set plate positions. called internally
Args:
bottom_plate_pos (tm): bottom plate transformation in space frame
top_plate_pos (tm): top plate transformation in space frame
"""
if bottom_plate_pos is not None:
self.bottom_plate_pos = bottom_plate_pos
if top_plate_pos is not None:
self.top_plate_pos = top_plate_pos
def getBottomJoints(self):
"""
get the bottom joint positions in space. Not orientations
Returns:
ndarray(Float): bottom joint positions
"""
return self.bottom_joints_space
def getTopJoints(self):
"""
get the top joint positions in space. Not orientations
Returns:
ndarray(Float): top joint positions in space
"""
return self.top_joints_space
def getCurrentLocalTransform(self):
"""
Get the current local transform between bottom and top plate
Returns:
tm: Top plate relative to bottom plate
"""
return self.current_plate_transform_local
def getLegForces(self):
"""
Return calculated leg forces
Returns:
ndarray(Float): Leg forces (N)
"""
return self.leg_forces
def getLens(self):
"""
Get Leg Lengths
Returns:
ndarray(Float): Leg Lengths
"""
return self.lengths.copy()
def getTopT(self):
"""
Return the transform of the top plate
Returns:
tm: top plate transform in space frame
"""
return self.top_plate_pos.copy()
def getBottomT(self):
"""
Return the transform of the bottom plate
Returns:
tm: bottom plate transform in space frame
"""
return self.bottom_plate_pos.copy()
def getActuatorLoc(self, num, type = 'm'):
"""
Returns the position of a specified actuator. Takes in an actuator number and a type.
m for actuator midpoint
b for actuator motor position
t for actuator top position
Args:
num (Int): number of actuator to return
type (Char): property of actuator to return
Returns:
ndarray(Float): location of desired point
"""
pos = 0
if type == 'm':
pos = np.array([(self.bottom_joints_space[0, num] + self.top_joints_space[0, num])/2,
(self.bottom_joints_space[1, num] + self.top_joints_space[1, num])/2,
(self.bottom_joints_space[2, num] + self.top_joints_space[2, num])/2])
bottom_act_joint = tm([self.bottom_joints_space[0, num],
self.bottom_joints_space[1, num], self.bottom_joints_space[2, num], 0, 0, 0])
top_act_joint = tm([self.top_joints_space[0, num],
self.top_joints_space[1, num], self.top_joints_space[2, num], 0, 0, 0])
if type == 'b':
#return fsr.adjustRotationToMidpoint(bottom_act_joint, bottom_act_joint,
# top_act_joint, mode = 1) @ tm([0, 0, self.act_motor_grav_center, 0, 0, 0])
return fsr.getUnitVec(bottom_act_joint,
top_act_joint, self.act_motor_grav_center)
if type == 't':
#return fsr.adjustRotationToMidpoint(top_act_joint, top_act_joint, bottom_act_joint,
# mode = 1) @ tm([0, 0, self.act_shaft_grav_center, 0, 0, 0])
return fsr.getUnitVec(top_act_joint,
bottom_act_joint, self.act_shaft_grav_center)
new_position = tm([pos[0], pos[1], pos[2], 0, 0, 0])
return new_position
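#Usage sketch (illustrative): midpoint, motor COG, and shaft COG of actuator 0.
#The 'b' and 't' modes assume setCOG() has already been called.
#   mid = sp.getActuatorLoc(0, 'm')
#   motor_cog = sp.getActuatorLoc(0, 'b')
#   shaft_cog = sp.getActuatorLoc(0, 't')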
def spinCustom(self, rot):
"""
Rotates plate to meet desired transform
Args:
rot (Float): rotation about the plate z axis in degrees
"""
old_base_pos = self.getBottomT()
self.move(tm())
current_top_pos = self.getTopT()
top_joints_copy = self.top_joints_space.copy()
bottom_joints_copy = self.bottom_joints_space.copy()
top_joints_origin_copy = self.top_joints[2, 0:6]
bottom_joints_origin_copy = self.bottom_joints[2, 0:6]
rotation_transform = tm([0, 0, 0, 0, 0, rot * np.pi / 180])
self.move(rotation_transform)
top_joints_space_new = self.top_joints_space.copy()
bottom_joints_space_new = self.bottom_joints_space.copy()
top_joints_copy[0:2, 0:6] = top_joints_space_new[0:2, 0:6]
bottom_joints_copy[0:2, 0:6] = bottom_joints_space_new[0:2, 0:6]
bottom_joints_copy[2, 0:6] = bottom_joints_origin_copy
top_joints_copy[2, 0:6] = top_joints_origin_copy
self.move(tm())
self.bottom_joints = bottom_joints_copy
self.top_joints = top_joints_copy
self.bottom_joints_space = bottom_joints_space_new
self.top_joints_space = top_joints_space_new
self.move(old_base_pos)
def IK(self, bottom_plate_pos=None, top_plate_pos=None, protect=False):
"""
Calculate inverse kinematics for given goals
Args:
bottom_plate_pos (tm): bottom plate position
top_plate_pos (tm): top plate position
protect (Bool): If true, bypass any safeties
Returns:
ndarray(Float): leg lengths
Bool: validity of pose
"""
bottom_plate_pos, top_plate_pos = self.bottomTopCheck(bottom_plate_pos, top_plate_pos)
leg_lengths, bottom_plate_pos, top_plate_pos = self.IKHelper(
bottom_plate_pos, top_plate_pos, protect)
#Determine current transform
self.bottom_plate_pos = bottom_plate_pos.copy()
self.top_plate_pos = top_plate_pos.copy()
#Ensure a valid position
valid = True
if not protect:
valid = self.validate()
return leg_lengths, valid
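#Usage sketch (goal pose is illustrative): raise the top plate 5% above nominal and
#tilt it slightly about x, then check validity.
#   goal = sp.getBottomT() @ tm([0, 0, sp.nominal_height * 1.05, 0.05, 0, 0])
#   lengths, ok = sp.IK(top_plate_pos=goal)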
def IKHelper(self, bottom_plate_pos=None, top_plate_pos=None, protect=False):
"""
Calculates Inverse Kinematics for a single stewart plaform.
Takes in bottom plate transform, top plate transform, protection paramter, and direction
Args:
bottom_plate_pos (tm): bottom plate position
top_plate_pos (tm): top plate position
protect (Bool): If true, bypass any safeties
Returns:
ndarray(Float): lengths of legs in meters
tm: bottom plate position new
tm: top plate position new
"""
#If not supplied paramters, draw from stored values
bottom_plate_pos, top_plate_pos = self.bottomTopCheck(
bottom_plate_pos, top_plate_pos)
#Check for excessive rotation
#Poses which would be valid by leg length
#But would result in singularity
#Set bottom and top transforms
#self.bottom_plate_pos = bottom_plate_pos
#self.top_plate_pos = top_plate_pos
#Call the IK method from the JIT numba file (FASER HIGH PER)
#Shoulda just called it HiPer FASER. Darn.
self.lengths, self.bottom_joints_space, self.top_joints_space = fmr.SPIKinSpace(
bottom_plate_pos.gTM(),
top_plate_pos.gTM(),
self.bottom_joints,
self.top_joints,
self.bottom_joints_space,
self.top_joints_space)
self.current_plate_transform_local = fsr.globalToLocal(
bottom_plate_pos, top_plate_pos)
return np.copy(self.lengths), bottom_plate_pos, top_plate_pos
def FK(self, L, bottom_plate_pos =None, reverse = False, protect=False):
"""
Calculate Forward Kinematics for desired leg lengths
Args:
L (ndarray(Float)): Goal leg lengths
bottom_plate_pos (tm): bottom plate position
reverse (Bool): Boolean to reverse action. If true, treat the top plate as stationary.
protect (Bool): Boolean to bypass error detection and correction. Bypass if True
Returns:
tm: top plate configuration
Bool: validity
"""
#FK host function, calls subfunctions depending on the value of fk_mode
#return self.FKSciRaphson(L, bottom_plate_pos, reverse, protect)
#bottom_plate_pos, n = self._applyPlateTransform(bottom_plate_pos = bottom_plate_pos)
if self.fk_mode == 0:
bottom, top = self.FKSolve(L, bottom_plate_pos, reverse, protect)
else:
bottom, top = self.FKRaphson(L, bottom_plate_pos, reverse, protect)
if not self.continuousTranslationConstraint():
if self.debug:
disp("FK Resulted In Inverted Plate Alignment. Repairing...")
#self.IK(top_plate_pos = self.getBottomT() @ tm([0, 0, self.nominal_height, 0, 0, 0]))
#self.FK(L, protect = True)
self.fixUpsideDown()
self.current_plate_transform_local = fsr.globalToLocal(bottom, top)
#self._undoPlateTransform(bottom, top)
valid = True
if not protect:
valid = self.validate()
return top, valid
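#Usage sketch (leg lengths are placeholders, in meters): mid-stroke legs.
#   lengths = np.ones(6) * (sp.leg_ext_min + sp.leg_ext_max) / 2
#   top_pose, ok = sp.FK(lengths)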
def FKSciRaphson(self, L, bottom_plate_pos=None, reverse=False, protect=False):
"""
Use Python's Scipy module to calculate forward kinematics. Takes in length list,
optionally bottom position, reverse parameter, and protection
Args:
L (ndarray(Float)): Goal leg lengths
bottom_plate_pos (tm): bottom plate transformation in space frame
reverse (Bool): Boolean to reverse action. If true, treat the top plate as stationary.
protect (Bool): Boolean to bypass error detection and correction. Bypass if True
Returns:
tm: bottom plate transform
tm: top plate transform
"""
L = L.reshape((6, 1))
mag = lambda x : abs(x[0]) + abs(x[1])+ abs(x[2]) + abs(x[3]) + abs(x[4]) + abs(x[5])
fk = lambda x : mag(self.IKHelper(bottom_plate_pos, tm(x), protect = True)[0] - L).flatten()
jac = lambda x : (self.inverseJacobianSpace(bottom_plate_pos, tm(x)))
x0 = (self.getBottomT() @ self.nominal_plate_transform).TAA.flatten()
root = sci.optimize.minimize(fk, x0).x
#disp(root, "ROOT")
self.IK(bottom_plate_pos, tm(root), protect = True)
return bottom_plate_pos, tm(root)
def simplifiedRaphson(self, L, bottom_plate_pos=None, reverse=False, protect=False):
"""
Follow the method in the Parallel Robotics Textbook
Args:
L (ndarray(Float)): Goal leg lengths
bottom_plate_pos (tm): bottom plate transformation in space frame
reverse (Bool): Boolean to reverse action. If true, treat the top plate as stationary.
protect (Bool): Boolean to bypass error detection and correction. Bypass if True
Returns:
tm: top plate transform
"""
tol_f = 1e-4
tol_a = 1e-4
#iteration limits
max_iterations = 1e4
if bottom_plate_pos is None:
bottom_plate_pos = self.bottom_plate_pos
x = self.getTopT().copy()
iter = 0
success = False
while not success and iter < max_iterations:
x = x + self.inverseJacobianSpace(bottom_plate_pos, x) @ (L -
self.IK(top_plate_pos = x, protect = protect)[0])
x.angleMod()
#disp(x)
if np.all(abs(x[0:3]) < tol_f) and np.all(abs(x[3:6]) < tol_a):
success = True
iter+=1
if iter == max_iterations:
print("Failed to Converge")
return tm(x)
def FKSolve(self, L, bottom_plate_pos=None, reverse=False, protect=False):
"""
Older version of python solver, no jacobian used. Takes in length list,
optionally bottom position, reverse parameter, and protection
Args:
L (ndarray(Float)): Goal leg lengths
bottom_plate_pos (tm): bottom plate transformation in space frame
reverse (Bool): Boolean to reverse action. If true, treat the top plate as stationary.
protect (Bool): Boolean to bypass error detection and correction. Bypass if True
Returns:
tm: bottom plate transform
tm: top plate transform
"""
#Do SP FK with scipy's built-in solvers. Way less speedy
#or accurate than Raphson, but much simpler to look at
L = L.reshape((6, 1))
self.lengths = L.reshape((6, 1)).copy()
#jac = lambda x : self.inverseJacobianSpace(top_plate_pos = x)
#Slightly different if the platform is supposed to be "reversed"
if reverse:
if bottom_plate_pos is None:
top_plate_pos = self.getTopT()
else:
top_plate_pos = bottom_plate_pos
fk = lambda x : (self.IK(tm(x), top_plate_pos, protect = True)[0] - L).reshape((6))
sol = tm(sci.optimize.fsolve(fk, self.getTopT().gTAA()))
#self.top_plate_pos = bottom_plate_pos
else:
#General calls will go here.
if bottom_plate_pos is None:
#If no bottom pose is supplied, use the last known.
bottom_plate_pos = self.getBottomT()
#Find top pose that produces the desired leg lengths.
fk = lambda x : (self.IKHelper(bottom_plate_pos, tm(x),
protect = True)[0] - L).reshape((6))
sol = tm(sci.optimize.fsolve(fk, self.getTopT().TAA))
#self.bottom_plate_pos = bottom_plate_pos
#If not "Protected" from recursion, call IK.
if not protect:
self.IK(protect = True)
return bottom_plate_pos, sol
def FKRaphson(self, L, bottom_plate_pos =None, reverse=False, protect=False):
"""
FK Solver
Adapted from the work done by
#http://jak-o-shadows.github.io/electronics/stewart-gough/stewart-gough.html
Args:
L (ndarray(Float)): Goal leg lengths
bottom_plate_pos (tm): bottom plate transformation in space frame
reverse (Bool): Boolean to reverse action. If true, treat the top plate as stationary.
protect (Bool): Boolean to bypass error detection and correction. Bypass if True
Returns:
tm: bottom plate transform
tm: top plate transform
"""
if self.debug:
disp("Starting Raphson FK")
#^Look here for the original code and paper describing how this works.
if bottom_plate_pos is None:
bottom_plate_pos = self.getBottomT()
success = True
L = L.reshape((6))
self.lengths = L.reshape((6, 1)).copy()
bottom_plate_pos_backup = bottom_plate_pos.copy()
# @ tm([0, 0, self.bottom_plate_thickness, 0, 0, 0])
bottom_plate_pos = np.eye(4)
#bottom_plate_pos = bottom_plate_pos_backup.copy()
#newton-raphson tolerances
#iteration limits
iteration = 0
#Initial Guess Position
#a = fsr.TMtoTAA(bottom_plate_pos @
# fsr.TM([0, 0, self.nominal_height, 0, 0, 0])).reshape((6))
#disp(a, "Attempt")
try:
#ap = (fsr.localToGlobal(tm([0, 0, self.nominal_height, 0, 0, 0]), tm()))
ap = (fsr.localToGlobal(self.current_plate_transform_local, tm())).gTAA().reshape((6))
a = np.zeros((6))
for i in range(6):
a[i] = ap[i]
#Call the actual algorithm from the high performance faser library
#Pass in initial lengths, guess, bottom and top plate positions,
#max iterations, tolerances, and minimum leg lengths
a, iteration = fmr.SPFKinSpaceR(bottom_plate_pos, L, a,
self.bottom_joints_init, self.top_joints_init,
self.max_iterations, self.tol_f, self.tol_a, self.leg_ext_min)
#If the algorithm failed, try again, but this time set initial position to neutral
if iteration == self.max_iterations:
a = np.zeros((6))
a[2] = self.nominal_height
a, iteration = fmr.SPFKinSpaceR(bottom_plate_pos, L, a,
self.bottom_joints_init, self.top_joints_init,
self.max_iterations, self.tol_f, self.tol_a, self.leg_ext_min)
if iteration == self.max_iterations:
if self.debug:
print("Raphson Failed to Converge")
self.fail_count += .1
self.IK(bottom_plate_pos_backup,
bottom_plate_pos_backup @ self.nominal_plate_transform, protect = True)
return self.getBottomT(), self.getTopT()
#Otherwise return the calculated end effector position
#coords =tm(bottom_plate_pos_backup @ fsr.TAAtoTM(a.reshape((6, 1))))
coords = bottom_plate_pos_backup @ tm(a)
# @ tm([0, 0, self.top_plate_thickness, 0, 0, 0])
#Disabling these cause unknown issues so far.
#self.bottom_plate_pos = bottom_plate_pos_backup
#self.top_plate_pos = coords
self.IKHelper(bottom_plate_pos_backup, coords, protect = True)
self.bottom_plate_pos = bottom_plate_pos_backup
#@ tm([0, 0, self.bottom_plate_thickness, 0, 0, 0])
self.top_plate_pos = coords #@ tm([0, 0, self.top_plate_thickness, 0, 0, 0])
if self.debug:
disp("Returning from Raphson FK")
return bottom_plate_pos_backup, tm(coords)
except Exception as e:
if self.debug:
disp("Raphson FK Failed due to: " + str(e))
self.fail_count+=1
return self.FKSciRaphson(L, bottom_plate_pos_backup, reverse, protect)
def lambdaTopPlateReorientation(self, stopt):
"""
Only used as an assistance function for fixing plate alignment
Args:
stopt (tm): top transform in space frame.
Returns:
ndarray(Float): distances array
"""
reorient_helper_1 = fsr.localToGlobal(stopt, self.reorients[0])
reorient_helper_2 = fsr.localToGlobal(stopt, self.reorients[1])
reorient_helper_3 = fsr.localToGlobal(stopt, self.reorients[2])
d1 = fsr.distance(reorient_helper_1,
tm([self.top_joints_space[0, 0],
self.top_joints_space[1, 0],
self.top_joints_space[2, 0], 0, 0, 0]))
d2 = fsr.distance(reorient_helper_2,
tm([self.top_joints_space[0, 2],
self.top_joints_space[1, 2],
self.top_joints_space[2, 2], 0, 0, 0]))
d3 = fsr.distance(reorient_helper_3,
tm([self.top_joints_space[0, 4],
self.top_joints_space[1, 4],
self.top_joints_space[2, 4], 0, 0, 0]))
return np.array([d1 , d2 , d3])
def reorientTopPlate(self):
"""
Subfunction of fixUpsideDown,
responsible for orienting the top plate transform after mirroring
"""
top_true = self.getTopT() @ tm([0, 0, -self.top_plate_thickness, 0, 0, 0])
res = lambda x : self.lambdaTopPlateReorientation(
tm([top_true[0], top_true[1], top_true[2], x[0], x[1], x[2]]))
x_init = self.getTopT()[3:6].flatten()
solution = sci.optimize.fsolve(res, x_init)
top_true[3:6] = solution
self.top_plate_pos = top_true @ tm([0, 0, self.top_plate_thickness, 0, 0, 0])
#disp(self.lambdaTopPlateReorientation(self.getTopT() @
# tm([0, 0, -self.top_plate_thickness, 0, 0, 0])))
def fixUpsideDown(self):
"""
In situations where the top plate is inverted underneath
the bottom plate, yet lengths are valid,
This function can be used to mirror all the joint locations and "fix" the resultant problem
"""
for num in range(6):
#reversable = fsr.globalToLocal(tm([self.top_joints_space[0, num],
# self.top_joints_space[1, num], self.top_joints_space[2, num], 0, 0, 0]),
# tm([self.bottom_joints_space[0, num],
# self.bottom_joints_space[1, num],
# self.bottom_joints_space[2, num], 0, 0, 0]))
#newTJ = tm([self.bottom_joints_space[0, num],
# self.bottom_joints_space[1, num],
# self.bottom_joints_space[2, num], 0, 0, 0]) @ reversable
newTJ = fsr.mirror(self.getBottomT() @
tm([0, 0, -self.bottom_plate_thickness, 0, 0, 0]),
tm([self.top_joints_space[0, num],
self.top_joints_space[1, num],
self.top_joints_space[2, num], 0, 0, 0]))
self.top_joints_space[0, num] = newTJ[0]
self.top_joints_space[1, num] = newTJ[1]
self.top_joints_space[2, num] = newTJ[2]
self.lengths[num] = fsr.distance(
self.top_joints_space[:, num], self.bottom_joints_space[:, num])
top_true = fsr.mirror(self.getBottomT() @ tm([0, 0, -self.bottom_plate_thickness, 0, 0, 0]),
self.getTopT() @ tm([0, 0, -self.top_plate_thickness, 0, 0, 0]))
top_true[3:6] = self.getTopT()[3:6] * -1
self.top_plate_pos = top_true @ tm([0, 0, self.top_plate_thickness, 0, 0, 0])
self.reorientTopPlate()
def validateLegs(self, valid = True, donothing = False):
"""
Validates leg lengths against leg minimums and maximums
Args:
valid (Bool): whether to start the validator with an assumption of prior validity
donothing (Bool): If set to true, even if an invalid configuration is detected,
will not attempt to correct it
Returns:
Bool: Validity of configuration
"""
if self.validation_settings[0]:
temp_valid = self.legLengthConstraint()
valid = valid and temp_valid
if not temp_valid:
self.validation_error += "Leg Length Constraint Violated "
if not temp_valid and not donothing:
if self.debug:
disp("Executing Length Corrective Action...")
self.lengthCorrectiveAction()
valid = self.validate(True, 1)
return valid
def validateContinuousTranslation(self, valid=True, donothing = False):
"""
Ensures that the top plate is always locally above the bottom plate
Args:
valid (Bool): whether to start the validator with an assumption of prior validity
donothing (Bool): If set to true, even if an invalid configuration is detected,
will not attempt to correct it
Returns:
Bool: Validity of configuration
"""
if self.validation_settings[1]:
temp_valid = self.continuousTranslationConstraint()
valid = valid and temp_valid
if not temp_valid:
self.validation_error += "Platform Inversion Constraint Violated "
if not temp_valid and not donothing:
if self.debug:
disp("Executing Continuous Translation Corrective Action...")
self.continuousTranslationCorrectiveAction()
valid = self.validate(True, 2)
return valid
def validateInteriorAngles(self, valid = True, donothing = False):
"""
Ensures that interior angles do not violate angular limits
Args:
valid (Bool): whether to start the validator with an assumption of prior validity
donothing (Bool): If set to true, even if an invalid configuration is detected,
will not attempt to correct it
Returns:
Bool: Validity of configuration
"""
if self.validation_settings[2]:
temp_valid = self.interiorAnglesConstraint()
valid = valid and temp_valid
if not temp_valid:
self.validation_error += "Interior Angles Constraint Violated "
if not temp_valid and not donothing:
if self.debug:
disp("Executing Interior Angles Corrective Action...")
self.IK(self.getBottomT(), self.getBottomT() @
self.nominal_plate_transform, protect = True)
valid = self.validate(True, 3)
return valid
def validatePlateRotation(self, valid = True, donothing = False):
"""
Ensures plate rotation does not validate limits
Args:
valid (Bool): whether to start the validator with an assumption of prior validity
donothing (Bool): If set to true, even if an invalid configuration is detected,
will not attempt to correct it
Returns:
Bool: Validity of configuration
"""
if self.validation_settings[3]:
temp_valid = self.plateRotationConstraint()
valid = valid and temp_valid
if not temp_valid:
self.validation_error += "Plate Tilt/Rotate Constraint Violated "
if not temp_valid and not donothing:
if self.debug:
disp("Executing Plate Rotation Corrective Action By Resetting Platform")
#disp(self.nominal_plate_transform)
self.IK(self.getBottomT(),(self.getBottomT() @
self.nominal_plate_transform), protect = True)
valid = self.validate(True, 4)
return valid
def validate(self, donothing = False, validation_limit = 4):
"""
Validate the current configuration of the stewart platform
Args:
donothing (Bool): If set to true, even if an invalid configuration is detected,
will not attempt to correct it
validation_limit (Int): number of constraint stages to run (1 legs, 2 +inversion, 3 +interior angles, 4 +plate rotation)
Returns:
Bool: Validity of configuration
"""
valid = True #innocent until proven INVALID
#if self.debug:
# disp("Validating")
#First check to make sure leg lengths are not exceeding limit points
if fsr.distance(self.getTopT(), self.getBottomT()) > 2 * self.nominal_height:
valid = False
if validation_limit > 0: valid = self.validateLegs(valid, donothing)
if validation_limit > 1: valid = self.validateContinuousTranslation(valid, donothing)
if validation_limit > 2: valid = self.validateInteriorAngles(valid, donothing)
if validation_limit > 3: valid = self.validatePlateRotation(valid, donothing)
if valid:
self.validation_error = ""
return valid
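#Usage sketch (illustrative): run every check without automatic correction and
#report why a pose failed.
#   ok = sp.validate(donothing=True, validation_limit=4)
#   if not ok:
#       print(sp.validation_error)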
def plateRotationConstraint(self):
"""
Constraint for plate rotations. Assesses validity
Returns:
Bool: Validity of configuration
"""
valid = True
for i in range(3):
if self.current_plate_transform_local.gTM()[i, i] <= self.plate_rotation_limit - .0001:
if self.debug:
disp(self.current_plate_transform_local.gTM(), "Erroneous TM")
print([self.current_plate_transform_local.gTM()[i, i],
self.plate_rotation_limit])
valid = False
return valid
def legLengthConstraint(self):
"""
Evaluate Leg Length Limitations of Stewart Platform
Returns:
Bool: Validity of configuration
"""
valid = True
if(np.any(self.lengths < self.leg_ext_min) or np.any(self.lengths > self.leg_ext_max)):
valid = False
return valid
def rescaleLegLengths(self, current_leg_min, current_leg_max):
"""
Rescale leg lengths to fit within the actuator extension limits
Args:
current_leg_min (Float): current minimum leg length (may be invalid)
current_leg_max (Float): current maximum leg length (may be invalid)
"""
for i in range(6):
self.lengths[i] = ((self.lengths[i]-current_leg_min)/
(current_leg_max-current_leg_min) *
(min(self.leg_ext_max, current_leg_max) -
max(self.leg_ext_min, current_leg_min)) +
max(self.leg_ext_min, current_leg_min))
def addLegsToMinimum(self, current_leg_min, current_leg_max):
"""
Adds the shortest leg's shortfall (plus a safety margin) to all legs to preserve end effector orientation
Args:
current_leg_min (Float): current minimum leg length (may be invalid)
current_leg_max (Float): current maximum leg length (may be invalid)
"""
boostamt = ((self.leg_ext_min-current_leg_min)+self.leg_ext_safety)
if self.debug:
print("Boost Amount: " + str(boostamt))
self.lengths += boostamt
def subLegsToMaximum(self, current_leg_min, current_leg_max):
"""
Subtracts the longest leg's excess (plus a safety margin) from all legs to preserve end effector orientation
Args:
current_leg_min (Float): current minimum leg length (may be invalid)
current_leg_max (Float): current maximum leg length (may be invalid)
"""
#print([current_leg_max, self.leg_ext_max, current_leg_min,
# self.leg_ext_min, current_leg_max -
# (current_leg_max - self.leg_ext_max + self.leg_ext_safety)])
self.lengths -= ((current_leg_max - self.leg_ext_max)+self.leg_ext_safety)
#print(self.lengths)
def lengthCorrectiveAction(self):
"""
Make an attempt to correct leg lengths that are out of bounds.
Will frequently result in a home-like position
"""
if self.debug:
disp(self.lengths, "Lengths Pre Correction")
disp(self.lengths[np.where(self.lengths > self.leg_ext_max)], "over max")
disp(self.lengths[np.where(self.lengths < self.leg_ext_min)], "below min")
current_leg_min = min(self.lengths.flatten())
current_leg_max = max(self.lengths.flatten())
#for i in range(6):
# self.lengths[i] = ((self.lengths[i]-current_leg_min)/
# (current_leg_max-current_leg_min) *
# (min(self.leg_ext_max, current_leg_max) -
# max(self.leg_ext_min, current_leg_min)) +
# max(self.leg_ext_min, current_leg_min))
if current_leg_min < self.leg_ext_min and current_leg_max > self.leg_ext_max:
self.rescaleLegLengths(current_leg_min, current_leg_max)
self.validation_error+= " CMethod: Rescale, "
elif (current_leg_min < self.leg_ext_min and
current_leg_max + (self.leg_ext_min - current_leg_min) +
self.leg_ext_safety < self.leg_ext_max):
self.addLegsToMinimum(current_leg_min, current_leg_max)
self.validation_error+= " CMethod: Boost, "
elif (current_leg_max > self.leg_ext_max and
current_leg_min - (current_leg_max - self.leg_ext_max) -
self.leg_ext_safety > self.leg_ext_min):
self.validation_error+= " CMethod: Subract, "
self.subLegsToMaximum(current_leg_min, current_leg_max)
else:
self.rescaleLegLengths(current_leg_min, current_leg_max)
self.validation_error+= " CMethod: Unknown Rescale, "
#self.lengths[np.where(self.lengths > self.leg_ext_max)] = self.leg_ext_max
#self.lengths[np.where(self.lengths < self.leg_ext_min)] = self.leg_ext_min
if self.debug:
disp(self.lengths, "Corrected Lengths")
#disp("HEre's what happened")
self.FK(self.lengths.copy(), protect = True)
#print(self.lengths)
def continuousTranslationConstraint(self):
"""
Ensure that the top plate remains locally above the bottom plate
Returns:
Bool: Validity at configuration
"""
valid = True
if fsr.globalToLocal(self.getBottomT(), self.getTopT())[2] < 0:
valid = False
return valid
def continuousTranslationCorrectiveAction(self):
"""
Resets to home position
"""
self.IK(top_plate_pos = self.getBottomT() @ self.nominal_plate_transform, protect = True)
def interiorAnglesConstraint(self):
"""
Ensures no invalid internal angles
Returns:
Bool: Validity at configuration
"""
angles = abs(self.getJointAnglesFromNorm())
if(np.any(np.isnan(angles))):
return False
if(np.any(angles > self.joint_deflection_max)):
return False
return True
def getJointAnglesFromNorm(self):
"""
Returns the angular deviation of each angle socket from its nominal position in radians
Returns:
ndarray(Float): Angular deviation from home of each joint socket
"""
delta_angles_top = np.zeros((6))
delta_angles_bottom = np.zeros((6))
bottom_plate_transform = self.getBottomT()
top_plate_transform = self.getTopT()
for i in range(6):
top_joint_i = tm([
self.top_joints_space.T[i][0],
self.top_joints_space.T[i][1],
self.top_joints_space.T[i][2],
top_plate_transform[3],
top_plate_transform[4],
top_plate_transform[5]])
bottom_joint_i = tm([
self.bottom_joints_space.T[i][0],
self.bottom_joints_space.T[i][1],
self.bottom_joints_space.T[i][2],
bottom_plate_transform[3],
bottom_plate_transform[4],
bottom_plate_transform[5]])
#We have the relative positions to the top plate
# of the bottom joints (bottom angles) in home pose
#We have the relative positions to the bottom plate of
# the top joints (bottom_joint_angles_init) in home pose
bottom_to_top_local_home = self.bottom_joint_angles_init[i].copy()
top_to_bottom_local_home = self.bottom_joint_angles[i].copy()
#We acquire the current relative (local positions of each)
bottom_to_top_local = fsr.globalToLocal(self.getBottomT(), top_joint_i)
top_to_bottom_local = fsr.globalToLocal(self.getTopT(), bottom_joint_i)
#We acquire the base positions of each joint
bottom_to_bottom_local = fsr.globalToLocal(self.getBottomT(), bottom_joint_i)
top_to_top_local = fsr.globalToLocal(self.getTopT(), top_joint_i)
delta_angles_bottom[i] = fsr.angleBetween(
bottom_to_top_local,
bottom_to_bottom_local,
bottom_to_top_local_home)
delta_angles_top[i] = fsr.angleBetween(
top_to_bottom_local,
top_to_top_local,
top_to_bottom_local_home)
#DeltAnglesA are the Angles From Norm Bottom
#DeltAnglesB are the Angles from Norm TOp
return np.hstack((delta_angles_bottom, delta_angles_top))
def getJointAnglesFromVertical(self):
"""
Calculate joint angles from vertical at each joint
Returns:
ndarray(Float): top joints from vertical (downward)
ndarray(Float): bottom joints from vertical (upward)
"""
top_down = np.zeros((6))
bottom_up = np.zeros((6))
for i in range(6):
top_joints_temp = self.top_joints_space[:, i].copy().flatten()
top_joints_temp[2] = 0
bottom_joints_temp = self.bottom_joints_space[:, i].copy().flatten()
bottom_joints_temp[2] = bottom_joints_temp[2] + 1
angle = fsr.angleBetween(
self.bottom_joints_space[:, i],
self.top_joints_space[:, i],
top_joints_temp)
angle_up = fsr.angleBetween(
self.top_joints_space[:, i],
self.bottom_joints_space[:, i],
bottom_joints_temp)
top_down[i] = angle
bottom_up[i] = angle_up
return top_down, bottom_up
"""
______ _ _____ _
| ____| | | | __ \ (_)
| |__ ___ _ __ ___ ___ ___ __ _ _ __ __| | | | | |_ _ _ __ __ _ _ __ ___ _ ___ ___
| __/ _ \| '__/ __/ _ \/ __| / _` | '_ \ / _` | | | | | | | | '_ \ / _` | '_ ` _ \| |/ __/ __|
| | | (_) | | | (_| __/\__ \ | (_| | | | | (_| | | |__| | |_| | | | | (_| | | | | | | | (__\__ \
|_| \___/|_| \___\___||___/ \__,_|_| |_|\__,_| |_____/ \__, |_| |_|\__,_|_| |_| |_|_|\___|___/
__/ |
|___/
"""
def componentForces(self, tau):
"""
Calculate force components for given leg forces
Args:
tau (ndarray(Float)): force exerted through each leg in Newtons.
Returns:
ndarray(Float): vertical components of forces
ndarray(Float): horizontal components of forces
"""
vertical_components = np.zeros((6))
horizontal_components = np.zeros((6))
for i in range(6):
top_joint = self.top_joints_space[:, i].copy().flatten()
top_joint[2] = 0
angle = fsr.angleBetween(
self.bottom_joints_space[:, i],
self.top_joints_space[:, i],
top_joint)
vertical_force = tau[i] * np.sin(angle)
horizontal_force = tau[i] * np.cos(angle)
vertical_components[i] = vertical_force
horizontal_components[i] = horizontal_force
return vertical_components, horizontal_components
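#Illustrative usage sketch (hypothetical values; assumes `sp` is an SP instance):
#decompose a uniform 100 N force in every leg into vertical and horizontal
#components, then total the vertical load carried:
# vert, horiz = sp.componentForces(np.full(6, 100.0))
# total_vertical = np.sum(vert)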
def bottomTopCheck(self, bottom_plate_pos, top_plate_pos):
"""
Checks to make sure that a bottom and top provided are not null
Args:
bottom_plate_pos (tm): bottom plate transformation in space frame
top_plate_pos (tm): top plate transformation in space frame
Returns:
tm: bottom plate transformation in space frame
tm: top plate transformation in space frame
"""
if bottom_plate_pos is None:
bottom_plate_pos = self.getBottomT()
if top_plate_pos is None:
top_plate_pos = self.getTopT()
return bottom_plate_pos, top_plate_pos
def jacobianSpace(self, bottom_plate_pos = None, top_plate_pos = None):
"""
Calculates space jacobian for stewart platform. Takes in bottom transform and top transform
Args:
bottom_plate_pos (tm): bottom plate transformation in space frame
top_plate_pos (tm): top plate transformation in space frame
Returns:
ndarray(Float): Jacobian for current configuration
"""
#If not supplied paramters, draw from stored values
bottom_plate_pos, top_plate_pos = self.bottomTopCheck(bottom_plate_pos, top_plate_pos)
#Just invert the inverted
inverse_jacobian = self.inverseJacobianSpace(bottom_plate_pos, top_plate_pos)
return ling.pinv(inverse_jacobian)
def inverseJacobianSpace(self, bottom_plate_pos = None, top_plate_pos = None, protect = True):
"""
Calculates Inverse Jacobian for stewart platform. Takes in bottom and top transforms
Args:
bottom_plate_pos (tm): bottom plate transformation in space frame
top_plate_pos (tm): top plate transformation in space frame
protect (Bool): Boolean to bypass error detection and correction. Bypass if True
Returns:
ndarray(Float): Inverse Jacobian for current configuration
"""
#Ensure everything is kosher with the plates
bottom_plate_pos, top_plate_pos = self.bottomTopCheck(bottom_plate_pos, top_plate_pos)
#Store old values
old_bottom_plate_transform = self.getBottomT()
old_top_plate_transform = self.getTopT()
#Perform IK on bottom and top
self.IK(bottom_plate_pos, top_plate_pos, protect = protect)
#Create Jacobian
inverse_jacobian_transpose = np.zeros((6, 6))
for i in range(6):
#todo check sign on nim,
ni = fmr.Normalize(self.top_joints_space[:, i]-self.bottom_joints_space[:, i])
#Reverse for upward forces?
qi = self.bottom_joints_space[:, i]
col = np.hstack((np.cross(qi, ni), ni))
inverse_jacobian_transpose[:, i] = col
inverse_jacobian = inverse_jacobian_transpose.T
#Restore original Values
self.IK(old_bottom_plate_transform, old_top_plate_transform, protect = protect)
return inverse_jacobian
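#Illustrative note (not from the original source): the matrix returned above maps
#a platform twist to the six leg speeds; its pseudo-inverse is the forward
#Jacobian used by jacobianSpace, and the force routines below rely on the
#statics relation tau = J^T * wrench. Assuming an SP instance `sp` and a
#hypothetical `some_wrench`:
# tau = ling.pinv(sp.inverseJacobianSpace()).T @ some_wrench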
#Returns Top Down Jacobian instead of Bottom Up
def altInverseJacobianSpace(self,
bottom_plate_pos = None, top_plate_pos = None, protect = True):
"""
Returns top down jacobian instead of bottom up
Args:
bottom_plate_pos (tm): bottom plate transformation in space frame
top_plate_pos (tm): top plate transformation in space frame
protect (Bool): Boolean to bypass error detection and correction. Bypass if True
Returns:
ndarray(Float): top down Jacobian Space
"""
bottom_plate_pos, top_plate_pos = self.bottomTopCheck(bottom_plate_pos, top_plate_pos)
old_bottom_plate_transform = copy.copy(bottom_plate_pos)
old_top_plate_transform = copy.copy(top_plate_pos)
self.IK(bottom_plate_pos, top_plate_pos)
inverse_jacobian_transpose = np.zeros((6, 6))
for i in range(6):
ni = fmr.Normalize(self.bottom_joints_space[:, i]-self.top_joints_space[:, i])
qi = self.top_joints_space[:, i]
inverse_jacobian_transpose[:, i] = np.hstack((np.cross(qi, ni), ni))
inverse_jacobian = inverse_jacobian_transpose.conj().transpose()
self.IKHelper(old_bottom_plate_transform, old_top_plate_transform)
return inverse_jacobian
#Adds in actuator and plate forces, useful for finding forces on a full stack assembler
def carryMassCalc(self, twrench, protect=False):
"""
Calculates the forces on each leg given their masses,
masses of plates, and a wrench on the end effector.
Use this over Local in most cases
Args:
twrench (ndarray(Float)): input wrench for configuration
protect (Bool): Boolean to bypass error detection and correction. Bypass if True
Returns:
ndarray(Float): forces in Newtons for each leg
"""
wrench = twrench.copy()
wrench = wrench + fsr.makeWrench(self.getTopT(),
self.top_plate_newton_force, self.dir)
tau = self.measureForcesFromWrenchEE(self.getBottomT(),
self.getTopT(), wrench, protect = protect)
for i in range(6):
#print(self.getActuatorLoc(i, 't'))
wrench += fsr.makeWrench(self.getActuatorLoc(i, 't'),
self.act_shaft_newton_force, self.dir)
wrench += fsr.makeWrench(self.getActuatorLoc(i, 'b'),
self.act_motor_newton_force, self.dir)
wrench = wrench + fsr.makeWrench(self.getBottomT(),
self.bottom_plate_newton_force, self.dir)
return tau, wrench
def carryMassCalcLocal(self, twrench, protect = False):
"""
Perform force mass calculations in local frame
Args:
twrench (ndarray(Float)): input wrench for configuration
protect (Bool): Boolean to bypass error detection and correction. Bypass if True
Returns:
ndarray(Float): forces in Newtons for each leg
"""
#We will here assume that the wrench is in the local frame of the top platform.
wrench = twrench.copy()
wrench = wrench + fsr.makeWrench(tm(), self.top_plate_newton_force, self.dir)
tau = self.measureForcesAtEENew(wrench, protect = protect)
wrench_local_frame = fsr.transformWrenchFrame(wrench, self.getTopT(), self.getBottomT())
for i in range(6):
#print(self.getActuatorLoc(i, 't'))
#The following representations are equivalent.
wrench_local_frame += fsr.makeWrench(fsr.globalToLocal(self.getActuatorLoc(i, 't'),
self.getBottomT()), self.act_shaft_newton_force, self.dir)
wrench_local_frame += fsr.makeWrench(fsr.globalToLocal(self.getActuatorLoc(i, 'b'),
self.getBottomT()), self.act_motor_newton_force, self.dir)
#wrench_local_frame += fsr.transformWrenchFrame(fsr.makeWrench(tm(),
# self.act_shaft_newton_force, self.dir),
# self.getActuatorLoc(i, 't'), self.getBottomT())
#wrench_local_frame += fsr.transformWrenchFrame(fsr.makeWrench(tm(),
# self.act_motor_newton_force, self.dir),
# self.getActuatorLoc(i, 'b'), self.getBottomT())
wrench_local_frame = wrench_local_frame + fsr.makeWrench(tm(),
self.bottom_plate_newton_force, self.dir)
return tau, wrench_local_frame
def measureForcesAtEENew(self, wrench, protect = False):
"""
Measure forces based on end effector wrench
Args:
wrench (ndarray(Float)): wrench applied at the end effector
protect (Bool): Boolean to bypass error detection and correction. Bypass if True
Returns:
ndarray(Float): forces in Newtons for each leg
"""
jacobian_space = ling.pinv(
self.inverseJacobianSpace(self.getBottomT(), self.getTopT(), protect = protect))
tau = jacobian_space.T @ wrench
self.leg_forces = tau
return tau
def carryMassCalcUp(self, twrench, protect = False):
"""
Carry masses from bottom up
Args:
twrench (ndarray(Float)): input wrench for configuration
protect (Bool): Boolean to bypass error detection and correction. Bypass if True
Returns:
ndarray(Float): forces in Newtons for each leg
ndarray(Float): wrench to carry
"""
wrench = twrench.copy()
wrench = wrench + fsr.makeWrench(self.getBottomT(),
self.bottom_plate_mass * self.grav, np.array([0, 0, -1]))
tau = self.measureForcesFromBottomEE(
self.getBottomT(), self.getTopT(), wrench, protect = protect)
for i in range(6):
wrench += fsr.makeWrench(
self.getActuatorLoc(i, 't'), self.act_shaft_mass * self.grav, np.array([0, 0, -1]))
wrench += fsr.makeWrench(
self.getActuatorLoc(i, 'b'), self.act_motor_mass * self.grav, np.array([0, 0, -1]))
wrench = wrench + fsr.makeWrench(
self.getTopT(), self.top_plate_mass * self.grav, np.array([0, 0, -1]))
return tau, wrench
#Get Force wrench from the End Effector Force
def measureForcesFromWrenchEE(self, bottom_plate_pos = np.zeros((1)),
top_plate_pos = np.zeros((1)), top_plate_wrench = np.zeros((1)), protect = True):
"""
Calculates forces on legs given end effector wrench
Args:
bottom_plate_pos (tm): bottom plate transformation in space frame
top_plate_pos (tm): top plate transformation in space frame
top_plate_wrench (ndarray(Float)): input wrench for configuration
protect (Bool): Boolean to bypass error detection and correction. Bypass if True
Returns:
tau: forces in Newtons for each leg
"""
bottom_plate_pos, top_plate_pos = self.bottomTopCheck(bottom_plate_pos, top_plate_pos)
if top_plate_wrench.size < 6:
disp("Please Enter a Wrench")
#top_wrench = fmr.Adjoint(ling.inv(top_plate_pos)).conj().transpose() @ top_plate_wrench
#Modern Robotics 3.95 Fb = Ad(Tba)^T * Fa
#top_wrench = top_plate_pos.inv().Adjoint().T @ top_plate_wrench
top_wrench = fsr.transformWrenchFrame(top_plate_wrench, tm(), top_plate_pos)
jacobian_space = ling.pinv(
self.inverseJacobianSpace(bottom_plate_pos, top_plate_pos, protect = protect))
tau = jacobian_space.T @ top_wrench
self.leg_forces = tau
return tau
def measureForcesFromBottomEE(self, bottom_plate_pos = np.zeros((1)),
top_plate_pos = np.zeros((1)), top_plate_wrench = np.zeros((1)), protect = True):
"""
Calculates forces on legs given end effector wrench
Args:
bottom_plate_pos (tm): bottom plate transformation in space frame
top_plate_pos (tm): top plate transformation in space frame
top_plate_wrench (ndarray(Float)): input wrench for configuration
protect (Bool): Boolean to bypass error detection and correction. Bypass if True
Returns:
tau: forces in Newtons for each leg
"""
bottom_plate_pos, top_plate_pos = self.bottomTopCheck(bottom_plate_pos, top_plate_pos)
if top_plate_wrench.size < 6:
disp("Please Enter a Wrench")
#top_wrench = fmr.Adjoint(ling.inv(top_plate_pos)).conj().transpose() @ top_plate_wrench
bottom_wrench = bottom_plate_pos.inv().Adjoint().T @ top_plate_wrench
jacobian_space = ling.pinv(
self.inverseJacobianSpace(bottom_plate_pos, top_plate_pos, protect = protect))
tau = jacobian_space.T @ bottom_wrench
self.leg_forces = tau
return tau
def wrenchEEFromMeasuredForces(self, bottom_plate_pos, top_plate_pos, tau):
"""
Calculates wrench on end effector from leg forces
Args:
bottom_plate_pos (tm): bottom plate transformation in space frame
top_plate_pos (tm): top plate transformation in space frame
tau (ndarray(Float)): force exerted through each leg in Newtons.
Returns:
ndarray(Float): top plate wrench
ndarray(Float): top wrench (local)
ndarray(Float): jacobian
"""
self.leg_forces = tau
jacobian_space = ling.pinv(self.inverseJacobianSpace(bottom_plate_pos, top_plate_pos))
top_wrench = ling.inv(jacobian_space.conj().transpose()) @ tau
#self.top_plate_wrench = fmr.Adjoint(top_plate_pos).conj().transpose() @ top_wrench
self.top_plate_wrench = top_plate_pos.Adjoint().conj().transpose() @ top_wrench
return self.top_plate_wrench, top_wrench, jacobian_space
def wrenchBottomFromMeasuredForces(self, bottom_plate_pos, top_plate_pos, tau):
"""
Unused. Calculates wrench on the bottom plate from leg forces
Args:
bottom_plate_pos (tm): bottom plate transformation in space frame
top_plate_pos (tm): top plate transformation in space frame
tau (ndarray(Float)): force exerted through each leg in Newtons.
Returns:
ndarray(Float): bottom plate wrench
ndarray(Float): bottom wrench (local)
ndarray(Float): jacobian
"""
self.leg_forces = tau
jacobian_space = ling.pinv(self.altInverseJacobianSpace(bottom_plate_pos, top_plate_pos))
bottom_wrench = ling.inv(jacobian_space.conj().transpose()) @ tau
#self.bottom_plate_wrench = fmr.Adjoint(bottom_plate_pos).conj().transpose() @ bottom_wrench
self.bottom_plate_wrench = bottom_plate_pos.Adjoint().conj().transpose() @ bottom_wrench
return self.bottom_plate_wrench, bottom_wrench, jacobian_space
def sumActuatorWrenches(self, forces = None):
"""
Sum all actuator wrenches to produce bottom wrench
Args:
forces (ndarray(Float)): leg forces in Newtons
Returns:
ndarray(Float): bottom plate wrench
"""
if forces is None:
forces = self.leg_forces
wrench = fsr.makeWrench(tm(), 0, [0, 0, -1])
for i in range(6):
unit_vector = fmr.Normalize(self.bottom_joints_space[:, i]-self.top_joints_space[:, i])
wrench += fsr.makeWrench(self.top_joints_space[:, i], float(forces[i]), unit_vector)
#wrench = fsr.transformWrenchFrame(wrench, tm(), self.getTopT())
return wrench
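#Illustrative usage sketch (assumes `sp` is an SP instance and a carryMassCalc*
#call has already populated sp.leg_forces): the net wrench exerted by all six
#actuators, expressed at the top joints in the space frame as in the loop above:
# reaction_wrench = sp.sumActuatorWrenches()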
def move(self, T, protect = False):
"""
Move entire Assembler Stack to another location and orientation
This function and syntax are shared between all kinematic structures.
Args:
T (tm): New base transform to move to
protect (Bool): Boolean to bypass error detection and correction. Bypass if True
"""
#Moves the base of the stewart platform to a new location
self.current_plate_transform_local = fsr.globalToLocal(self.getBottomT(), self.getTopT())
self.bottom_plate_pos = T.copy()
self.IK(
top_plate_pos = fsr.localToGlobal(self.getBottomT(),
self.current_plate_transform_local),
protect = protect)
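#Illustrative usage sketch (hypothetical pose; assumes `sp` is an SP instance and
#tm accepts a 6-element [x, y, z, rx, ry, rz] list, as used elsewhere in this
#file): slide the whole platform half a metre along +X while preserving the
#current plate-to-plate transform:
# sp.move(tm([0.5, 0.0, 0.0, 0.0, 0.0, 0.0]))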
def printOutOfDateFunction(self, old_name, use_name):
"""
Prints a deprecation notice pointing from an old function name to its replacement
Args:
old_name (String): name of the deprecated function
use_name (String): name of the replacement function
"""
print(old_name + " is deprecated. Please use " + use_name + " instead.")
def SetMasses(self, plateMass, actuatorTop, actuatorBottom, grav = 9.81, tPlateMass = 0):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("SetMasses", "setMasses")
return self.setMasses(plateMass, actuatorTop, actuatorBottom, grav, tPlateMass)
def SetGrav(self, grav = 9.81):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("SetGrav", "setGrav")
return self.setGrav(grav)
def SetCOG(self, motor_grav_center, shaft_grav_center):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("SetCOG", "setCOG")
return self.setCOG(motor_grav_center, shaft_grav_center)
def SetAngleDev(self, MaxAngleDev = 55):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("SetAngleDev", "setMaxAngleDev")
return self.setMaxAngleDev(MaxAngleDev)
def SetPlateAngleDev(self, MaxPlateDev = 60):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("SetPlateAngleDev", "setMaxPlateRotation")
return self.setMaxPlateRotation(MaxPlateDev)
def SetDrawingDimensions(self, OuterTopRad, OuterBotRad, ShaftRad, MotorRad):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("SetDrawingDimensions", "setDrawingDimensions")
return self.setDrawingDimensions( OuterTopRad, OuterBotRad, ShaftRad, MotorRad)
def _setPlatePos(self, bottomT, topT):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("_setPlatePos", "setPlatePos")
return self.setPlatePos(bottomT, topT)
def gLens(self):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("gLens", "getLens")
return self.getLens()
def gtopT(self):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("gtopT", "getTopT")
return self.getTopT()
def gbottomT(self):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("gbottomT", "getBottomT")
return self.getBottomT()
def GetActuatorUnit(self, p1, p2, dist):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("GetActuatorUnit", "fsr.getUnitVec")
return fsr.getUnitVec(p1, p2, dist)
def SpinCustom(self, rot):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("SpinCustom", "spinCustom")
return self.spinCustom(rot)
def SimplifiedRaphson(self, L, bottomT = None, reverse = False, protect = False):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("SimplifiedRaphson", "simplifiedRaphson")
return self.simplifiedRaphson(L, bottomT, reverse, protect)
def LambdaRTP(self, stopt):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("LambdaRTP", "lambdaTopPlateReorientation")
return self.lambdaTopPlateReorientation(stopt)
def ReorientTopPlate(self):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("ReorientTopPlate", "reorientTopPlate")
return self.reorientTopPlate()
def _legLengthConstraint(self, donothing):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("_legLengthConstraint", "legLengthConstraint")
return self.legLengthConstraint()
def _resclLegs(self, cMin, cMax):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("_resclLegs", "rescaleLegLengths")
return self.rescaleLegLengths(cMin, cMax)
def _addLegs(self, cMin, cMax):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("_addLegs", "addLegsToMinimum")
return self.addLegsToMinimum(cMin, cMax)
def _subLegs(self, cMin, cMax):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("_subLegs", "subLegsToMaximum")
return self.subLegsToMaximum(cMin, cMax)
def _lengthCorrectiveAction(self):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("_lengthCorrectiveAction", "lengthCorrectiveAction")
return self.lengthCorrectiveAction()
def _continuousTranslationConstraint(self):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction(
"_continuousTranslationConstraint", "continuousTranslationConstraint")
return self.continuousTranslationConstraint()
def _continuousTranslationCorrectiveAction(self):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction(
"_continuousTranslationCorrectiveAction", "continuousTranslationCorrectiveAction")
return self.continuousTranslationCorrectiveAction()
def _interiorAnglesConstraint(self):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("_interiorAnglesConstraint", "interiorAnglesConstraint")
return self.interiorAnglesConstraint()
def AngleFromNorm(self):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("AngleFromNorm", "getJointAnglesFromNorm")
return self.getJointAnglesFromNorm()
def AngleFromVertical(self):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("AngleFromVertical", "getJointAnglesFromVertical")
return self.getJointAnglesFromVertical()
def _bottomTopCheck(self, bottomT, topT):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("_bottomTopCheck", "bottomTopCheck")
return self.bottomTopCheck(bottomT, topT)
def JacobianSpace(self, bottomT = None, topT = None):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("JacobianSpace", "jacobianSpace")
return self.jacobianSpace(bottomT, topT)
def InverseJacobianSpace(self, bottomT = None, topT = None, protect = True):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("InverseJacobianSpace", "inverseJacobianSpace")
return self.inverseJacobianSpace(bottomT, topT)
def AltInverseJacobianSpace(self, bottomT = None, topT = None, protect = True):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("AltInverseJacobianSpace", "altInverseJacobianSpace")
return self.altInverseJacobianSpace(bottomT, topT, protect)
def CarryMassCalc(self, twrench, protect = False):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("CarryMassCalc", "carryMassCalc")
return self.carryMassCalc(twrench, protect)
def CarryMassCalcNew(self, twrench, protect = False):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("CarryMassCalcNew", "carryMassCalcLocal")
return self.carryMassCalcLocal(twrench, protect)
def MeasureForcesAtEENew(self, wrench, protect = False):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("MeasureForcesAtEENew", "measureForcesAtEENew")
return self.measureForcesAtEENew(wrench, protect)
def CarryMassCalcUp(self, twrench, protect = False):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("CarryMassCalcUp", "carryMassCalcUp")
return self.carryMassCalcUp(twrench, protect)
def MeasureForcesFromWrenchEE(self, bottomT = np.zeros((1)) ,
topT = np.zeros((1)), topWEE = np.zeros((1)), protect = True):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("MeasureForcesFromWrenchEE", "measureForcesFromWrenchEE")
return self.measureForcesFromWrenchEE(bottomT, topT, topWEE, protect)
def MeasureForcesFromBottomEE(self, bottomT = np.zeros((1)) ,
topT = np.zeros((1)), topWEE = np.zeros((1)), protect = True):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("MeasureForcesFromBottomEE", "measureForcesFromBottomEE")
return self.measureForcesFromBottomEE(bottomT, topT, topWEE, protect)
def WrenchEEFromMeasuredForces(self, bottomT, topT, tau):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("WrenchEEFromMeasuredForces", "wrenchEEFromMeasuredForces")
return self.wrenchEEFromMeasuredForces(bottomT, topT, tau)
def WrenchBottomFromMeasuredForces(self, bottomT, topT, tau):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction(
"WrenchBottomFromMeasuredForces", "wrenchBottomFromMeasuredForces")
return self.wrenchBottomFromMeasuredForces(bottomT, topT, tau)
def SumActuatorWrenches(self, forces = None):
"""
Deprecated. Don't Use
"""
self.printOutOfDateFunction("SumActuatorWrenches", "sumActuatorWrenches")
return self.sumActuatorWrenches(forces)
def loadSP(fname, file_directory = "../robot_definitions/", baseloc = None, altRot = 1):
"""
Loads a Stewart Platform object from a file
Args:
fname (String): file name of the sp config
file_directory (String): optional directory, defaults to robot_definitions
baseloc (tm): Base location.
altRot (Float): alternate relative plate rotation.
Returns:
SP: SP object
"""
print(fname)
print(file_directory)
total_name = file_directory + fname
print(total_name)
with open(total_name, "r") as sp_file:
sp_data = json.load(sp_file)
bot_radius = sp_data["BottomPlate"]["JointRadius"] #Radius of Ball Joint Circle in Meters
top_radius = sp_data["TopPlate"]["JointRadius"]
bot_joint_spacing = sp_data["BottomPlate"]["JointSpacing"] #Spacing in Degrees
top_joint_spacing = sp_data["TopPlate"]["JointSpacing"]
bot_thickness = sp_data["BottomPlate"]["Thickness"]
top_thickness = sp_data["TopPlate"]["Thickness"]
outer_top_radius = sp_data["Drawing"]["TopRadius"]
outer_bottom_radius = sp_data["Drawing"]["BottomRadius"]
act_shaft_radius = sp_data["Drawing"]["ShaftRadius"]
act_motor_radius = sp_data["Drawing"]["MotorRadius"]
actuator_shaft_mass = 0
actuator_motor_mass = 0
plate_top_mass = 0
plate_bot_mass = 0
motor_grav_center = 0
shaft_grav_center = 0
name = sp_data["Name"]
actuator_min = sp_data["Actuators"]["MinExtension"] #meters
actuator_max = sp_data["Actuators"]["MaxExtension"]
force_lim = sp_data["Actuators"]["ForceLimit"]
max_dev = sp_data["Settings"]["MaxAngleDev"]
if sp_data["Settings"]["AssignMasses"] == 1:
actuator_motor_mass = sp_data["Actuators"]["MotorMass"]
actuator_shaft_mass = sp_data["Actuators"]["ShaftMass"]
plate_top_mass = sp_data["TopPlate"]["Mass"]
plate_bot_mass = sp_data["BottomPlate"]["Mass"]
if sp_data["Settings"]["InferActuatorCOG"] == 1:
motor_grav_center = sp_data["Actuators"]["MotorCOGD"]
shaft_grav_center = sp_data["Actuators"]["ShaftCOGD"]
else:
inferred_cog = 1/4 * (actuator_min+actuator_max)/2
shaft_grav_center = inferred_cog
motor_grav_center = inferred_cog
if baseloc is None:
baseloc = tm()
newsp = newSP(bot_radius, top_radius, bot_joint_spacing, top_joint_spacing,
bot_thickness, top_thickness, actuator_shaft_mass, actuator_motor_mass, plate_top_mass,
plate_bot_mass, motor_grav_center, shaft_grav_center,
actuator_min, actuator_max, baseloc, name, altRot)
newsp.setDrawingDimensions(
outer_top_radius,
outer_bottom_radius,
act_shaft_radius,
act_motor_radius)
newsp.setMaxAngleDev(max_dev)
newsp.force_limit = force_lim
return newsp
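#Illustrative usage sketch (the file name below is hypothetical): load a platform
#definition from the default robot_definitions directory and place it at the
#origin:
# sp = loadSP("basic_sp_config.json", baseloc=tm())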
def newSP(bottom_radius, top_radius, bJointSpace, tJointSpace,
bottom_plate_thickness, top_plate_thickness, actuator_shaft_mass,
actuator_motor_mass, plate_top_mass, plate_bot_mass, motor_grav_center,
shaft_grav_center, actuator_min, actuator_max, base_location, name, rot = 1):
"""
Builds a new SP, called usually by a constructor
Args:
bottom_radius (Float): Bottom plate Radius (m)
top_radius (Float): Top plate Radius (m)
bJointSpace (ndarray(Float)): bottom joints space locations
tJointSpace (ndarray(Float)): top joints space locations
bottom_plate_thickness (Float): bottom plate thickness (m)
top_plate_thickness (Float): top plate thickness (m)
actuator_shaft_mass (Float): Actuator shaft (moving portion) mass Kg
actuator_motor_mass (Float): Actuator motor (stationary portion) mass Kg
plate_top_mass (Float): top plate mass (Kg)
plate_bot_mass (Float): bottom plate mass (Kg)
motor_grav_center (Float): Actuator motor inline COG distance from joint
shaft_grav_center (Float): Actuator shaft inline CG distance from top joint
actuator_min (Float): Actuator length when fully retracted
actuator_max (Float): Actuator length when fully extended
base_location (tm): Base transform
name (String): Name of the SP
rot (Float): rotation parameter; -1 builds an inverted (flipped) joint layout
Returns:
SP: SP object
"""
bottom_gap = bJointSpace / 2 * np.pi / 180
top_gap = tJointSpace / 2 * np.pi / 180
bottom_joint_gap = 120 * np.pi / 180 #Angle of separation between joint clusters
top_joint_gap = 60 * np.pi / 180 #Offset in rotation of the top plate versus the bottom plate
bangles = np.array([
-bottom_gap, bottom_gap,
bottom_joint_gap-bottom_gap,
bottom_joint_gap+bottom_gap,
2*bottom_joint_gap-bottom_gap,
2*bottom_joint_gap+bottom_gap])
tangles = np.array([
-top_joint_gap+top_gap,
top_joint_gap-top_gap,
top_joint_gap+top_gap,
top_joint_gap+bottom_joint_gap-top_gap,
top_joint_gap+bottom_joint_gap+top_gap,
-top_joint_gap-top_gap])
if rot == -1:
tangles = np.array([
-bottom_gap, bottom_gap,
bottom_joint_gap-bottom_gap,
bottom_joint_gap+bottom_gap,
2*bottom_joint_gap-bottom_gap,
2*bottom_joint_gap+bottom_gap])
bangles = np.array([
-top_joint_gap+top_gap,
top_joint_gap-top_gap,
top_joint_gap+top_gap,
top_joint_gap+bottom_joint_gap-top_gap,
top_joint_gap+bottom_joint_gap+top_gap,
-top_joint_gap-top_gap])
S = fmr.ScrewToAxis(np.array([0.0, 0.0, 0.0]), np.array([0.0, 0.0, 1.0]), 0).reshape((6, 1))
Mb = tm(np.array([bottom_radius, 0.0, 0.0, 0.0, 0.0, 0.0]))
#how far from the bottom plate origin should clusters be generated
Mt = tm(np.array([top_radius, 0.0, 0.0, 0.0, 0.0, 0.0]))
#Same thing for the top
bj = np.zeros((3, 6)) #Pre allocate arrays
tj = np.zeros((3, 6))
for i in range(0, 6):
bji = fsr.transformFromTwist(bangles[i] * S) @ Mb
tji = fsr.transformFromTwist(tangles[i] * S) @ Mt
bj[0:3, i] = bji[0:3].reshape((3))
tj[0:3, i] = tji[0:3].reshape((3))
bj[2, i] = bottom_plate_thickness
tj[2, i] = -top_plate_thickness
bottom = base_location.copy()
tentative_height = midHeightEstimate(
actuator_min, actuator_max, bj, bottom_plate_thickness, top_plate_thickness)
if rot == -1:
tentative_height = midHeightEstimate(
actuator_min, actuator_max, tj, bottom_plate_thickness, top_plate_thickness)
top = bottom @ tm(np.array([0.0, 0.0, tentative_height, 0.0, 0.0, 0.0]))
newsp = SP(bj, tj, bottom, top,
actuator_min, actuator_max,
bottom_plate_thickness, top_plate_thickness, name)
newsp.setMasses(
plate_bot_mass,
actuator_shaft_mass,
actuator_motor_mass,
top_plate_mass = plate_top_mass)
newsp.setCOG(motor_grav_center, shaft_grav_center)
return newsp
def makeSP(bRad, tRad, spacing, baseT,
platOffset, rot = -1, plate_thickness_avg = 0, altRot = 0):
"""
Largely deprecated in favor of Loading SP objects from json
Args:
bRad (Float): bottom plate radius
tRad (Float): top plate radius
spacing (Float): joint spacing (deg)
baseT (tm):base transform
platOffset (Float): platform offset height
rot (Float): creates an inverted platform if set to -1
plate_thickness_avg (Float): plate thickness
altRot (Float): rotational offset
Returns:
SP: Stewart platform object
"""
gapS = spacing/2*np.pi/180 #Angle between cluster joints
bottom_joint_gap = 120*np.pi/180 #Angle of separation between joint clusters
top_joint_gap = 60*np.pi/180 #Offset in rotation of the top plate versus the bottom plate
bangles = np.array([
-gapS,
gapS,
bottom_joint_gap-gapS,
bottom_joint_gap+gapS,
2*bottom_joint_gap-gapS,
2*bottom_joint_gap+gapS]) + altRot * np.pi/180
tangles = np.array([
-top_joint_gap+gapS,
top_joint_gap-gapS,
top_joint_gap+gapS,
top_joint_gap+bottom_joint_gap-gapS,
top_joint_gap+bottom_joint_gap+gapS,
-top_joint_gap-gapS])+ altRot * np.pi/180
if rot == -1:
tangles = np.array([
-gapS, gapS,
bottom_joint_gap-gapS,
bottom_joint_gap+gapS,
2*bottom_joint_gap-gapS,
2*bottom_joint_gap+gapS])+ altRot * np.pi/180
bangles = np.array([
-top_joint_gap+gapS,
top_joint_gap-gapS,
top_joint_gap+gapS,
top_joint_gap+bottom_joint_gap-gapS,
top_joint_gap+bottom_joint_gap+gapS,
-top_joint_gap-gapS])+ altRot * np.pi/180
disp(bangles, "bangles")
disp(tangles, "tangles")
S = fmr.ScrewToAxis(np.array([0.0, 0.0, 0.0]), np.array([0.0, 0.0, 1.0]), 0).reshape((6, 1))
Mb = tm(np.array([bRad, 0.0, 0.0, 0.0, 0.0, 0.0]))
#how far from the bottom plate origin should clusters be generated
Mt = tm(np.array([tRad, 0.0, 0.0, 0.0, 0.0, 0.0]))
#Same thing for the top
bj = np.zeros((3, 6)) #Pre allocate arrays
tj = np.zeros((3, 6))
#Generate position vectors (XYZ) for top and bottom joint locations
for i in range(0, 6):
bji = fsr.transformFromTwist(bangles[i] * S) @ Mb
tji = fsr.transformFromTwist(tangles[i] * S) @ Mt
bj[0:3, i] = bji[0:3].reshape((3))
tj[0:3, i] = tji[0:3].reshape((3))
bj[2, i] = plate_thickness_avg/2
tj[2, i] = -plate_thickness_avg/2
#if rot == -1:
# disp(bj, "Prechange")
#
# rotby = TAAtoTM(np.array([0, 0, 0, 0, 0, np.pi/3]))
# for i in range(6):
# bj[0:3, i] = TMtoTAA(rotby @
# TAAtoTM(np.array([bj[0, i], bj[1, i], bj[2, i], 0, 0, 0])))[0:3].reshape((3))
# tj[0:3, i] = TMtoTAA(rotby @
# TAAtoTM(np.array([tj[0, i], tj[1, i], tj[2, i], 0, 0, 0])))[0:3].reshape((3))
# disp(bj, "postchange")
bottom = baseT.copy()
#Generate top position at offset from the bottom position
top = bottom @ tm(np.array([0.0, 0.0, platOffset, 0.0, 0.0, 0.0]))
sp = SP(bj, tj, bottom, top, 0, 1, plate_thickness_avg, plate_thickness_avg, 'sp')
sp.bRad = bRad
sp.tRad = tRad
return sp, bottom, top
#Helpers
def midHeightEstimate(leg_ext_min, leg_ext_max, bj, bth, tth):
"""
Calculates an estimate of the resting height of a Stewart platform
Args:
leg_ext_min (float): minimum leg extension
leg_ext_max (float): maximum leg extension
bj (array(float)): bottom joints
bth (Float): bottom plate thickness
tth (Float): top plate thickness
Returns:
Float: estimated resting height of the platform
"""
s1 = (leg_ext_min + leg_ext_max) / 2
d1 = fsr.distance(tm([bj[0, 0], bj[1, 0], bj[2, 0], 0, 0, 0]),
tm([bj[0, 1], bj[1, 1], bj[2, 1], 0, 0, 0]))
hest = (np.sqrt(s1 ** 2 - d1 **2)) + bth + tth
return hest
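#Worked example (hypothetical numbers, not from the original source): with
#leg_ext_min = 0.5 m, leg_ext_max = 1.0 m and a joint-pair spacing d1 = 0.1 m,
#s1 = 0.75 and the estimate is sqrt(0.75**2 - 0.1**2) + bth + tth,
#roughly 0.743 m plus the two plate thicknesses.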
``` |
{
"source": "64-B1T/faser_utils",
"score": 4
} |
#### File: faser_utils/disp/disp.py
```python
import time
import datetime
def disp(matrix, title = "MATRIX", nd = 3, mode = 0, pdims = True, noprint = False):
"""
Drop in replacement for python print. Operates like Matlab's disp() function.
Takes in an object to print, a title, and an optional mode
Args:
matrix: the item to be printed (does not have to be a matrix)
title: An optional title or caption to be applied to the printed item
nd: number of decimal places
mode: whether or not to print the context in latex table format
pdims: print dimensions
noprint: simply return a string without printing anything
Returns:
String
"""
matstr = ""
if mode == 0:
matstr = dispa(matrix, title, nd, pdims)[:-1]
else:
matstr = disptex(matrix, title, nd)[:-1]
if not noprint:
print(matstr)
return matstr
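#Illustrative usage sketch (assumes numpy is available; not part of the original
#file):
# import numpy as np
# disp(np.eye(3), "Identity", nd=2) # boxed matrix printout
# latex = disp(np.eye(2), "Tiny", mode=1, noprint=True) # LaTeX table as a string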
def dispa(matrix, title = "MATRIX", nd = 3, pdims = True, h="", new = True):
"""
Helper function for disp, used recursively
Args:
matrix: item to be printed
nd: number of decimal places
pdims: print dimensions
h: existing string
new: if this is a new call
Returns:
String
"""
t_bar = ""
t_tl = "╔"
t_bl = "╚"
#╚╔╝╗║═ Symbols Required
#Accounts for Even or Odd amounts of letters in a title
if (len(title) % 2 == 0):
t_tl = t_tl + "═"
t_bl = t_bl + "═"
strr = ""
#Accounts for a List of Objects, Calls itself Recursively
if hasattr(matrix, 'TM'):
return dispa(matrix.TAA, title)
if isinstance(matrix, list):
alltf = True
for mat in matrix:
if not hasattr(mat, 'TM'):
alltf = False
if alltf:
return printTFlist(matrix, title, nd)
i = 0
str1 = (t_tl + "════════════" + " " + title + " BEGIN " + "════════════" + "╗\n")
strm = ""
for mat in matrix:
if not isinstance(mat, list) and not isinstance(mat, tuple) and hasattr(mat, 'TM'):
strm += (str(mat) + "\n")
else:
if pdims:
strm+=("Dim " + str(i) + ":\n")
strm += dispa(mat)
i = i + 1
str2 = (t_bl + t_bar + "════════════" + title + " END ═" + "════════════" + "╝\n")
return str1 + strm + str2
shape = 0
#Variety of try catch to prevent crashes due to liberal use of disp()
try:
try:
shape = matrix.shape
except:
#Pandas objects IIRC use shape as a method
shape = matrix.shape()
dims = len(shape)
if dims >= 2:
t_key = shape[dims - 1]
else:
t_key = max(shape)
if new and title != "MATRIX":
strr+= title + ": "
except:
#If all else fails, print Normally
if title != "MATRIX":
strr += (title + ": ")
strr += (str(matrix) + "\n")
return strr
#Formats correct number of top and bottom markers for t_bar
while(len(title) + 8 + (len(t_bar) * 2)) < (t_key * (nd + 7) ):
t_bar = t_bar + "═"
#Prints a single Dimension Vector
if dims == 1:
cn = 0
if h == "╔ ":
cn = 1
elif h == "╚ ":
cn = 2
else:
h = h + "║ "
for i in range(shape[0]):
t_nd = nd
if (abs(matrix[i]) >= 9999):
nm = len(str(abs(round(matrix[i]))))
while t_nd > 0 and nm > 6:
t_nd = t_nd - 1
nm = nm - 1
fmat = "{:" + str(nd + 6) +"." + str(t_nd) + "f}"
h = h + fmat.format(matrix[i])
if i != shape[0] - 1:
h = h + ","
if cn == 0:
h = h + " ║"
elif cn == 1:
h = h + " ╗"
else:
h = h + " ╝"
strr+= (str(h) + "\n")
#Prints traditional Square Matrix, allows for title
elif dims == 2:
if title != "MATRIX":
strr+=(t_tl + t_bar + " " + title + " BEGIN " + t_bar + "╗\n")
for i in range(shape[0]):
if i == 0:
strr += dispa(matrix[i,], nd = nd, h = "╔ ", new = False)
elif i == shape[0] - 1:
strr += dispa(matrix[i,], nd = nd, h = "╚ ", new = False)
else:
strr += dispa(matrix[i,], nd = nd, new = False)
if title != "MATRIX":
strr+=(t_bl + t_bar + "═ " + title + " END ═" + t_bar + "╝\n")
#Prints 3D Matrix by calling 2D recursively
elif dims == 3:
strr += (t_tl + t_bar + " " + title + " BEGIN " + t_bar + "╗\n")
for i in range(shape[0]):
if pdims:
strr += ("DIM " + str(i) + ":\n")
strr += dispa(matrix[i,], nd = nd, new = False)
strr += (t_bl + t_bar + "═ " + title + " END ═" + t_bar + "╝\n")
#Prints 4D Matrix by calling 3D recursively
elif dims == 4:
strr += (t_tl + t_bar + "══ " + title + " BEGIN ══" + t_bar + "╗\n")
for i in range(shape[0]):
strr += dispa(matrix[i,], nd = nd, title = title + " d:" + str(i), pdims = pdims, new = False)
strr += (t_bl + t_bar + "═══ " + title + " END ═══" + t_bar + "╝\n")
else:
taux = "═" * (dims - 3)**2
strr += (t_tl + t_bar + taux +" " + title + " BEGIN " + taux + t_bar + "╗\n")
for i in range(shape[0]):
strr += dispa(matrix[i,], title = title + " s" + str(i), new = False)
strr += (t_bl + t_bar + taux + "═ " + title + " END ═" + taux + t_bar + "╝\n")
return strr
#More dimensions can be added if necessary
def disptex(matrix, title, nd = 3, pdims = True, h=""):
"""
Prints a matrix in latex format.
Args:
matrix: matrix to be printed
title: caption
nd: number of decimal places to round to
pdims: print dimensions
h: existing string
Returns:
String
"""
try:
shape = matrix.shape
except:
return dispa(matrix, title)
strr = "\\begin{table}\n\\centering\n\\begin{tabular}{|"
for i in range(shape[1]):
strr = strr + " c "
if i == shape[1] - 1:
strr = strr + ("|}\n\\hline\n")
strr+="\\toprule\n"
strr+="%INSERT CAPTIONS HERE\n"
strr+="\\midrule\n"
for i in range(shape[0]):
#strr+= "\\hline\n"
for j in range(shape[1]):
strr+= str(round(matrix[i, j], nd))
if j != shape[1] - 1:
strr+=" & "
strr+="\\\\\n"
strr+="\\bottomrule\n"
strr+="\\end{tabular}\n\\caption{" + title + "}\n\\end{table}\n"
return strr
def printTFlist(matrix, title, nd):
"""
Prints a list of TM objects (TF was deprecated)
Args:
matrix: list of tms to be printed
title: caption
nd: number of decimal places to round to
Returns:
String
"""
strr = "╔"
nTF = len(matrix)
tLen = (2 * nTF * (nd+1) + (2*nTF+1))
j = 0
for i in range(round((2 * nTF * (nd+1) + (2*nTF+1))/2 - len(title)/2 - 1)):
j+=1
strr+="═"
strr += (" " + title + " ")
j+= 2 + len(title)
for i in range(j, tLen):
strr+="═"
strr += "╗\n"
strr+= "╠═"
for i in range(nTF):
#strr += "╔"
for j in range(nd + 6):
strr+="═"
if i != nTF - 1:
strr += "╦"
strr+="═╣\n"
for j in range(6):
strr+= "║ "
for i in range(len(matrix)):
t_nd = nd
if (abs(matrix[i][j]) >= 9999):
nm = len(str(abs(round(matrix[i][j]))))
while t_nd > 0 and nm > 6:
t_nd = t_nd - 1
nm = nm - 1
fmat = "{:" + str(nd + 6) +"." + str(t_nd) + "f}"
strr = strr + fmat.format(matrix[i][j])
if i != nTF - 1:
strr = strr + ","
strr+=" ║\n"
strr+= "╠═"
for i in range(nTF):
#strr += "╚"
for j in range(nd + 6):
strr+="═"
if i != nTF - 1:
strr += "╩"
strr+="═╣\n"
strr +="╚"
for i in range(2 * nTF * (nd+1) + (2*nTF+1)):
strr+="═"
strr += "╝\n"
return strr
def progressBar(iteration, total, prefix = '', suffix = '', decimals = 1, length = 100, fill = '#', ETA=None):
"""
Prints a progress bar, can use ETA.
Adapted from here: https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
Params:
iteration: current iteration
total: goal number of iterations
prefix: Optional- Text to append to the beginning
suffix: Optional - Text to append to the end (overwritten by ETA)
decimals: Optional - Decimals to round to
length: Optional - Length of progress bar in characters
fill: Optional - Fill Character
ETA: Optional - start timestamp (e.g. time.time() at loop start); when given, an estimated completion time is shown as the suffix
"""
if ETA is not None:
current = time.time()
est_complete = (current-ETA)/(iteration+1)*(total-iteration)+current
est_complete_str = datetime.datetime.fromtimestamp(est_complete).strftime('ETA: %Y-%m-%d %I:%M:%S%p')
suffix = est_complete_str
percent = ("{0:." + str(decimals) + "f}").format(100*(iteration / float(total)))
filledLength = int(length * iteration // total)
bar = fill * filledLength + '-' * (length - filledLength)
print('\r%s |%s| %s%% %s' % (prefix, bar, percent, suffix), end = '\r')
if iteration == total:
print("")
``` |
{
"source": "64-bit/arcade-postprocessing",
"score": 2
} |
#### File: postprocessing/effects/chromatic_abberation.py
```python
import arcade
from postprocessing.post_effect import PostEffect
try:
import imgui
import imgui.core
except:
pass
class ChromaticAberration(PostEffect):
def __init__(self, context, window_size):
super().__init__(context, window_size)
self.program = context.load_program(
vertex_shader="postprocessing/core_shaders/fullscreen_quad.vs",
fragment_shader="postprocessing/effects/shaders/chromatic_abberation.fs",
)
self.program['t_source'] = 0
self._axial = 0.0
self._transverse = 0.0
self.axial = 0.3
self.transverse = 0.3
self.distance_scale = 0.01
newWeights = self.compute_weights(15)
self.program['u_channel_weights'] = newWeights[0]
self.program['u_channel_sums'] = newWeights[1]
def compute_weights(self, count):
def lerp(x,y, v):
return (x*(1.0-v)) + y*v
def clamp(value, min_val, max_val):
return max(min(value,max_val),min_val)
#0 = red, 0.5 = green 1 = blue
weight_sums = [0.0, 0.0, 0.0]
weights = []
for x in range(count):
factor = float(x) / float(count)
factor *= 2.0
r = clamp(1.0 - factor , 0.0, 1.0)
g = clamp(1.0 - abs(factor - 1.0) , 0.0, 1.0)
b = clamp(factor - 1.0, 0.0, 1.0)
weights.append(r)
weights.append(g)
weights.append(b)
weight_sums[0] += r
weight_sums[1] += g
weight_sums[2] += b
return (weights, weight_sums)
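#Illustrative note (not from the original source): for count = 3 the loop above
#yields per-sample (r, g, b) weights of roughly (1.0, 0.0, 0.0),
#(0.33, 0.67, 0.0) and (0.0, 0.67, 0.33), i.e. samples sweep from the red end
#of the fringe toward blue, and u_channel_sums collects the per-channel totals
#passed alongside them.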
def apply(self, render_target_pair):
render_target_pair.bind(0)
PostEffect.fullscreen_quad.render(self.program)
def clamp(value, min_val, max_val):
return max(min(value,max_val),min_val)
@property
def axial(self):
return self._axial
@axial.setter
def axial(self, value):
self._axial = ChromaticAberration.clamp(value, 0.0, 1.0)
self.program['u_axial'] = self._axial
#Ensure that axial + transverse do not sum to more than 1.0
if self._axial + self.transverse > 1.0:
self.transverse = 1.0 - self._axial
@property
def transverse(self):
return self._transverse
@transverse.setter
def transverse(self, value):
self._transverse = ChromaticAberration.clamp(value, 0.0, 1.0)
#self.program['u_transverse'] = self._transverse
#Ensure that axial + transverse do not sum to more than 1.0
if self._transverse + self.axial > 1.0:
self.axial = 1.0 - self._transverse
@property
def distance_scale(self):
return self._distance_scale
@distance_scale.setter
def distance_scale(self,value):
self._distance_scale = value
self.program['u_distance_scale'] = value
def show_ui(self):
super().show_ui()
self.axial = imgui.slider_float(f'Strength##{self.ui_index}', self.axial, 0.0, 1.0)[1]
#self.transverse = imgui.slider_float(f'Transverse Aberration##{self.ui_index}', self.transverse, 0.0, 1.0)[1]
self.distance_scale = imgui.slider_float(f'Distance Scale##{self.ui_index}', self.distance_scale, 0.0, 0.1)[1]
```
#### File: postprocessing/effects/greyscale.py
```python
import math
import arcade
from postprocessing.post_effect import PostEffect
try:
import imgui
import imgui.core
except:
pass
class GreyScale(PostEffect):
def __init__(self, context, window_size):
super().__init__(context, window_size)
self.program = context.load_program(
vertex_shader="postprocessing/core_shaders/fullscreen_quad.vs",
fragment_shader="postprocessing/effects/shaders/greyscale.fs",
)
self.program['t_source'] = 0
self.strength = 1.0
self.shadow_color = (0.0,0.0,0.0)
self.highlight_color = (1.0,1.0,1.0)
@property
def strength(self):
return self._strength
@strength.setter
def strength(self,value):
self._strength = value
self.program['u_strength'] = value
@property
def shadow_color(self):
return self._shadow_color
@shadow_color.setter
def shadow_color(self,value):
self._shadow_color = value
self.program['u_shadow_color'] = value
@property
def highlight_color(self):
return self._highlight_color
@highlight_color.setter
def highlight_color(self, value):
self._highlight_color = value
self.program['u_highlight_color'] = value
def apply(self, render_target_pair):
render_target_pair.bind(0)
PostEffect.fullscreen_quad.render(self.program)
def show_ui(self):
super().show_ui()
self.strength = imgui.slider_float(f'Strength##{self.ui_index}', self.strength, 0.0, 1.0)[1]
self.shadow_color = imgui.color_edit3(f'Shadow Color##{self.ui_index}', *self.shadow_color)[1]
self.highlight_color = imgui.color_edit3(f'Highlight Color##{self.ui_index}', *self.highlight_color)[1]
```
#### File: postprocessing/effects/template.py
```python
import math
import arcade
from postprocessing.post_effect import PostEffect
try:
import imgui
import imgui.core
except:
pass
class Tempalte(PostEffect):
def __init__(self, context, window_size):
super().__init__(context, window_size)
self.program = context.load_program(
vertex_shader="postprocessing/core_shaders/fullscreen_quad.vs",
fragment_shader="postprocessing/effects/shaders/template.fs",
)
self.program['t_source'] = 0
def apply(self, render_target_pair):
render_target_pair.bind(0)
PostEffect.fullscreen_quad.render(self.program)
def show_ui(self):
super().show_ui()
pass
```
#### File: arcade-postprocessing/postprocessing/render_target_pair.py
```python
class RenderTargetPair:
#Bind the texture side to a given texture index, and bind the render target side as the current drawing target
def bind(self, texture_index):
raise NotImplementedError("This needs to be implemented in a derrived class")
#Get the (texture,framebuffer) as a tuple pair for more advanced use cases
def get_render_target_pair(self):
raise NotImplementedError("This needs to be implemented in a derrived class")
```
#### File: 64-bit/arcade-postprocessing/sprite_bouncing_coins_post.py
```python
import arcade
import os
import random
from postprocessing.post_processing_chain import PostProcessingChain
from postprocessing.render_target import RenderTarget
from postprocessing.effects.vignette import Vignette
from postprocessing.effects.greyscale import GreyScale
from postprocessing.effects.bloom import Bloom
from postprocessing.effects.tonemap import Tonemap
from postprocessing.effects.split_tone import SplitTone
from postprocessing.effects.chromatic_abberation import ChromaticAberration
from postprocessing.effects.template import Tempalte
from typing import Iterable, Iterator
from typing import Any
from typing import TypeVar
from typing import List
from typing import Tuple
from typing import Optional
from typing import Union
import logging
import math
import array
import time
from PIL import Image
from arcade import Color
from arcade import Matrix3x3
from arcade import Sprite
from arcade import get_distance_between_sprites
from arcade import are_polygons_intersecting
from arcade import is_point_in_polygon
from arcade import rotate_point
from arcade import get_window
from arcade import Point
from arcade import gl
import imgui
import imgui.core
from arcade_imgui import ArcadeRenderer
from arcade_imgui import ArcadeGLRenderer
SPRITE_SCALING = 0.5
SCREEN_WIDTH = 1280
SCREEN_HEIGHT = 1024
SCREEN_TITLE = "Sprite Bouncing Coins"
MOVEMENT_SPEED = 5
class MyGame(arcade.Window):
""" Main application class. """
def __init__(self, width, height, title):
"""
Initializer
"""
super().__init__(width, height, title)
# Set the working directory (where we expect to find files) to the same
# directory this .py file is in. You can leave this out of your own
# code, but it is needed to easily run the examples using "python -m"
# as mentioned at the top of this program.
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
# Sprite lists
self.coin_list = None
self.wall_list = None
# Must create or set the context before instantiating the renderer
imgui.create_context()
self.renderer = ArcadeRenderer(self)
def setup(self):
""" Set up the game and initialize the variables. """
# Sprite lists
self.wall_list = arcade.SpriteList()
self.coin_list = arcade.SpriteList()
# -- Set up the walls
# Create horizontal rows of boxes
for x in range(32, SCREEN_WIDTH, 64):
# Bottom edge
wall = arcade.Sprite(":resources:images/tiles/boxCrate_double.png", SPRITE_SCALING)
wall.center_x = x
wall.center_y = 32
self.wall_list.append(wall)
# Top edge
wall = arcade.Sprite(":resources:images/tiles/boxCrate_double.png", SPRITE_SCALING)
wall.center_x = x
wall.center_y = SCREEN_HEIGHT - 32
self.wall_list.append(wall)
# Create vertical columns of boxes
for y in range(96, SCREEN_HEIGHT, 64):
# Left
wall = arcade.Sprite(":resources:images/tiles/boxCrate_double.png", SPRITE_SCALING)
wall.center_x = 32
wall.center_y = y
self.wall_list.append(wall)
# Right
wall = arcade.Sprite(":resources:images/tiles/boxCrate_double.png", SPRITE_SCALING)
wall.center_x = SCREEN_WIDTH - 32
wall.center_y = y
self.wall_list.append(wall)
# Create boxes in the middle
for x in range(128, SCREEN_WIDTH, 196):
for y in range(128, SCREEN_HEIGHT, 196):
wall = arcade.Sprite(":resources:images/tiles/boxCrate_double.png", SPRITE_SCALING)
wall.center_x = x
wall.center_y = y
# wall.angle = 45
self.wall_list.append(wall)
# Create coins
for i in range(10):
coin = arcade.Sprite(":resources:images/items/coinGold.png", 0.25)
coin.scale = 1
coin.center_x = random.randrange(100, 700)
coin.center_y = random.randrange(100, 500)
while coin.change_x == 0 and coin.change_y == 0:
coin.change_x = random.randrange(-4, 5)
coin.change_y = random.randrange(-4, 5)
self.coin_list.append(coin)
# Set the background color
arcade.set_background_color(arcade.color.AMAZON)
#setup post processing
self.setup_post_processing()
def setup_post_processing(self):
#Create a new post-processing chain, this will automatically resize with anything you render through it
self.post_processing = PostProcessingChain(self.ctx, self.get_size(), True)
#Allocate and add effects
#Not sure about this method of allocating an object / weird implicit factory thing
self.bloom = self.post_processing.add_effect(Bloom)
self.bloom.threshold = 0.9
self.bloom.power = 1.0
self.tonemap = self.post_processing.add_effect(Tonemap)
self.tonemap.threshold = 2.0
'''
self.chromatic = self.post_processing.add_effect(ChromaticAberration)
self.chromatic.axial = 1.0
self.chromatic.distance_scale = 0.003
self.greyscale = self.post_processing.add_effect(GreyScale)
self.greyscale.strength = 0.5
self.split_tone = self.post_processing.add_effect(SplitTone)
self.vignette = self.post_processing.add_effect(Vignette)
self.vignette.inner_distance = 0.1
'''
self.template = self.post_processing.add_effect(Tempalte)
size = self.get_size()
self.render_target = RenderTarget(self.ctx, size, 'f2')
def on_draw(self):
"""
Render the screen.
"""
# This command has to happen before we start drawing
arcade.start_render()
#Draw to a render target instead of the screen
self.render_target.bind_as_framebuffer()
self.render_target.framebuffer_object.clear(arcade.color.AMAZON)
# Draw all the sprites.
self.wall_list.draw()
self.coin_list.draw()
#Draw coin list again additively for HDR related reasons
self.coin_list.draw(blend_function=self.ctx.BLEND_ADDITIVE)
self.coin_list.draw(blend_function=self.ctx.BLEND_ADDITIVE)
self.coin_list.draw(blend_function=self.ctx.BLEND_ADDITIVE)
self.coin_list.draw(blend_function=self.ctx.BLEND_ADDITIVE)
#Apply the post processing effect chain to the render target, and apply it to the screen
self.post_processing.apply_effects(self.render_target.texture, self.ctx.screen)
self.draw_gui()
def draw_gui(self):
imgui.new_frame()
self.post_processing.show_postprocess_ui()
imgui.render()
self.renderer.render(imgui.get_draw_data())
def on_update(self, delta_time):
""" Movement and game logic """
for coin in self.coin_list:
coin.center_x += coin.change_x
walls_hit = arcade.check_for_collision_with_list(coin, self.wall_list)
for wall in walls_hit:
if coin.change_x > 0:
coin.right = wall.left
elif coin.change_x < 0:
coin.left = wall.right
if len(walls_hit) > 0:
coin.change_x *= -1
coin.center_y += coin.change_y
walls_hit = arcade.check_for_collision_with_list(coin, self.wall_list)
for wall in walls_hit:
if coin.change_y > 0:
coin.top = wall.bottom
elif coin.change_y < 0:
coin.bottom = wall.top
if len(walls_hit) > 0:
coin.change_y *= -1
def main():
""" Main method """
window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
window.setup()
arcade.run()
if __name__ == "__main__":
main()
``` |
{
"source": "64-bit/arcade",
"score": 3
} |
#### File: tests/test_gui/test_ui_manager_dispatcher.py
```python
from unittest.mock import Mock, call
from arcade.gui import UIManager
def test_handler_pushed():
window = Mock()
msg = UIManager(window)
window.assert_has_calls([
call.push_handlers(msg.on_draw,
msg.on_mouse_press,
msg.on_mouse_release,
msg.on_mouse_scroll,
msg.on_mouse_motion,
msg.on_key_press,
msg.on_key_release,
msg.on_text,
msg.on_text_motion,
msg.on_text_motion_select)
])
``` |
{
"source": "64BitAsura/docker-dependency-diagram",
"score": 2
} |
#### File: 64BitAsura/docker-dependency-diagram/snifferGraph.py
```python
import logging
import json
import requests
def get_kafka_cypher(source, topic):
return '''
MERGE (z: HOST {{ name: '{source}' }})
with z
MATCH (m: HOST {{ name: '{source}' }})
MERGE (x: TOPIC {{ name: '{topic}' }})
MERGE (m)-[r:PUB]->(x)
ON CREATE SET r.count = 1
ON MATCH SET r.count = r.count + 1
RETURN id(z)
'''.format(source=source, topic=topic)
def get_http_cypher(source, destination, method):
return '''
MERGE (z: HOST {{ name: '{source}' }})
with z
MATCH (m: HOST {{ name: '{source}' }})
MERGE (x: HOST {{ name: '{destination}' }})
MERGE (m)-[r:REL]->(x)
ON CREATE SET r.count = 1, r.name = '{method}'
ON MATCH SET r.count = r.count + 1
RETURN id(z)
'''.format(source=source, destination=destination, method=method)
def push_topic(source, topic):
cypher = " ".join(get_kafka_cypher(source, topic).splitlines())
push(cypher)
def push_connection(source, destination, method):
cypher = " ".join(get_http_cypher(source, destination, method).splitlines())
push(cypher)
def push(cypher):
try:
logging.warn(cypher)
requests.post('http://neo4j:7474/db/data/transaction/commit',
data=json.dumps({"statements": [{"statement": cypher}]}),
headers={"Content-Type": "application/json",
"Accept": "application/json; charset=UTF-8",
"Authorization": "Basic bmVvNGo6eWV0aQ=="})
except Exception as e:
logging.exception('graph commit failed {}'.format(e.message))
``` |
{
"source": "64bit-lab/Logik",
"score": 4
} |
#### File: Logik/logik/evaluator.py
```python
from . parser import *
from itertools import product
def get_vars(ast):
"""
@brief Exctract every free variables from AST.
@param ast The ast
@return The variables.
"""
var_list = []
def r_get_var(ast):
typ = ast[0]
if typ == 'value':
return
elif typ == 'symb':
if ast[1] not in var_list:
var_list.append(ast[1])
elif typ == 'non':
r_get_var(ast[1])
else:
r_get_var(ast[1])
r_get_var(ast[2])
r_get_var(ast)
return var_list
def make_envs(var_list):
"""
@brief Determine each possible valuation for a set of variables.
@param var_list The variable list
@return A list of possible valuations.
"""
tab = list(product([0, 1], repeat=len(var_list)))
env_list = []
for lines in tab:
env = {}
for i, v in enumerate(var_list):
env[v] = lines[i]
env_list.append(env)
return (env_list, tab)
def evaluate(ast, env):
"""
@brief Evaluate expression represented by AST with respect to valuation ENV
@param ast The ast
@param env The environment (valuation)
@return the result of the evaluation
"""
typ = ast[0]
if typ == 'symb':
return env[ast[1]]
elif typ == 'value':
return ast[1]
elif typ == 'non':
return 1 - evaluate(ast[1], env)
elif typ == 'ou':
return max(evaluate(ast[1], env), evaluate(ast[2], env))
elif typ == 'et':
return min(evaluate(ast[1], env), evaluate(ast[2], env))
elif typ == '->':
return max(1 - evaluate(ast[1], env), evaluate(ast[2], env))
def print_truth_table(ast):
"""
@brief Print the truth table for an expression with free variables.
@param ast The ast
"""
var_list = get_vars(ast)
envs, tab = make_envs(var_list)
if len(var_list) > 0:
print("\nTruth table : \n")
print(*var_list)
print('--'*(len(var_list)))
for i, row in enumerate(envs):
print(*tab[i], end=' ')
print(evaluate(ast, row))
else:
print("\nValue : \n")
print(evaluate(ast, {}))
```
#### File: logik/sat/process_ast.py
```python
def remove_implications(ast):
"""
@brief Removes implications in an AST.
@param ast The ast
@return another AST
"""
if len(ast) == 3:
op, oper1, oper2 = ast
oper1 = remove_implications(oper1)
oper2 = remove_implications(oper2)
if op == '->':
return ('ou', ('non', oper1), oper2)
else:
return ast
return ast
def is_node_op(ast, op):
return ast[0] == op
def is_litteral(ast):
return ast[0] == 'sym' or ast[0] == 'value'
def distribute_or(ast):
"""
@brief Distributes or on and if needed.
@param ast The ast
@return another ast
"""
assert not is_node_op(ast, '->'), \
"Or can only be distributed on implication free AST"
assert ast is not None, "Empty ast"
if is_node_op(ast, 'or'):
_, exprA, exprB = ast
exprA = distribute_or(exprA)
exprB = distribute_or(exprB)
if is_node_op(exprB, 'and'):
_, exprC, exprD = exprB
exprC = distribute_or(exprC)
exprD = distribute_or(exprD)
left = distribute_or(('or', exprA, exprC))
right = distribute_or(('or', exprA, exprD))
return ('and', left, right)
if is_node_op(exprA, 'and'):
_, exprC, exprD = exprA
exprC = distribute_or(exprC)
exprD = distribute_or(exprD)
left = distribute_or(('or', exprC, exprB))
right = distribute_or(('or', exprD, exprB))
return ('and', left, right)
if len(ast) == 2:
return ast
if len(ast) == 3:
a, b, c = ast
return (a, distribute_or(b), distribute_or(c))
def remove_negations(ast):
"""
@brief Removes all negations.
@param ast The ast
@return another ast
"""
assert not is_node_op(ast, '->'), \
"Negations can only be removed on implication free AST"
assert ast is not None, "Empty ast"
if is_node_op(ast, 'non'):
_, exprA = ast
if is_node_op(exprA, 'or'):
_, exprB, exprC = exprA
exprB = remove_negations(('non', exprB))
exprC = remove_negations(('non', exprC))
return ('and', exprB, exprC)
if is_node_op(exprA, 'and'):
_, exprB, exprC = exprA
exprB = remove_negations(('non', exprB))
exprC = remove_negations(('non', exprC))
return ('or', exprB, exprC)
if is_litteral(exprA):
return ('non', exprA)
if is_node_op(exprA, 'non'):
_, exprB = exprA
exprB = remove_negations(exprB)
return exprB
if len(ast) == 3:
op, A, B = ast
A = remove_negations(A)
B = remove_negations(B)
return (op, A, B)
if len(ast) == 2:
return ast
def prepare_for_cnf(ast):
"""
@brief Prepare an ast to be converted in Conjuntive Normal Form.
@param ast The ast
@return another AST ready to be converted in CNF.
"""
ast = remove_implications(ast)
ast = remove_negations(ast)
ast = distribute_or(ast)
return ast
```
#### File: Logik/tests/test_parser.py
```python
from logik import parser
from logik import lexer
from logik.parser import parse
from logik.parser import Lexbuf
from logik.lexer import lex
def test_parse():
outputA = parse(Lexbuf(list(lex("a et b"))))
expectedA = ('et', ('symb', 'a'), ('symb', 'b'))
outputB = parse(Lexbuf(list(lex("a ou b"))))
expectedB = ('ou', ('symb', 'a'), ('symb', 'b'))
outputC = parse(Lexbuf(list(lex("a -> b"))))
expectedC = ('->', ('symb', 'a'), ('symb', 'b'))
outputD = parse(Lexbuf(list(lex("((a et b))"))))
expectedD = ('et', ('symb', 'a'), ('symb', 'b'))
outputE = parse(Lexbuf(list(lex("non a"))))
expectedE = ('non', ('symb', 'a'))
outputF = parse(Lexbuf(list(lex("a -> non (b ou c)"))))
expectedF = ('->', ('symb', 'a'), ('non', ('ou', ('symb', 'b'), ('symb', 'c'))))
outputG = parse(Lexbuf(list(lex("a et b ou c"))))
expectedG = ('ou', ('et', ('symb', 'a'), ('symb', 'b')), ('symb', 'c'))
assert outputA == expectedA, \
"simple AND expressions should be parsed correctly"
assert outputC == expectedC, \
"simple OR expressions should be parsed correctly"
assert outputC == expectedC, \
"simple IMPILCATIONS expressions should be parsed correctly"
assert outputD == expectedD, \
"over parenthesized expressions should be parsed correctly"
assert outputE == expectedE, \
"simple NEGATIONS expressions should be parsed correctly"
assert outputF == expectedF, \
"chained expressions should be parsed correctly"
assert outputG == expectedG, \
"chained AND, OR expressions should be parsed with correct precedence"
def run():
print("TEST parse ...")
test_parse()
print('-> ok')
``` |
{
"source": "64bitpandas/full-waitlist-enroller",
"score": 3
} |
#### File: 64bitpandas/full-waitlist-enroller/enroll.py
```python
import os
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait, Select
from selenium.webdriver.support import expected_conditions as ExpCond
from selenium.webdriver.common.by import By
from six.moves import input as raw_input
from constants import *
from cal_auth import auth_calnet
def swap_loop(driver):
"""Runs the swapping behavior in a loop until the user quits the program."""
# navigate to the swap page
driver.switch_to.frame(0)
n = 0
radio_btn = None
while radio_btn is None and n < 5:
term_texts = driver.find_elements_by_id('TERM_CAR${0}'.format(n))
if len(term_texts) > 0 and term_texts[0].text == TERM:
radio_btn = driver.find_element_by_id('SSR_DUMMY_RECV1$sels${0}$$0'.format(n))
else:
n += 1
radio_btn.click()
driver.find_element_by_id('DERIVED_SSS_SCT_SSR_PB_GO').click()
# find the courses and select them
WebDriverWait(driver, 10).until(ExpCond.presence_of_element_located((By.ID, 'DERIVED_REGFRM1_DESCR50$225$')))
swap_selected, course_selected = False, False
swap_dropdown = driver.find_element_by_id('DERIVED_REGFRM1_DESCR50$225$')
for option in swap_dropdown.find_elements_by_tag_name('option'):
if SWAP_NAME in option.text:
print('Selected {0} to swap'.format(option.text))
option.click()
swap_selected = True
course_dropdown = driver.find_element_by_id('DERIVED_REGFRM1_SSR_CLASSNAME_35$183$')
for option in course_dropdown.find_elements_by_tag_name('option'):
if COURSE_NAME in option.text:
print('Selected {0} to enroll'.format(('' + option.text).replace('\n', ' ')))
option.click()
course_selected = True
if not swap_selected:
print("ERROR: Course {0} not found in your enrolled classes!".format(SWAP_NAME))
elif not course_selected:
print("ERROR: Course {0} not found in your shopping cart!".format(COURSE_NAME))
else:
# Wait until enroll page is loaded
driver.find_element_by_id('DERIVED_REGFRM1_SSR_PB_ADDTOLIST1$184$').click()
WebDriverWait(driver, 10).until(ExpCond.presence_of_element_located((By.ID, 'DERIVED_REGFRM1_SSR_PB_SUBMIT')))
# Wait until confirmation message appears
driver.find_element_by_id('DERIVED_REGFRM1_SSR_PB_SUBMIT').click()
WebDriverWait(driver, 10).until(ExpCond.presence_of_element_located((By.ID, 'DERIVED_REGFRM1_DESCR1$8$')))
message_xpath = '/html/body/form/div[5]/table/tbody/tr/td/div/table/tbody/tr[9]/td[2]/div/table/tbody/tr/td/table/tbody/tr[2]/td[2]/div/div'
if 'no available seats' in driver.find_element_by_xpath(message_xpath).text:
print("\nClass is full! Enrollment was unsuccessful.")
get_input(driver)
else:
print("\nEnrollment successful! Congrats on getting into the waitlist for {0}.".format(COURSE_NAME))
def get_input(driver):
"""Gets input of whether or not the user wants to continue trying."""
choice = raw_input("\nTry again? (Y/N) > ")
if choice == 'Y':
driver.switch_to.parent_frame()
driver.find_element_by_id('PT_WORK_PT_BUTTON_BACK').click()
swap_loop(driver)
elif choice == 'N':
driver.quit()
else:
print("Invalid choice.")
get_input(driver)
def enroll():
print("""
======================================================
| Full Waitlist Enroller |
| by 64bitpandas |
| version 0.0.1 |
======================================================
""")
if (not DISABLE_WARNING):
print("""
>> NOTICE!
This program is still in development.
Unintended behavior may result from the use of this program.
Further usage of this program means that you run this program at your own risk.
IMPORTANT: DO NOT TRUST YOUR PERSONAL DATA IN THE WRONG HANDS!
Always double check the source code of what you are running.
You may safely quit this application through CTRL+C.
To disable this warning, edit the DISABLE_WARNING flag in constants.py.
""")
driver_options = webdriver.ChromeOptions()
driver_options.add_argument("--ignore-certificate-errors-spki-list")
driver_options.add_argument("--ignore-ssl-errors")
driver_options.add_argument('log-level=3')
driver_options.headless = HEADLESS
driver = webdriver.Chrome(
executable_path="./chromedriver.exe",
options=driver_options)
if auth_calnet(driver):
swap_loop(driver)
else:
print("Authentication failed. Quitting this application...")
driver.quit()
if __name__ == "__main__":
enroll()
``` |
{
"source": "64bit/web-crawler",
"score": 3
} |
#### File: web-crawler/rules/rule.py
```python
class Rule(object):
def __init__(self):
pass
def matches(self, url_parse_result):
# derived classes will implement this
return True
``` |
{
"source": "64bit/wikiracer",
"score": 3
} |
#### File: wikiracer/algorithm/bfs.py
```python
from algorithm import Algorithm
from collections import deque
class BFS(Algorithm):
def __init__(self, config, graph):
super(BFS, self).__init__(config, graph)
def debug(self, pageid):
print("current page: ", pageid )
page = self.graph.page(pageid)
print("page: ", page)
def run(self):
q = deque()
q.append(self.start_page["pageid"])
while( len(q) > 0 and not self.found):
pageid = q.popleft()
#self.debug(pageid)
page_links = self.graph.adj(pageid)
for page_link in page_links:
# skip self links:
if( pageid == page_link["pageid"]):
continue
# already visited
if( page_link["pageid"] in self.parents):
continue
self.parents[ page_link["pageid"] ] = pageid
if(page_link["pageid"] == self.end_page["pageid"]):
self.found = True
break
q.append(page_link["pageid"])
```
#### File: wikiracer/cache/db_cache.py
```python
from cache import Cache
class DBCache(Cache):
def __init__(self, dbstore):
#setup db
self.dbstore = dbstore
def get(self, key):
return self.dbstore.get_page_links(key)
def put(self, key, value):
self.dbstore.save_page_links(key, value)
```
#### File: wikiracer/test/test_store.py
```python
import unittest
import sys
sys.path.append("../")
from store.store import Store
from store.neo4jstore import Neo4jStore
from store.sqlitestore import SqliteStore
from neo4j.v1 import GraphDatabase, basic_auth
#TODO fix tests
'''
class TestStore(unittest.TestCase):
def setUp(self):
self.store = Neo4jStore()
self.pages = [
{
'pageid': 1,
'title': 'one',
'fullurl' : 'https://wiki.com/one'
},
{
'pageid': 2,
'title': 'two',
'fullurl' : 'https://wiki.com/two'
},
]
self.pages_dist_1 = [
{
'pageid': 3,
'title': 'three',
'fullurl' : 'https://wiki.com/three'
},
{
'pageid': 4,
'title': 'four',
'fullurl' : 'https://wiki.com/four'
},
]
self.pages_dist_2 = [
{
'pageid': 5,
'title': 'five',
'fullurl' : 'https://wiki.com/five'
},
{
'pageid': 6,
'title': 'six',
'fullurl' : 'https://wiki.com/six'
},
]
def tearDown(self):
pass
def test_save_pages(self):
self.store.save_pages(self.pages)
for page in self.pages:
read_page = self.store.get_page_from_id(page['pageid'])
self.assertEqual(read_page, page)
def test_save_and_get_page_links(self):
self.store.save_pages(self.pages)
self.store.save_page_links(1, self.pages_dist_1)
self.store.save_page_links(3, self.pages_dist_2)
read_page_links = self.store.get_page_links(1)
self.assertEqual(2, len(read_page_links))
page_3 = filter(lambda p: p['pageid'] == 3, read_page_links)[0]
page_4 = filter(lambda p: p['pageid'] == 4, read_page_links)[0]
self.assertEqual(self.pages_dist_1[0], page_3)
self.assertEqual(self.pages_dist_1[1], page_4)
read_page_links2 = self.store.get_page_links(3)
self.assertEqual(2, len(read_page_links2))
page_5 = filter(lambda p: p['pageid'] == 5, read_page_links2)[0]
page_6 = filter(lambda p: p['pageid'] == 6, read_page_links2)[0]
self.assertEqual(self.pages_dist_2[0], page_5)
self.assertEqual(self.pages_dist_2[1], page_6)
self.assertEqual([], self.store.get_page_links(2))
self.assertEqual([], self.store.get_page_links(4))
self.assertEqual([], self.store.get_page_links(5))
self.assertEqual([], self.store.get_page_links(6))
def test_get_page_from_url_title(self):
self.store.save_pages(self.pages)
page1 = self.store.get_page_from_url_title('one')
self.assertEqual(1, len(page1))
self.assertEqual(self.pages[0], page1[0])
'''
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "64blowfish/fb-data",
"score": 2
} |
#### File: fb-data/fbdata/temp.py
```python
def foo() -> str:
"""Returns bar"""
return "bar"
def untested():
return True
``` |
{
"source": "64/hh",
"score": 3
} |
#### File: hh/test/test_connect.py
```python
import socket
import unittest
import time
import ssl
def connect_tls():
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s = ssl.wrap_socket(sock, ssl_version=ssl.PROTOCOL_TLS)
try:
s.connect(("localhost", 8000))
except:
s.shutdown(socket.SHUT_RDWR)
s.close()
return s
def connect_h2():
sock = connect_tls()
sock.sendall(bytes("PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n", "ascii"))
return sock
class TestConnect(unittest.TestCase):
def test_connection_preface(self):
sock = connect_tls()
sock.sendall(bytes("PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n", "ascii"))
sock.close()
def test_initial_settings(self):
sock = connect_h2()
sock.sendall(bytes([0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00]))
sock.close()
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "64jantuma/atsign.dev",
"score": 3
} |
#### File: .github/@automation/detail_at.py
```python
import os, sys, json, requests, yaml
# Color constants
# Reference: https://gist.github.com/chrisopedia/8754917
COLERR="\033[0;31m"
COLINFO="\033[0;35m"
COLRESET="\033[m"
baseurl = 'https://pub.dev/api'
headers = {"Content-Type": "application/json", "Accept": "application/vnd.pub.v2+json", "accept-encoding" : "gzip"}
list_file = "./.github/@automation/at_pubdev.list"
output_file = "./content/en/docs/Functional_Architecture/libraries.md"
def detail_at_repos(package):
# Get list of all repos in an org
response = requests.get(baseurl + "/packages/" + package,
headers=headers)
if response.status_code != 200:
# An error occured
print(COLERR + "Error getting package data : " +
str(response.status_code) + " " + response.text + COLRESET)
# Convert repos to YAML
json_details = json.loads(response.text)
#print(json_details["latest"])
f.write(f'### {json_details["latest"]["pubspec"]["name"]}\n\n')
f.write(f'{json_details["latest"]["pubspec"]["description"]}\n\n')
f.write(f'[Learn more](https://pub.dev/packages/{json_details["latest"]["pubspec"]["name"]})\n\n')
f = open(output_file, 'w')
f.write('---\n')
f.write('title: "Libraries"\n')
f.write('linkTitle: "Libraries"\n')
f.write('parent: /Functional_Architecture/\n')
f.write('weight: 2\n')
f.write('description: >\n')
f.write(' Find the list of libraries the @platform has to offer here!\n')
f.write('---\n\n\n')
at_list = open(list_file,'r')
while(True):
package = at_list.readline().strip("\n")
if not package:
break
print(package)
detail_at_repos(package)
at_list.close
f.close
``` |
{
"source": "64Mega/sixtyfour",
"score": 2
} |
#### File: sixtyfour/info/context_processors.py
```python
from django.conf import settings
from .models import LinkList
def get_linklists(request):
res = LinkList.objects.all()
lut = {}
for ll in res:
lut.update({ll.key:ll.link_set.filter(published=True).order_by('sort').all()})
return {
'link_list':lut,
}
all_processors = [
get_linklists,
]
def all(request):
combined = {}
for f in all_processors:
combined.update(f(request))
return combined
```
#### File: sixtyfour/moderation/admin.py
```python
from django.contrib import admin
from .models import AuthLog
class AuthLogAdmin(admin.ModelAdmin):
model = AuthLog
readonly_fields = ['date','user','ip_address','city','region','country','continent']
list_display = ['date','user','ip_address','city','region','country','continent']
list_display_links = None
list_filter = ['date']
list_select_related = ['user']
ordering = ['-date']
search_fields = ['=user__username','=ip_address','=city','=region','=country','=continent']
def has_add_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
def save_model(self, request, obj, form, change):
pass
def delete_model(self, request, obj):
pass
def save_related(self, request, form, formsets, change):
pass
admin.site.register(AuthLog,AuthLogAdmin)
```
#### File: sixtyfour/sixtyfour/formatters.py
```python
from bbcode import Parser
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from urllib.parse import urlparse, parse_qs
import random
import re
from markdown import markdown
from .utils import static_var
from pygments.formatters import HtmlFormatter;
from pygments.lexers import get_lexer_by_name, guess_lexer;
from pygments import highlight;
from pygments.util import ClassNotFound
# BB64 Meta (Decorators)
def bb64_embed_responsive(fn):
def wrapper_fn(tag_name, value, options, parent, context):
inner=fn(tag_name, value, options, parent, context)
return format_html('<div class="embed-responsive embed-responsive-16by9">{}</div>',inner)
return wrapper_fn
def bb64_exempt_preview(placeholder=None):
def decorator(fn):
def wrapper_fn(tag_name, value, options, parent, context):
if context and 'preview' in context and context['preview']:
return placeholder if placeholder is not None else '[%s]'%tag_name
return fn(tag_name, value, options, parent, context)
return wrapper_fn
return decorator
# BB64 Tags
def bb64_img(tag_name, value, options, parent, context):
title = 'Image'
width = ''
height = ''
if 'title' in options:
title = options['title']
if 'width' in options:
width = options['width']
if 'height' in options:
height = options['height']
return format_html('<img class="bbcode-img" src="{value}" title="{title}" width="{width}" height="{height}">',
value=value, title=title, width=width, height=height
)
def bb64_rev(tag_name, value, options, parent, context):
return format_html('<span class="bbcode-rev">{value}</span>', value=value[::-1])
def bb64_font(tag_name, value, options, parent, context):
font = 'sans serif'
if 'font' in options:
font = options['font']
return format_html('<span style="font-family: {font};">{value}</span>', font=font, value=mark_safe(value))
def bb64_size(tag_name, value, options, parent, context):
size = ''
if 'size' in options:
size = re.sub(r"\D", "", options['size'])
return format_html('<span style="font-size: {size}pt;">{value}</span>', size=size, value=mark_safe(value))
def bb64_color(tag_name, value, options, parent, context):
if tag_name in options:
color = options[tag_name].strip()
elif options:
color = list(options.keys())[0].strip()
else:
return value
match = re.match(r'^([a-z]+)|^(#[a-f0-9]{3,6})', color, re.I)
color = match.group() if match else 'inherit'
return format_html('<span style="color:{color};">{value}</span>', color=color, value=mark_safe(value))
def bb64_tnail(tag_name, value, options, parent, context):
width = '204'
height = ''
post_id = context['post'].id
gallery_id = "gallery-%s" % post_id
title = 'Image thumbnail'
if 'width' in options:
width = options['width']
if 'height' in options:
height = options['height']
if 'gallery' in options:
gallery_id = options['gallery']
if 'title' in options:
title = options['title']
return format_html("""
<a data-fancybox="{gallery_id}" href="{value}">
<img title="{title}" src="{value}" width="{width}" height="{height}">
</a>
""",
gallery_id=gallery_id, title=title, value=value, width=width, height=height
)
def bb64_shh(tag_name, value, options, parent, context):
current_user = context['request'].user
if not current_user:
return ""
target_user = ''
if 'shh' in options:
target_user = options['shh']
if current_user.is_authenticated and current_user.username == target_user:
return format_html("""
<div class="card">
<div class="card-header">
Whispering to {target_user}
</div>
<div class="card-body">
{value}
</div>
</div>
""", target_user=target_user, value=mark_safe(value))
elif not current_user.is_authenticated and target_user == 'guest':
return format_html("""
<div class="card">
<div class="card-header">
Whispering to guest
</div>
<div class="card-body">
{value}
</div>
</div>
""", value=mark_safe(value))
else:
return ""
def bb64_blind(tag_name, value, options, parent, context):
current_user = context['request'].user
if not current_user:
return ""
target_user = ''
if 'blind' in options:
target_user = options['blind']
if current_user.is_authenticated and current_user.username != target_user:
return format_html("""
<div class="card">
<div class="card-header">
Hiding from {target_user}
</div>
<div class="card-body">
{value}
</div>
</div>
""", target_user=target_user, value=mark_safe(value))
else:
return ""
def bb64_quote(tag_name, value, options, parent, context):
target_user = options['quote'] if (options and 'quote' in options) else ''
return format_html("""
<div class="card">
<div class="card-header">
Quote: {target_user}
</div>
<div class="card-body">
{value}
</div>
</div>
""", target_user=target_user, value=mark_safe(value))
@static_var(hide_index = 0)
def bb64_hide(primary_reason=None, show=False, header_class='', button_class='text-primary'):
@bb64_exempt_preview("")
def bb64_hide_internal(tag_name, value, options, parent, context):
if primary_reason == None:
if_collapsed = "Show: "
if_not_collapsed = "Hide: "
reason = ""
else:
if_collapsed = ""
if_not_collapsed = ""
reason = primary_reason
if tag_name in options:
reason += options[tag_name]
bb64_hide.hide_index += 1
params = {
'header_class':header_class,
'button_class':button_class,
'show_class':'show' if show else '',
'collapsed_class':'' if show else 'collapsed',
'reason':reason,
'hide_id':"bbcode-hide-%d" % (bb64_hide.hide_index),
'value':mark_safe(value),
'if_collapsed':if_collapsed,
'if_not_collapsed':if_not_collapsed
}
return format_html("""
<div class="card">
<div class="card-header {header_class}">
<button class="btn btn-link {button_class} {collapsed_class}" data-toggle="collapse" data-target="#{hide_id}">
<span class="if-collapsed">[+] {if_collapsed}</span><span class="if-not-collapsed">[-] {if_not_collapsed}</span>
{reason}
</button>
</div>
<div id="{hide_id}" class="collapse {show_class}">
<div class="card-body">
{value}
</div>
</div>
</div>
""", **params )
return bb64_hide_internal
def bb64_user(tag_name, value, options, parent, context):
user = ''
if 'user' in options:
user = options ['user']
return format_html('<a href="/user/{user}">{user}</a>', user=user)
def bb64_profile(tag_name, value, options, parent, context):
user = ''
if 'profile' in options:
user = options['profile']
return format_html('<a href="/user/{user}">{user}</a>', user=user)
def bb64_rand(tag_name, value, options, parent, context):
choices = re.split(r"\[[oO0*@+x#|]\]", value)
return choices[random.randint(0, len(choices)-1)]
def bb64_markdown(tag_name, value, options, parent, context):
return markdown(value)
def bb64_theusertag(tag_name, value, options, parent, context):
current_user = context['request'].user
if current_user.is_authenticated:
return current_user.username
else:
return 'guest'
@bb64_exempt_preview("(Embedded Audio)")
def bb64_h5audio(tag_name, value, options, parent, context):
return format_html('<audio src={value} controls preload="none">Audio not supported</audio>', value=value)
@bb64_exempt_preview("(Embedded Video)")
def bb64_h5video(tag_name, value, options, parent, context):
return format_html('<video src={value} controls>Video not supported</video>', value=value)
def get_yt_video_id(url):
if not re.match('[\./]',url):
return url
if url.startswith(('youtu', 'www')):
url = 'http://' + url
query = urlparse(url)
if 'youtube' in query.hostname:
if query.path == '/watch':
return parse_qs(query.query)['v'][0]
elif query.path.startswith(('/embed/', '/v/')):
return query.path.split('/')[2]
elif 'youtu.be' in query.hostname:
return query.path[1:]
else:
return ValueError
@bb64_exempt_preview("(Embedded Video)")
@bb64_embed_responsive
def bb64_youtube(tag_name, value, options, parent, context):
video_id = get_yt_video_id(value)
return format_html("""
<iframe width="640" height="360"
class="bbcode-youtube"
src="https://www.youtube.com/embed/{video_id}"
frameborder="0"
allow="encrypted-media;picture-in-picture" allowfullscreen></iframe>
""", video_id=video_id)
@bb64_exempt_preview("(Embedded Video)")
@bb64_embed_responsive
def bb64_vimeo(tag_name, value, options, parent, context):
video_id = value.split("/")[-1]
return format_html("""
<iframe src="https://player.vimeo.com/video/{video_id}"
width="640"
height="360"
frameborder="0"
allow="fullscreen" allowfullscreen></iframe>
""", video_id=video_id)
@bb64_exempt_preview("(Embedded Audio)")
def bb64_soundcloud(tag_name, value, options, parent, context):
return format_html("""
<iframe
width="100%" height="300"
scrolling="no"
frameborder="no"
src="https://w.soundcloud.com/player/?url={value}"></iframe>
""", value=value)
@bb64_exempt_preview("(Embedded Security Risk)")
@bb64_exempt_preview
def bb64_flash(tag_name, value, options, parent, context):
width = '640'
height = '360'
if 'width' in options:
width = options['width']
if 'height' in options:
height = options['height']
return format_html("""
<object type="application/x-shockwave-flash"
data="{value}"
width="{width}" height="{height}">
<param name="movie" value="{value}" />
<param name="quality" value="high"/>
</object>
""", width=width, height=height, value=value)
@bb64_exempt_preview()
def bb64_paypal(tag_name, value, options, parent, context):
paypal_button_id = value.split("/")[-1]
return format_html("""
<form action="https://www.paypal.com/cgi-bin/webscr" method="post" target="_top">
<input type="hidden" name="cmd" value="_s-xclick" />
<input type="hidden" name="hosted_button_id" value="{paypal_button_id}" />
<input type="image" src="https://www.paypalobjects.com/en_US/i/btn/btn_donate_LG.gif" border="0" name="submit" title="PayPal - The safer, easier way to pay online!" alt="Donate with PayPal button" />
</form>""", paypal_button_id=paypal_button_id)
def bb64_code(tag_name, value, options, parent, context):
lang = 'text'
if tag_name in options:
lang = options[tag_name]
lexer = None
try:
lexer = get_lexer_by_name(lang)
except ClassNotFound:
try:
lexer = guess_lexer(value)
except ClassNotFound:
lexer = get_lexer_by_name('text')
title="" if lang == 'text' else lang
formatter = HtmlFormatter(linenos=False)
result = highlight(value, lexer, formatter)
return format_html("""
<div class="card card-code">
<div class="card-body bbcode-code"><span class="text-muted float-right">{title}</span>{result}</div>
</div>
""", title=title, result=mark_safe(mark_safe(result)))
def ExtendedParser(*args, **kwargs):
parser = Parser(newline="<p></p>", *args, **kwargs)
simple=[
'b','i','u','em', 'tt',
'sub', 'sup', 'ul','ol','li',
'h1','h2','h3','h4','h5','h6'
]
for t in simple:
parser.add_simple_formatter(t, '<'+t+'>%(value)s</'+t+'>')
parser.add_simple_formatter('right', '<span class="bbcode-right">%(value)s</span>', transform_newlines=False)
parser.add_simple_formatter('flex', '<div class="bbcode-flex">%(value)s</div>')
parser.add_simple_formatter('u2', '<span style="border-bottom: 1px dotted gray;">%(value)s</span>')
parser.add_simple_formatter('o', '<span style="text-decoration: overline;">%(value)s</span>')
parser.add_simple_formatter('br', '<br />', standalone=True)
def bind(*args,**kwargs):
parser.add_formatter(*args, **kwargs)
bind('img', bb64_img, replace_links=False)
bind('quote', bb64_quote, swallow_trailing_newline=True)
bind('code', bb64_code, render_embedded=False, escape_html=False, replace_cosmetic=False)
bind('rev', bb64_rev)
bind('font', bb64_font)
bind('size', bb64_size)
bind('color', bb64_color)
bind('tnail', bb64_tnail, replace_links=False)
bind('hide', bb64_hide(), swallow_trailing_newline=True)
bind('show', bb64_hide(show=True), swallow_trailing_newline=True)
bind('nsfw', bb64_hide("NSFW: ", header_class='bg-danger', button_class='text-white'), swallow_trailing_newline=True)
bind('spoiler', bb64_hide("Spoiler: ", header_class='bg-dark', button_class='text-white'), swallow_trailing_newline=True)
bind('shh', bb64_shh, swallow_trailing_newline=True)
bind('blind', bb64_blind, swallow_trailing_newline=True)
bind('user', bb64_user,standalone=True)
bind('profile', bb64_profile, standalone=True)
bind('h5audio', bb64_h5audio, replace_links=False)
bind('h5video', bb64_h5video, replace_links=False)
bind('audio', bb64_h5audio, replace_links=False)
bind('youtube', bb64_youtube, replace_links=False)
bind('youtubehd', bb64_youtube, replace_links=False)
bind('youtubeaudio', bb64_youtube, replace_links=False)
bind('vimeo', bb64_vimeo, replace_links=False)
bind('soundcloud', bb64_soundcloud, replace_links=False)
#bind('flash', bb64_flash, replace_links=False) # Too risky
bind('paypal', bb64_paypal, replace_links=False)
bind('theusertag', bb64_theusertag, standalone=True)
bind('rand', bb64_rand)
bind('markdown', bb64_markdown, render_embedded=False)
aliases = {
'ln': 'hr',
'col': 'color',
'colour': 'color',
'md': 'markdown',
'choice': 'rand',
}
for k,v in aliases.items():
parser.recognized_tags[k] = parser.recognized_tags[v]
return parser
main_parser = ExtendedParser()
def bbcode64(entry, context=None):
context['parser_obj'] = entry
processed = main_parser.format(entry.entry.strip(), **context)
return format_html('<div class="bbcode"><p>{}</p></div>',mark_safe(processed))
```
#### File: sixtyfour/user/models.py
```python
from django.db import models
from django.db.models import Q
from django.db.models.signals import post_save
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
from django.urls import reverse
from django.dispatch import receiver
from sixtyfour.formatters import bbcode64
import datetime, hashlib, os
def get_sentinel_user():
return get_user_model().objects.get_or_create(username='deleted')[0]
def user_avatar_path(instance, filename):
name, ext = os.path.splitext(filename)
username = instance.user.username
newname = hashlib.sha1(username.encode('utf-8')).hexdigest() + ext
return 'profile/avatar/{}'.format(newname)
def user_banner_path(instance, filename):
name, ext = os.path.splitext(filename)
username = instance.user.username
newname = hashlib.sha1(username.encode('utf-8')).hexdigest() + ext
return 'profile/banner/{}'.format(newname)
class Profile(models.Model):
avatar = models.ImageField(max_length=128,blank=True,upload_to=user_avatar_path)
banner = models.ImageField(max_length=128,blank=True,upload_to=user_banner_path)
profile = models.TextField(blank=True)
location = models.CharField(max_length=40, blank=True)
hit_counter = models.IntegerField()
old_password = models.CharField(max_length=512, blank=True, default='')
user = models.OneToOneField(
get_user_model(),
on_delete = models.CASCADE,
primary_key = True
)
@property
def url(self):
return reverse('user:listing', kwargs={'username':self.user.username})
@property
def avatar_url(self):
#return "%s%s" % (settings.AVATAR_URL, self.avatar)
return self.avatar.url
@property
def banner_url(self):
#return "%s%s" % (settings.BANNER_URL, self.banner)
return self.banner.url
@property
def is_regular(self):
#return (datetime.now() - self.user.date_joined).days > 90
# All users are regular users
return True
@property
def entry(self):
return self.profile
@receiver(post_save, sender=User)
def create_user_profile(sender, instance, created, **kwargs):
if kwargs.get('created', True) and not kwargs.get('raw', False):
Profile.objects.get_or_create(user=instance,defaults={'hit_counter':0})
def __str__(self):
return 'Profile: %s' % (self.user.username)
class PostVisibility():
PUBLIC=0
REGISTERED=1
REGULAR=2
STAFF=3
PERSONAL=4
choices = [
(PUBLIC, 'Public'),
(REGISTERED, 'Registered Members'),
(REGULAR, 'Regular Members'),
(STAFF, 'Staff Members'),
(PERSONAL, 'Only Me')
]
class PostManager(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(deleted=False)
class Post(models.Model):
title = models.CharField(max_length=100)
entry = models.TextField()
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(null=True, blank=True, default=None)
show_recent = models.BooleanField(default=True)
pinned = models.BooleanField(default=False)
locked = models.BooleanField(default=False)
private = models.SmallIntegerField(
choices=PostVisibility.choices,
default=PostVisibility.PUBLIC
)
deleted = models.BooleanField(default=False)
user = models.ForeignKey(
get_user_model(),
on_delete=models.SET(get_sentinel_user)
)
posts = PostManager()
objects = models.Manager()
@property
def comments_count(self):
return Comment.comments.filter(post=self).count()
def get_absolute_url(self):
return reverse('user:post', kwargs={'username': self.user.username, 'entry': self.id})
@staticmethod
def posts_visible(user):
if user.is_authenticated:
if user.is_staff:
query = Q(private__lte=PostVisibility.STAFF)
elif user.profile.is_regular:
query = Q(private__lte=PostVisibility.REGULAR)
else:
query = Q(private__lte=PostVisibility.REGISTERED)
query = query | Q(user=user)
else:
query = Q(private=PostVisibility.PUBLIC)
return Post.posts.filter(query)
def user_can_view(self, user):
visible = (self.private == PostVisibility.PUBLIC)
visible = visible or (self.user == user)
visible = visible or (user.is_staff)
if user.is_authenticated:
visible = visible or (self.private == PostVisibility.REGISTERED)
visible = visible or (self.private == PostVisibility.REGULAR and user.profile.is_regular)
return visible
@property
def visible_description(self):
if self.private == PostVisibility.PUBLIC:
return ''
else:
desc = [v[1] for i,v in enumerate(PostVisibility.choices) if v[0] == self.private]
return 'Visible to %s' % (desc[0])
def __str__(self):
return '[%s] %s' % (self.user.username,self.title)
class Meta:
ordering = ['-created']
class CommentManager(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(deleted=False)
class Comment(models.Model):
entry = models.TextField()
created = models.DateTimeField(auto_now_add=True)
updated = models.DateTimeField(null=True, blank=True, default=None)
deleted = models.BooleanField(default=False)
post = models.ForeignKey(Post, on_delete=models.CASCADE)
user = models.ForeignKey(
get_user_model(),
on_delete=models.SET(get_sentinel_user)
)
parent = models.ForeignKey('self', on_delete=models.DO_NOTHING, blank=True, null=True, default=None)
comments = CommentManager()
objects = models.Manager()
@property
def children(self):
return self.comment_set.all()
class Meta:
ordering = ['created']
``` |
{
"source": "64u71bm/Gym-Fitness-Tracker",
"score": 3
} |
#### File: 64u71bm/Gym-Fitness-Tracker/swek.py
```python
from tkinter import * #GUI package
import tkinter as tk
import sqlite3 as sq #For tables and database
import datetime
class EntryWithPlaceholder(tk.Entry):
def __init__(self, master=None, placeholder="PLACEHOLDER", color='grey', textvariable='enter'):
super().__init__(master, textvariable=textvariable)
self.placeholder = placeholder
self.placeholder_color = color
self.default_fg_color = self['fg']
self.bind("<FocusIn>", self.foc_in)
self.bind("<FocusOut>", self.foc_out)
self.put_placeholder()
def put_placeholder(self):
self.insert(0, self.placeholder)
self['fg'] = self.placeholder_color
def foc_in(self, *args):
if self['fg'] == self.placeholder_color:
self.delete('0', 'end')
self['fg'] = self.default_fg_color
def foc_out(self, *args):
if not self.get():
self.put_placeholder()
global MAINID
window = Tk()
window.title("ASD LAB PROJECT")
window.geometry('800x600+0+0')
header = Label(window, text="Gym Fitness Tracker", font=("arial",30,"bold"), fg="steelblue")
header.pack(ipady=50)
con = sq.connect('Gym.db') #dB browser for sqlite needed
c = con.cursor() #SQLite command, to connect to db so 'execute' method can be called
L2 = Label(window, text = "Date", font=("arial",18)).place(x=10,y=150)
L5 = Label(window, text = "Max Weight (KG)", font=("arial",18)).place(x=10,y=200)
L6 = Label(window, text = "Reps", font=("arial",18)).place(x=10,y=250)
L7 = Label(window, text = "Category", font=("arial", 18)).place(x=10,y=300)
#Create variables for each list
comp = StringVar(window)#For 1st dd
comp.set('----') #Inital placeholder for field
comp = StringVar(window)#2nd dropdown list
comp.set('----')
day = StringVar(window)
month = StringVar(window)
year = StringVar(window)
weight = StringVar(window)
reps = StringVar(window)
#Dictionary for drop down list x2
compound = {'Bench', 'Squat', 'Deadlift','OVH'}
compd = OptionMenu(window, comp, *compound).place(x=220,y=305)
#compase = OptionMenu(window, comp, *compound).place(x=100,y=500)
#ENTRY VALUE CONFIG!
now = datetime.datetime.now()
dayT = EntryWithPlaceholder(window, textvariable=day, placeholder=now.day)
dayT.config(width=5)
dayT.place(x=220,y=155)
monthT = EntryWithPlaceholder(window, textvariable=month, placeholder=now.month)
monthT.config(width=5)
monthT.place(x=278,y=155)
yearT = EntryWithPlaceholder(window, textvariable=year, placeholder=now.year)
yearT.config(width=5)
yearT.place(x=336,y=155)
weightT = EntryWithPlaceholder(window, textvariable=weight, placeholder="100")
weightT.config(width=18)
weightT.place(x=220,y=205)
repT = EntryWithPlaceholder(window, textvariable=reps, placeholder="10")
repT.config(width=18)
repT.place(x=220,y=255)
def set_vals(*args):
comp.set(args[0])
day.set(args[1])
month.set(args[2])
year.set(args[3])
weight.set(args[4])
reps.set(args[5])
def clear():
set_vals('----', '', '', '' ,'', '')
#get func to isolate the text entered in the entry boxes and submit to database
def get():
print("You have submitted a record")
c.execute('CREATE TABLE IF NOT EXISTS ' +comp.get()+ ' (Id INTEGER PRIMARY KEY AUTOINCREMENT, Datestamp TEXT, MaxWeight INTEGER, Reps INTEGER)') #SQL syntax
date = datetime.date(int(year.get()),int(month.get()), int(day.get())) #Date in format from 'import datetime'
c.execute('INSERT INTO '+comp.get()+' (Datestamp, MaxWeight, Reps) VALUES (?, ?, ?)',(date, weight.get(), reps.get())) #Insert record into database.
con.commit()
clear()
#Clear boxes when submit button is hit
def delete():
c.execute('delete from '+comp.get()+' where Id in (select max(id) from '+comp.get()+')')
con.commit()
def update():
c.execute('select * from '+comp.get())
data = list(c)[0]
date = data[1].split('-')
set_vals(comp.get(), date[2], date[1], date[0], data[2], data[3])
delete()
con.commit()
def record():
c.execute('SELECT * FROM ' +comp.get()) #Select from which ever compound lift is selected
frame = Frame(window)
frame.place(x= 500, y = 150)
Lb = Listbox(frame, height = 13, width = 35,font=("arial", 15))
Lb.pack(side = LEFT, fill = Y)
scroll = Scrollbar(frame, orient = VERTICAL) # set scrollbar to list box for when entries exceed size of list box
scroll.config(command = Lb.yview)
scroll.pack(side = RIGHT, fill = Y)
Lb.config(yscrollcommand = scroll.set)
# Lb.insert(0, 'Date, Max Weight, Reps') #first row in listbox
data = c.fetchall() # Gets the data from the table
for row in data:
nrow = "{0:10} : {1:<4s}x {2:<4s}kg".format(str(row[1]), str(row[3]), str(row[2]))
Lb.insert(1,nrow) # Inserts record row by row in list box
L7 = Label(window, text = comp.get()+ ' Workouts : In order of entry',
font=("arial", 16)).place(x=500,y=100) # Title of list box, given which compound lift is chosen
con.commit()
button_1 = Button(window, text="Submit",command=get)
button_1.place(x=150,y=400)
button_1.config(width=10)
button_2 = Button(window,text= "Clear",command=clear)
button_2.place(x=10,y=400)
button_2.config(width=10)
button_3 = Button(window,text="Open DB",command=record)
button_3.place(x=290,y=435)
button_3.config(width=10)
button_1 = Button(window, text="Delete Last",command=delete)
button_1.place(x=150,y=435)
button_1.config(width=10)
button_5 = Button(window,text="Quit",command=window.destroy)
button_5.place(x=10,y=470)
button_5.config(width=10)
button_4 = Button(window,text="Update Last",command=update)
button_4.place(x=10,y=435)
button_4.config(width=10)
window.mainloop() #mainloop() -> make sure that window stays open
``` |
{
"source": "64u71bm/Multiplayer-Battleships",
"score": 3
} |
#### File: 64u71bm/Multiplayer-Battleships/client.py
```python
import traceback
from socket import *
import warcode as wc
import sys
import configurationmanager as cm
from random import randint
import battleshiphelp as h
import securedsocket as ss
SINGLE_PLAYER_OPTION = "1"
MULTI_PLAYER_OPTION = "2"
JOIN_PLAYER_OPTION = "3"
QUIT_OPTION = "4"
class User:
def __init__(self,name):
self.name = name
self.host_name = cm.server_host
self.tcp_port = cm.tcp_server_port
self.threads = []
self.messages = []
self.running = True
self.code = wc.WarCode() # war codes translator
def quit(self):
self.running = False
for t in self.threads:
t.join()
try:
self.sslsocket.close()
except:
pass
def set_connections_threads(self):
print("Connecting server to port ..." + str(self.tcp_port))
temp_socket = socket(AF_INET, SOCK_STREAM)
temp_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
temp_socket.connect((self.host_name,self.tcp_port))
temp_socket = ss.RSASocket(temp_socket)
temp_socket.send(self.name)
# preparing new port
plain_socket = socket(AF_INET, SOCK_STREAM)
plain_socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
self.sslsocket = ss.RSASocket(plain_socket) # encrypted socket
error = True
while error:
try:
port = int(temp_socket.recv(1024))
self.sslsocket.connect((self.host_name, port)) # connect to the new port
error = False
except Exception as e:
print(e)
traceback.print_exc(file=sys.stdout)
return False
temp_udp_port = randint(2000,60000)
temp_udp_server_socket = socket(AF_INET, SOCK_DGRAM)
temp_udp_server_socket.bind(('', temp_udp_port))
# to find out what port is assigned to the udp sender socket
temp_send_udp_client_socket = socket(AF_INET, SOCK_DGRAM)
temp_send_udp_client_socket.sendto("Test".encode(), (cm.server_host, temp_udp_port))
_,chat_sending_address = temp_udp_server_socket.recvfrom(1024)
self.sslsocket.send(str(chat_sending_address))
self.sslsocket.recv(1024)
# to find out what port is assigned to the udp receiver socket
temp_recv_udp_client_socket = socket(AF_INET, SOCK_DGRAM)
temp_recv_udp_client_socket.sendto("Test".encode(), (cm.server_host, temp_udp_port))
_,chat_receiving_address = temp_udp_server_socket.recvfrom(1024)
self.sslsocket.send(str(chat_receiving_address))
self.sslsocket.recv(1024)
try:
code = str(randint(0,10000))
chat_file_name = "client" + code
with open(chat_file_name, "a") as my_file:
my_file.write(self.name + "\n")
my_file.write(str(chat_sending_address)[1:-1] + "\n")
my_file.write(str(chat_receiving_address)[1:-1])
print("If you want to use the chat use this code: " + code)
except Exception as e:
traceback.print_exc(file=sys.stdout)
print(e)
print("Unexpected error, not chat client available")
temp_socket.close()
temp_recv_udp_client_socket.close()
temp_send_udp_client_socket.close()
return True
# run
def run(self):
while self.running:
self.menu()
if(self.code.is_quitting_option):
print("Good bye")
elif (self.code.is_single_player_option):
self.play_single()
elif(self.code.is_multiplayer_option):
self.play_multiplayer()
elif(self.code.is_join_option):
self.join_game()
elif(self.code.is_medal_option):
self.medals()
elif(self.code.is_help_option):
self.help()
else:
print("Wrong, please enter again")
# follows the communication steps through the menu
def menu(self):
menu = self.code.translate(self.receive())
print(menu)
answer = str(input(": ")).upper()
coded = self.code.main_option(answer)
self.send(coded)
self.code.translate(coded)
self.running = not self.code.is_quitting_option
# follows the communication steps through the game
def play_single(self):
quit = False
while not quit :
server_msg = self.receive()
msg = self.code.translate(server_msg)
if (self.code.is_boards):
self.print_boards(msg)
else:
print(msg)
enemies = self.code.enemies
if(self.code.game_finished()):
return
valid = False
ans = ""
formatted_answer = ""
while not valid:
ans = input("Enter your coordinates x(0-5) y(0-5):")
quit = ans.upper() == QUIT_OPTION
if(quit): # player wants to quit
formatted_answer = self.code.quit() # code the quit message
break # no validation needed
if (len(ans.split())==2):
ans = "4 " + ans
valid = self.code.valid_shot(ans.strip(),enemies)
if (not valid):
print("Not valid values, please enter valid coordinates")
if(not quit): # we are not quiting
formatted_answer = self.code.shoot(ans.strip()) # we have a valid shoot
self.send(formatted_answer) # send the resulting answer
# creates a multiplayer game
def play_multiplayer(self):
valid_players = False
msg = self.receive()
while not valid_players:
ans = input(self.code.translate(msg))
try:
quit = str(ans) == "Q"
if (quit):
return
num = int(ans)
valid_players = num > 1
except Exception as e :
print("Not a valid option, please enter valid number")
self.send(self.code.players_option(num))
self.play_game_together()
# joins a game
def join_game(self):
valid_game = False
msg = self.receive()
self.code.translate(msg)
if(self.code.no_game):
print("No game open. Sending back to the main menu...")
return
while not valid_game:
ans = input(self.code.translate(msg))
try:
quit = str(ans) == "Q"
num = int(ans)
valid_game = self.code.valid_game_option(str(ans)) or quit
except Exception as e :
print(e)
traceback.print_exc(file=sys.stdout)
print("Not a valid option, please enter valid number")
self.send(self.code.game_option(str(num)))
self.play_game_together()
# this is the shared code for multiplayer-join options
def play_game_together(self):
valid_team = False
msg = self.receive()
while not valid_team:
ans = input(self.code.translate(msg))
try:
quit = str(ans) == "Q"
team = int(ans)
valid_team = team > 0 and team < 6
except:
print("Not a valid team, please enter valid option")
self.send(self.code.teams_option(team))
print("Waiting for others players to connect...")
msg = self.code.translate(self.receive()) # receive the initial board or waiting termination
if(not self.code.abort_waiting):
print(self.code.translate(msg)) # show to the player
self.send(self.code.acknowledgement()) # send acknowledgement back
while not quit :
in_turn = False
while not in_turn:
msg = self.receive()
all_boards_msg = self.code.translate(msg)
in_turn = self.code.is_in_turn
if (in_turn):
self.print_boards(self.code.translate(all_boards_msg))
enemies = self.code.enemies
if (not in_turn and not (self.code.won or self.code.lost)):
self.send(self.code.acknowledgement()) # send acknowledgement back
print("Wait for your turn " + self.name)
if (self.code.won or self.code.lost): # nothing else to do
won_msg = "Congratulation you won!!"
lost_msg = "You lost, good luck next time"
print(won_msg if self.code.won else lost_msg) # feedback to player
if (self.code.won):
self.send(self.code.acknowledgement())
return
valid = False
formatted_answer = ""
while not valid:
ans = input("Enter the team and the coordinates team(#) x(0-5) y(0-5):")
quit = ans.upper() == QUIT_OPTION
if(quit): # player wants to quit
formatted_answer = self.code.quit() # code the quit message
break # no validation needed
valid = self.code.valid_shot(ans.strip(),enemies)
if (not valid):
print("Not valid, please enter again")
if(not quit): # we are not quiting
formatted_answer = self.code.shoot(ans.strip()) # we have a valid shoot
self.send(formatted_answer) # send the resulting answer
else:
self.send(self.code.acknowledgement())
# medals
def medals(self):
print(self.code.translate(self.receive()))
# show help
def help(self):
self.receive()
h.show_help()
# sends a packet thru the sslsocket
def send(self,msg):
self.sslsocket.send(msg)
# receive a packet thru the sslsocket
def receive(self):
return self.sslsocket.recv(1024)
# formats the boards on the shell
def print_boards(self,boards):
e_index = boards.index("E")
friend_section = boards[:e_index].strip().split(" ")
enemy_section = boards[e_index:].strip().split(" ")
self.print_section(friend_section)
print("------------------------------------------------------------------")
self.print_section(enemy_section)
print("------------------------------------------------------------------")
# prints one row of boards either the enemies of the friends
def print_section(self,section):
print("Friends" if section[0].strip() == "F" else "Enemies")
lines = ["" for _ in range(7)]
for board in section[1:]:
comma = board.index(",")
lines[0] += board[:comma].strip() + " "
board_line = board[comma+1:].split()
for i in range(0,6):
lines[i+1] += board_line[i] + " "
for line in lines:
print(line)
def verification():
udp_socket = socket(AF_INET, SOCK_DGRAM)
verif =False
try:
udp_socket.sendto("OPEN".encode(),(cm.server_host,cm.udp_ping_port))
udp_socket.settimeout(20)
_,_ = udp_socket.recvfrom(1024)
verif = True
except socket.timeout:
print("Server is down!!!")
finally:
udp_socket.close()
return verif
# Entry point
def main():
print("Welcome to the battleship game")
sys.stdout.write("Verifying if the server is ready...")
if(not verification()):
return
print("Done!!")
user_name = input("Please enter your user name: ")
print("Thank you " + user_name)
u = User(user_name)
try:
ok = u.set_connections_threads()
if not ok:
return
u.run()
except KeyboardInterrupt:
print("Keyboard Interrupt. Time to say goodbye!!!")
except Exception as e:
print(e)
traceback.print_exc(file=sys.stdout)
print("There was a problem. Game aborted")
finally:
u.quit()
sys.exit(0)
if __name__ == "__main__":
main()
```
#### File: 64u71bm/Multiplayer-Battleships/gameserverstatus.py
```python
import time
import player
import warcode as wc
import sys, traceback
BLUE_INDEX = 0
RED_INDEX = 1
YELLOW_INDEX = 2
GREEN_INDEX = 3
WHITE_INDEX = 4
BLUE_TEAM = "BLUE"
RED_TEAM = "RED"
YELLOW_TEAM = "YELLOW"
GREEN_TEAM = "GREEN"
WHITE_TEAM = "WHITE"
class ServerStatus:
def __init__(self,max):
self.game_id = 0 # game id will increase for every game initiated
self.players = [] # list of all active players in the game
self.max_player_connected = max # maximum amount of connected players
self.code = wc.WarCode()
# contains a list with all players connected to the server
# an index (game_id) to identify each game
# special structure: games (dictionary)
# this instance variable has all the information about all games in the server
# the keys are accessed through game_id (integer incremented every tome a game is created)
# the values are formed by:
# teams : dictionary of players per team (dictionary of players lists)
# max_player : maximum amount of player allowed (integer) in this game
# open : game status true = receiving players (boolean)
# deadlock : indicates if the game is in deadlock (boolean)
# timestamp : to control the waiting time of the user to avoid deadlock (time)
self.games = {}
# "creates" another key for a new game and returns it
def new_game_id(self):
self.game_id += 1
return self.game_id
# adds a single player game
def add_singleplayer_game(self,this_game_id,player):
self.add_multiplayer_game(this_game_id,player,1,WHITE_TEAM)
# adds a new game & player to the server_games
def add_multiplayer_game(self,this_game_id,player,max_players,team):
player.team = team # redundant(below) but used to improve efficiency
player.activate(this_game_id) # player was assigned a board
teams = {} # a dictionary of players per teams
teams[team] = [player],0 # first player in this team, player in turn
is_open = team != WHITE_TEAM
self.games[this_game_id] = \
{
"teams" : teams, # game teams
"pl_count" : 1, # how many players desired
"max_player": max_players, # max players allowed
"open" : is_open, # an open game is missing players
"deadlock" : False, # a new game is never in deadlock
"timestamp" : time.time(), # to control how long this user is waiting for other players
"turn" : self.get_team_index(team) # first turn for this player
}
# is the player waiting too much?
def waiting_too_long(self,this_game_id,time):
return time - self.games[this_game_id]["timestamp"] > 60 # one minute is OK, no more
# adds a player to a team in an open game in the server_games
def update_game_player(self,this_game_id, player,team):
teams = self.games[this_game_id]["teams"]
player.game = this_game_id # to improve efficiency
player.team = team # to improve efficiency
if (team in teams): # is there are players in the team
players,turn = teams[team] # get the players
players.append(player) # add them to the team
else:
players = [] # no players in that team create a list
players.append(player) # append this player
teams[team] = players,0 # create the team in the tree
self.games[this_game_id]['open'] = len(self.player_game_players(player)) < self.games[this_game_id]["max_player"]
def remove_player(self,game_id,player):
players,turn = self.games[game_id]["teams"][player.team]
index = players.index(player)
players.remove(player)
if turn >= index:
turn -= 1
player.deactivate()
if(players): # more players in the team???
self.games[game_id]["teams"][player.team] = players, turn
else:
self.games[game_id]["teams"].pop(player.team,None) # no more players, remove the team
# fix any game in deadlock
def detect_fix_deadLocks(self):
for game_id in list(self.games):
if(self.is_in_deadlock(game_id)):
self.release_game_resources(game_id)
# rule that define a game in deadlock
def is_in_deadlock(self, game_id):
return self.games[game_id]["deadlock"]
# Inactivates all the players in the specified game and removes the game from the server
def release_game_resources(self, this_game_id):
teams = list(self.games[this_game_id]["teams"]) # all the team in the game
        game_won = not self.games[this_game_id]["open"] # the game was played if it is not open: there is a winner
for team in list(teams):
team_players, _ = self.games[this_game_id]["teams"][team] # all the player in that team
for player in team_players:
player.deactivate() # inactivate all the player
self.games[this_game_id]["teams"] = {} # just clears the team list and doesn't destroy the players
self.games.pop(this_game_id, None) # remove this game from the variable
# games finished
def detect_fix_finished_games(self,msg):
for game_id in list(self.games):
if (self.is_finished(game_id)):
self.inform_players(game_id,msg)
self.release_game_resources(game_id)
# rule that define a finished game
def is_finished(self,game_id):
teams = list(self.games[game_id]["teams"])
return (len(teams) == 1 and (teams[0]!= WHITE_TEAM and not self.games[game_id]["open"]))
# inform the players of the game
def inform_players(self,game_id,msg):
for player in list(self.players):
if(player.game == game_id):
player.won_game()
player.send(msg)
                _ = player.receive()  # avoid shadowing the built-in str
# this game is in deadlock
def set_deadlock(self,game_id):
        self.games[game_id]["deadlock"] = True
# returns the all the players connected
def server_players(self):
return self.players
# returns a list with all the players in the same game of the player
def player_game_players(self,player):
return [p for p in list(self.players) if p.game == player.game]
# returns the list of all the player in the same team of the player
def player_team_players(self,player):
return [p for p in list(self.players) if p.game == player.game and p.team == player.team]
# adds a new player to the game server
def add_player(self,player):
self.players.append(player)
# find a player by the name
def find_player_by_name(self,name):
try:
if (not self.players):
return None
return next((p for p in list(self.players) if p.name == name),None)
except Exception as e:
print(e)
traceback.print_exc(file=sys.stdout)
# find a player by the addr in the list
def find_player_by_tcp_address(self, address):
if(not self.players):
return None
        return next((p for p in list(self.players) if p.tcp_address == address), None)
# find a player by the addr in the list
def find_player_by_udp_sending_address(self, address):
if (not self.players):
return None
        return next((p for p in list(self.players) if p.udp_address_sending == address), None)
# is server full?
def is_full(self):
return self.max_player_connected == len(self.players)
# all the players
def detect_inactive_player(self):
for player in self.players:
if(player.is_inactive()):
return player
return None
# returns all open games
def open_games(self):
return [g_id for g_id in list(self.games) if self.games[g_id]["open"]]
# returns if the game is open
def is_game_open(self,game_id):
return self.games[game_id]["open"]
# returns the player in turn
def get_player_in_turn(self,game_id):
game = self.games[game_id] # this game
teams = game["teams"] # its teams
team = self.get_team_str(game["turn"]) # team in turn
players,turn = teams[team] # get its players and turn
if (players):
turn = turn % len(players)
return players[turn]
return None
# get next team in turn with players
def next_team(self,game_id):
if (game_id in self.games):
game = self.games[game_id] # get the game
teams = game["teams"] # get the teams in that game
turn = game["turn"] # get the team in turn
there_is_team = False #
while not there_is_team: # iterate until there is a team with players
turn = (turn + 1) % 4 # next team
team_str = self.get_team_str(turn) #
there_is_team = team_str in teams # found the team
return turn # return it
return -1
# changes the values of the internal variables to the next player
def next_player(self,game_id):
if (game_id in self.games):
game = self.games[game_id] # this game
teams = game["teams"] # its teams
turn = self.next_team(game_id)
if (turn>=0):
game["turn"] = self.next_team(game_id) # next team
players,turn = teams[self.get_team_str(game["turn"])] # get its players and turn
teams[self.get_team_str(game["turn"])] = players,turn+1 # next player
# returns the string representation for the team index
def get_team_str(self,index):
if (index == BLUE_INDEX):
return BLUE_TEAM
elif (index == RED_INDEX):
return RED_TEAM
elif (index == YELLOW_INDEX):
return YELLOW_TEAM
elif (index == GREEN_INDEX):
return GREEN_TEAM
else:
return WHITE_TEAM
# returns the index of the team
def get_team_index(self,team):
if (team == BLUE_TEAM):
return BLUE_INDEX
elif (team == RED_TEAM):
return RED_INDEX
elif (team == YELLOW_TEAM):
return YELLOW_INDEX
elif (team == GREEN_TEAM):
return GREEN_INDEX
else:
return WHITE_INDEX
# returns the player's teammates
def get_friends(self,game_id,player):
team_mates,_ = self.games[game_id]["teams"][player.team] # all the players in the player's team
return team_mates
# return the list of all the player's enemies in the game
def get_enemies(self,game_id,player):
result = []
teams = self.games[game_id]["teams"] # this game's teams
for team in list(teams): # for all
if (player.team == team): # compare with player's team
continue # if it is the same team then get next team and do nothing
players,turn = teams[team] # get the players
result.extend(players) # adds all the players of this team to the list
return result # returns all the players
```
#### File: 64u71bm/Multiplayer-Battleships/securedsocket.py
```python
class RSASocket:
def __init__(self,socket):
self.socket = socket
p = 3
q = 5
n = p*q # 15
z = (p-1)*(q-1) # 8
e = 7 # e < z and coprime with z
        d = 7 # d*e mod z = 1 => 49 mod 8 = 1
self.encrypted = False
self.public_key = e,n
self.private_key = d,n
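        # Note: with these toy parameters the key pair is consistent (7*7 mod 8 == 1), but since
        # n = 15 the encrypt/decrypt round trip only preserves values smaller than n, so ordinary
        # ASCII codes are not recovered exactly; self.encrypted stays False by default above.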
def connect(self,regular_parameters):
self.socket.connect(regular_parameters)
def send(self,msg):
if(self.encrypted):
self.socket.send(self.encrypt(msg).encode())
else:
self.socket.send(msg.encode())
def recv(self,num):
if (self.encrypted):
return self.decrypt(self.socket.recv(num).decode())
else:
return self.socket.recv(num).decode()
def sendto(self,msg,address):
if (self.encrypted):
self.socket.sendto(self.encrypt(msg).encode(), address)
else:
self.socket.sendto(msg.encode(), address)
def recvfrom(self,num):
msg, addr = self.socket.recvfrom(num)
if (self.encrypted):
return self.decrypt(msg.decode()), addr
else:
return msg.decode(), addr
def decrypt(self,cipher_text):
d, n = self.private_key
print("before :" + cipher_text)###############
nums = [int(num) for num in cipher_text[1:-1].split(",")]
print("nums before :" + str(nums))################
        plain = [chr((num ** d) % n) for num in nums]
#plain = [chr((char ** d) % n) for char in cipher_text]
return ''.join(plain)
def encrypt(self,msg):
e, n = self.public_key
cipher_text = [(ord(char) ** e) % n for char in msg]
print( "cipher text :" + str(cipher_text))
return str(cipher_text)
def close(self):
self.socket.close()
``` |
{
"source": "64u71bm/Python-Programs",
"score": 4
} |
#### File: 64u71bm/Python-Programs/perfect.py
```python
def perfect(n):
s=0
m=n
for i in range(1,n):
if(n%i==0):
s=s+i
if(s==m):
print "perfect no."
else:
print "not perfect no."
k=int(input("enter the no. "))
perfect(k)
``` |
{
"source": "651juan/ProjectAI",
"score": 3
} |
#### File: 651juan/ProjectAI/api_functions.py
```python
import argparse
import io
import pickle
import zipfile
from zipfile import ZipFile
import shutil
import ntpath
import requests
import json
import math
import numpy as np
import cv2
import os
import time
def create_zip_from_dataset(input_folder, output_folder, num_images=3, bundle_size=16):
if num_images * bundle_size > 50:
print("You're using more than 50 images per bundle, API will probably reject this")
return
if not os.path.isdir(output_folder):
os.mkdir(output_folder)
# get all folders in input_folder
print("Reading folders...")
folders = [f for f in os.listdir(input_folder) if os.path.isdir(os.path.join(input_folder, f))]
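    # Example layout (with the defaults num_images=3, bundle_size=16): bundle_0.zip ends up holding
    # files renamed like "<identity_folder>_0.jpg" ... "<identity_folder>_2.jpg" for the first 16
    # identity folders, bundle_1.zip the next 16, and so on.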
# iterate through bundles, assign name and folder
for bundle_index in range(int(math.ceil(len(folders) / bundle_size))):
bundle_name = 'bundle' + '_' + str(bundle_index)
image_dump_folder = os.path.join(output_folder, bundle_name)
os.mkdir(image_dump_folder)
# for each folder belonging to this bundle
for folder in folders[bundle_index * bundle_size:bundle_index * bundle_size + bundle_size]:
print(folder)
# get all files in each folder, take only a specified number of them
folder_path = os.path.join(input_folder, folder)
files = [f for f in os.listdir(folder_path) if os.path.isfile(os.path.join(folder_path, f))]
selected_files = files[:num_images]
# copy each file to the image_dump_folder and rename it to the form foldername_i.jpg
for i, file in enumerate(selected_files):
shutil.copy2(os.path.join(input_folder, folder, file), image_dump_folder)
new_file_name = folder + '_' + str(i) + '.jpg'
os.rename(os.path.join(image_dump_folder, file), os.path.join(image_dump_folder, new_file_name))
# make archive from the images
shutil.make_archive(os.path.join(output_folder, bundle_name), "zip", image_dump_folder)
# delete the images, keep the zip only, then go to the next bundle
shutil.rmtree(image_dump_folder)
def download_api_results(output_folder, bundle_dict_path, sleep_time):
bundle_dict = pickle.load(open(bundle_dict_path, 'rb'))
if not os.path.isdir(output_folder):
os.mkdir(output_folder)
# for bundle, response in bundle_dict.items():
# print(bundle, response, "<Response [200]>" not in str(response))
for bundle, response in bundle_dict.items():
if "<Response [200]>" not in str(response):
continue
bundle_folder = os.path.join(output_folder, bundle.split(".")[0])
if os.path.isdir(bundle_folder):
print("Already downloaded! Skipping " + bundle)
continue
print("Getting " + bundle)
process_id = json.loads(response.text)['processId']
# build the download link using processId
url = "http://yourface.3duniversum.com/uploaded/" + process_id + "/batch_processed.zip"
start_time = time.time()
res = requests.get(url)
if not res.ok:
print("File not found! Skipping " + bundle)
continue
try:
zip_file = zipfile.ZipFile(io.BytesIO(res.content))
except Exception as e:
print(str(e) + "skipping bundle " + bundle)
continue
zip_file.extractall(bundle_folder)
zip_file.close()
print("Download successful! Time: " + str(time.time() - start_time))
time.sleep(sleep_time)
"""
ordering of the API output folders (index of the image in the uploaded zip -> numbered result folder):
0.0 -> 0
0.1 -> 1
0.2 -> 12
1.0 -> 17
1.1 -> 18
1.2 -> 19
2.0 -> 20
2.1 -> 21
2.2 -> 22
3.0 -> 23
3.1 -> 2
3.2 -> 3
4.0 -> 4
4.1 -> 5
4.2 -> 6
5.0 -> 7
5.1 -> 8
5.2 -> 9
6.0 -> 10
6.1 -> 11
6.2 -> 13
7.0 -> 14
7.1 -> 15
7.2 -> 16
"""
# def add_api_results(path_to_zip, path_to_dataset, num_images, num_augments):
def reorganize_results(bundle_folder, processed_bundle_folder, output_folder, augment_types):
if not os.path.isdir(output_folder):
os.mkdir(output_folder)
# get all processed bundles(folders)
processed_bundles = [f for f in os.listdir(processed_bundle_folder) if
os.path.isdir(os.path.join(processed_bundle_folder, f))]
for bundle in processed_bundles:
# open the original zip and get the list of file names
zip = zipfile.ZipFile(os.path.join(bundle_folder, bundle + ".zip"))
orig_file_names = zip.namelist()
        # Maps each image's position in the original zip to the numbered folder the API returned it in (see the ordering table above)
cancer = [0, 1, 12, 17, 18, 19, 20, 21, 22, 23, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16]
for i, file_name in enumerate(orig_file_names):
# the beginning of the file name is the name of the original folder = the identity of the person
folder_name = file_name.split("_")[0]
if not os.path.isdir(os.path.join(output_folder, folder_name)):
os.mkdir(os.path.join(output_folder, folder_name))
# copy all augmented files to that person's folder
for augment in augment_types:
# i represents the number of the folder inside the processed bundle, each containts multiple augments for 1 image
shutil.copy2(os.path.join(processed_bundle_folder, bundle, str(cancer[i]), augment),
os.path.join(output_folder, folder_name))
os.rename(os.path.join(output_folder, folder_name, augment),
os.path.join(output_folder, folder_name, file_name.split(".")[0] + "_" + augment))
"""
for i, file_name in enumerate(orig_file_names):
# the beginning of the file name is the name of the original folder = the identity of the person
folder_name = file_name.split("_")[0]
if not os.path.isdir(os.path.join(output_folder, folder_name)):
os.mkdir(os.path.join(output_folder, folder_name))
# copy all augmented files to that person's folder
for augment in augment_types:
# i represents the number of the folder inside the processed bundle, each containts multiple augments for 1 image
shutil.copy2(os.path.join(processed_bundle_folder, bundle, str(i), augment),
os.path.join(output_folder, folder_name))
os.rename(os.path.join(output_folder, folder_name, augment),
os.path.join(output_folder, folder_name, file_name.split(".")[0] + "_" + augment))
"""
def query_api(yaw, pitch, roll, bundle_path, email):
url = 'http://yourface.3duniversum.com/api/faceGen/upload'
file = open(bundle_path, 'rb') # flat structure zip file
files = {'images': (bundle_path, file)}
payload = {
"glasses": {
"models": ["HazzBerry"], # available glasses model
"materials": [
{
"name": "Obsidian Black", # tag name
"frame": {"color": "rgb(0,0,0)"}, # frame color
"glass": {"color": "rgb(255, 255, 255)", "opacity": 0.3} # glass may have shader issue
}
]
},
"poses": [
{
"yaw": yaw, # it can be range (min, max, interval)
"pitch": pitch,
"roll": roll # or just a single value
}
]
}
data = {
"variants": json.dumps(payload),
"email": email,
}
r = requests.post(url, files=files, data=data)
file.close()
# r_json = json.loads(r.text)
# print(r)
# print(type(r_json))
# print(r_json['processId'])
return r
def query_api_testing():
import requests
import json
import os
url = 'http://yourface.3duniversum.com/api/faceGen/upload'
file = open('bundle_0.zip', 'rb') # flat structure zip file
files = {'images': ('bundle_0.zip', file)}
payload = {
"glasses": {
# "models": ["HazzBerry", "GerretLight", "Enzo", "M14", "M10"], # available glasses model
"models": ["HazzBerry"], # available glasses model
"materials": [
{
"name": "<NAME>", # tag name
"frame": {"color": "rgb(0,0,0)"}, # frame color
"glass": {"color": "rgb(255, 255, 255)", "opacity": 0.3} # glass may have shader issue
}
# {
# "name": "<NAME>",
# "frame": { "color": "rgb(168, 32, 26)" },
# "glass": { "color": "rgb(255, 255, 255)", "opacity": 0.3 }
# },
# {
# "name": "<NAME>",
# "frame": { "color": "rgb(255, 242, 0)" },
# "glass": { "color": "rgb(255, 255, 255)", "opacity": 0.3 }
# },
# {
# "name": "<NAME>",
# "frame": { "color": "rgb(66, 134, 244)" },
# "glass": { "color": "rgb(255, 255, 255)", "opacity": 0.3 }
# },
# {
# "name": "<NAME>",
# "frame": { "color": "rgb(59, 173, 46)" },
# "glass": { "color": "rgb(255, 255, 255)", "opacity": 0.3 }
# }
]
},
"poses": [
{
"yaw": [-30, 30, 30], # it can be range (min, max, interval)
# "pitch": [-15, 15, 15],
# "yaw": 0, # it can be range (min, max, interval)
"pitch": 0,
# "yaw": 0,
# "pitch": 0,
"roll": 0 # or just a single value
}
]
}
data = {
"variants": json.dumps(payload),
"email": "<EMAIL>",
}
r = requests.post(url, files=files, data=data)
file.close()
print(r)
```
#### File: 651juan/ProjectAI/dataset_functions.py
```python
import argparse
import zipfile
import time
import shutil
import ntpath
import numpy as np
import cv2
import os
def reduce_dataset(input_folder, output_folder, num_images):
# folders = os.listdir(input_folder)
replicate_folder_structure(input_folder, output_folder)
folders = [f for f in os.listdir(input_folder) if os.path.isdir(os.path.join(input_folder, f))]
for folder in folders:
print(folder)
folder_path = os.path.join(input_folder, folder)
files = [f for f in os.listdir(folder_path) if os.path.isfile(os.path.join(folder_path, f))]
selected_files = files[:num_images]
for file in selected_files:
shutil.copy2(os.path.join(input_folder, folder, file), os.path.join(output_folder, folder))
def replicate_folder_structure(input_folder, output_folder):
print("Replicating folder structure...")
if not os.path.isdir(output_folder):
os.mkdir(output_folder)
for dirpath, dirnames, filenames in os.walk(input_folder):
for dir in dirnames:
path = os.path.join(output_folder, dir)
if not os.path.isdir(path):
os.mkdir(path)
else:
print("Folder already exists, skipping...")
``` |
{
"source": "651juan/Teaching_a_chatbot",
"score": 3
} |
#### File: 651juan/Teaching_a_chatbot/movie_data.py
```python
class MovieData:
def __init__(self,movie_name,imdb_id,plot,review,facts_table,comments,spans,labels,chat,chat_id):
self.movie_name=movie_name
self.imdb_id=imdb_id
self.plot=plot
self.review=review
self.facts_table=facts_table
self.comments=comments
self.spans=spans
self.labels=labels
self.chat=[]
if(chat is not None):
self.chat.append(Chat(chat_id,chat))
class Chat:
def __init__(self,chat_id,chats):
self.chat=[]
if(len(chats)%2!=0):
le=len(chats)-1
else:
le=len(chats)
self.chat_id=chat_id
self.encoder_chat=[]
self.decoder_chat=[]
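        # Pairs consecutive utterances into (encoder, decoder) examples; from the second pair onward
        # the previous exchange is prepended, wrapped in <SOS>/<EOS> markers, so the encoder input
        # carries a short dialogue history.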
try:
for i in range(0, le, 2):
if(i>=2):
self.encoder_chat.append("<SOS> "+chats[i-2]+" <EOS>"+" <SOS> "+chats[i-1]+" <EOS> "+chats[i])
else:
self.encoder_chat.append(chats[i])
self.decoder_chat.append(chats[i + 1])
self.chat.append(self.encoder_chat)
self.chat.append(self.decoder_chat)
except:
print("Error")
```
#### File: 651juan/Teaching_a_chatbot/read_data.py
```python
import json
from pprint import pprint
from movie_data import MovieData,Chat
import pickle
data=''
with open('train_data.json') as f:
data = json.load(f)
key=''
movie={}
tot=0
document_keys=['plot','review','comments']
other_keys=['movie_name','spans','chat']
w2i={}
i2w={}
dic_freq={}
w2i['<PAD>']=0
w2i['<SOS>'] = 1
w2i['<EOS>'] = 2
w2i['unknown']=3
i2w[0]='<PAD>'
i2w[1] = '<SOS>'
i2w[2] = '<EOS>'
i2w[3]='unknown'
index=4
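# ids 0-3 are reserved for <PAD>, <SOS>, <EOS> and unknown; real vocabulary entries start at 4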
def clean_data(data):
for key in data:
key['documents']['plot']=clear_quotes(key['documents']['plot'])
key['documents']['review']=clear_quotes(key['documents']['review'])
key['documents']['comments']=clear_quotes(key['documents']['comments'])
key['spans']=clear_quotes(key['spans'])
key['chat']=clear_quotes(key['chat'])
return data
def clear_quotes(data):
if isinstance(data,str):
data=[data]
for i in range(0, len(data)):
val = data[i]
val = val.replace('"', '')
val = val.replace('', '')
val = val.replace("'", "")
val = val.replace("(", "")
val = val.replace(")", "")
val = val.replace("?", "")
val = val.replace("!", "")
val = val.replace(".", "")
val = val.replace(",", "")
val=val.replace("*","")
val = val.replace("=", "")
val=val.lower()
data[i] = val
return data
def create_word_frequency(data):
for key in data:
for doc_key in document_keys:
values=key['documents'][doc_key]
helper_word_to_freq(values)
for other_key in other_keys:
values=key[other_key]
helper_word_to_freq(values)
def helper_word_to_freq(values):
if (isinstance(values, str)):
values = [values]
for value in values:
value_arr = value.split()
for word in value_arr:
if (word not in dic_freq):
dic_freq[word]=1
else:
dic_freq[word]=dic_freq[word]+1
def create_word_to_ind(data):
for key in data:
for doc_key in document_keys:
values=key['documents'][doc_key]
helper_word_to_index(values)
for other_key in other_keys:
values=key[other_key]
helper_word_to_index(values)
def helper_word_to_index(values):
global index
if(isinstance(values,str)):
values=[values]
for value in values:
value_arr=value.split()
for word in value_arr:
if(word not in w2i):
if(dic_freq[word]>15):
w2i[word] = index
i2w[str(index)] = word
index = index + 1
else:
w2i[word] = 3
def convert_data_to_obj(data):
all_ids = []
for key in data:
imdb_id = key['imdb_id']
if (imdb_id in movie):
movie_data = movie[imdb_id]
#print(movie_data)
chat = key['chat']
#print(chat)
chat_id = key['chat_id']
chat_data = Chat(chat_id, chat)
movie_data.chat.append(chat_data)
curr_review = key['documents']['review']
for reviews in curr_review:
if (reviews not in movie_data.review):
movie_data.review.append(reviews)
curr_fact_table = key['documents']['fact_table']
for fact_key in curr_fact_table:
if (fact_key not in movie_data.facts_table):
movie_data.facts_table[fact_key] = curr_fact_table[fact_key]
curr_comments = key['documents']['comments']
for comment in curr_comments:
if (comment not in movie_data.comments):
movie_data.comments.append(comment)
curr_span = key['spans']
for span in curr_span:
if (span not in movie_data.spans):
movie_data.spans.append(span)
else:
plot = key['documents']['plot']
if all('' == s or s.isspace() for s in plot):
continue
review=(key['documents']['review'])
if all('' == s or s.isspace() for s in review):
continue
fact_table = key['documents']['fact_table']
if all('' == s or s.isspace() for s in fact_table):
continue
comments = key['documents']['comments']
if all('' == s or s.isspace() for s in comments):
continue
movie_name = key['movie_name']
if all('' == s or s.isspace() or s.isdigit() for s in movie_name):
continue
spans = key['spans']
if all('' == s or s.isspace() for s in spans):
continue
labels = key['labels']
if all('' == s for s in labels):
continue
if ("chat" in key):
chat = key["chat"]
else:
chat = None
movie_data = MovieData(movie_name, imdb_id, plot, review, fact_table, comments, spans, labels, chat,
key['chat_id'])
movie[imdb_id] = movie_data
return movie
data=clean_data(data)
create_word_frequency(data)
create_word_to_ind(data)
convert_data_to_obj(data)
with open('w_freq.json', 'w') as fp:
json.dump(dic_freq, fp)
with open('w2i.json', 'w') as fp:
json.dump(w2i, fp)
with open('i2w.json', 'w') as fp:
json.dump(i2w, fp)
with open('movie_data.pkl', 'wb') as output:
pickle.dump(movie, output, pickle.HIGHEST_PROTOCOL)
##print(key['documents']['review'])
##print(key['documents']['fact_table'])
##print(key['documents']['comments']) array
##print(key['movie_name'])
##print(key['spans']) array
##print(key['labels']) array
##print(key['imdb_id'])
##print(key['chat_id'])
##print(key['chat']) array
``` |
{
"source": "654wak654/fc2",
"score": 3
} |
#### File: 654wak654/fc2/read_flt_log.py
```python
import struct
flightConfig = "E:/FLT_LOG.BIN"
# TODO: Also write a reader in arduino so data can be read without removing sd card
def getf(handle):
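    # Reads the next 4 bytes of the log and unpacks them as a float ("f" = native byte order here)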
return struct.unpack("f", handle.read(4))[0]
with open(flightConfig, mode='rb') as f:
    # TODO: Read the 28 initial bytes, then read below until we get 8 null, then repeat
print("currentTime =", int.from_bytes(f.read(4), byteorder='little', signed=False))
print("altitude =", getf(f))
accX = getf(f)
print("accX =", accX)
accY = getf(f)
print("accY =", accX)
accZ = getf(f)
print("accZ =", accX)
print("acceleration =", pow(pow(accX, 2) + pow(accY, 2) + pow(accZ, 2), 0.5))
print("angleX =", getf(f))
print("angleY =", getf(f))
print("angleZ =", getf(f))
print("bmpTemperature =", getf(f))
print("mpuTemperature =", getf(f))
print("stageSeperated =", f.read(1) == 1)
print("parachuteDeployed =", f.read(1) == 1)
print("\n")
``` |
{
"source": "6561/cs501-t1-assessment",
"score": 3
} |
#### File: server/auth/views.py
```python
from flask import Blueprint, request, make_response, jsonify
from flask.views import MethodView
from project.server import bcrypt, db
from project.server.models import User
import jwt
import json
auth_blueprint = Blueprint('auth', __name__)
class RegisterAPI(MethodView):
"""
User Registration Resource
"""
def get(self):
responseObject = {
'status': 'success',
'message': 'Request successful but please send an HTTP POST request to register the user.'
}
return make_response(jsonify(responseObject)), 201
def post(self):
# get the post data
        post_data = request.get_json()
        print(request)
# check if user already exists
user = User.query.filter_by(email=post_data.get('email')).first()
if not user:
print("heeeeeeere")
try:
user = User(
email=post_data.get('email'),
password=post_data.get('password')
)
# insert the user
db.session.add(user)
db.session.commit()
# generate the auth token
auth_token = user.encode_auth_token(user.id)
responseObject = {
'status': 'success',
'message': 'Successfully registered.',
'auth_token': user.decode_auth_token(auth_token)
}
return make_response(jsonify(responseObject)), 201
except Exception as e:
print("THIS IS e")
print(e)
print("THAT WAS e")
responseObject = {
'status': 'fail',
'message': 'Some error occurred. Please try again.'
}
return make_response(jsonify(responseObject)), 401
else:
responseObject = {
'status': 'fail',
'message': 'User already exists. Please Log in.',
}
return make_response(jsonify(responseObject)), 202
class ListingAPI(MethodView):
"""
User List Resource
"""
def get(self):
print("TRYING UP IN HERE")
ulist = User.query.all()
print("TRYING 2")
retlist = []
print("RETLIST LENGTH")
        print(len(retlist))
print("it was that")
for u in ulist:
retlist.append({'Email':u.email,'Registered_on':str(u.registered_on),'Admin':str(u.admin)})
responseObject = {
'status': 'success',
'message': retlist
}
return make_response(jsonify(responseObject)), 201
# define the API resources
registration_view = RegisterAPI.as_view('register_api')
listing_view = ListingAPI.as_view('listing_api')
# add Rules for API Endpoints
auth_blueprint.add_url_rule(
'/auth/register',
view_func=registration_view,
methods=['POST', 'GET']
)
auth_blueprint.add_url_rule(
'/users/index',
view_func=listing_view,
methods=['GET']
)
``` |
{
"source": "6607changchun/pytorch",
"score": 2
} |
#### File: test/jit/test_save_load_for_op_version.py
```python
from itertools import product as product
import io
import os
import sys
import hypothesis.strategies as st
from hypothesis import example, settings, given
from typing import Union
import torch
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
from torch.jit.mobile import _load_for_lite_interpreter
if __name__ == "__main__":
raise RuntimeError(
"This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead."
)
class TestSaveLoadForOpVersion(JitTestCase):
# Helper that returns the module after saving and loading
def _save_load_module(self, m):
scripted_module = torch.jit.script(m())
buffer = io.BytesIO()
torch.jit.save(scripted_module, buffer)
buffer.seek(0)
return torch.jit.load(buffer)
def _save_load_mobile_module(self, m):
scripted_module = torch.jit.script(m())
buffer = io.BytesIO(scripted_module._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
return _load_for_lite_interpreter(buffer)
# Helper which returns the result of a function or the exception the
# function threw.
def _try_fn(self, fn, *args, **kwargs):
try:
return fn(*args, **kwargs)
except Exception as e:
return e
def _verify_no(self, kind, m):
self._verify_count(kind, m, 0)
def _verify_count(self, kind, m, count):
node_count = sum(str(n).count(kind) for n in m.graph.nodes())
self.assertEqual(node_count, count)
"""
Tests that verify Torchscript remaps aten::div(_) from versions 0-3
to call either aten::true_divide(_), if an input is a float type,
or truncated aten::divide(_) otherwise.
NOTE: currently compares against current div behavior, too, since
div behavior has not yet been updated.
"""
@settings(max_examples=10, deadline=200000) # A total of 10 examples will be generated
@given(
sample_input=st.tuples(st.integers(min_value=5, max_value=199), st.floats(min_value=5.0, max_value=199.0))
) # Generate a pair (integer, float)
@example((2, 3, 2.0, 3.0)) # Ensure this example will be covered
def test_versioned_div_tensor(self, sample_input):
def historic_div(self, other):
if self.is_floating_point() or other.is_floating_point():
return self.true_divide(other)
return self.divide(other, rounding_mode='trunc')
# Tensor x Tensor
class MyModule(torch.nn.Module):
def __init__(self):
super(MyModule, self).__init__()
def forward(self, a, b):
result_0 = a / b
result_1 = torch.div(a, b)
result_2 = a.div(b)
return result_0, result_1, result_2
# Loads historic module
try:
v3_mobile_module = _load_for_lite_interpreter(
pytorch_test_dir + "/cpp/jit/upgrader_models/test_versioned_div_tensor_v2.ptl")
except Exception as e:
self.skipTest("Failed to load fixture!")
current_mobile_module = self._save_load_mobile_module(MyModule)
for val_a, val_b in product(sample_input, sample_input):
a = torch.tensor((val_a,))
b = torch.tensor((val_b,))
def _helper(m, fn):
m_results = self._try_fn(m, a, b)
fn_result = self._try_fn(fn, a, b)
if isinstance(m_results, Exception):
self.assertTrue(isinstance(fn_result, Exception))
else:
for result in m_results:
self.assertEqual(result, fn_result)
_helper(v3_mobile_module, historic_div)
_helper(current_mobile_module, torch.div)
@settings(max_examples=10, deadline=200000) # A total of 10 examples will be generated
@given(
sample_input=st.tuples(st.integers(min_value=5, max_value=199), st.floats(min_value=5.0, max_value=199.0))
) # Generate a pair (integer, float)
@example((2, 3, 2.0, 3.0)) # Ensure this example will be covered
def test_versioned_div_tensor_inplace(self, sample_input):
def historic_div_(self, other):
if self.is_floating_point() or other.is_floating_point():
return self.true_divide_(other)
return self.divide_(other, rounding_mode='trunc')
class MyModule(torch.nn.Module):
def __init__(self):
super(MyModule, self).__init__()
def forward(self, a, b):
a /= b
return a
try:
v3_mobile_module = _load_for_lite_interpreter(
pytorch_test_dir + "/cpp/jit/upgrader_models/test_versioned_div_tensor_inplace_v2.ptl")
except Exception as e:
self.skipTest("Failed to load fixture!")
current_mobile_module = self._save_load_mobile_module(MyModule)
for val_a, val_b in product(sample_input, sample_input):
a = torch.tensor((val_a,))
b = torch.tensor((val_b,))
def _helper(m, fn):
fn_result = self._try_fn(fn, a.clone(), b)
m_result = self._try_fn(m, a, b)
if isinstance(m_result, Exception):
self.assertTrue(fn_result, Exception)
else:
self.assertEqual(m_result, fn_result)
self.assertEqual(m_result, a)
_helper(v3_mobile_module, historic_div_)
# Recreates a since it was modified in place
a = torch.tensor((val_a,))
_helper(current_mobile_module, torch.Tensor.div_)
@settings(max_examples=10, deadline=200000) # A total of 10 examples will be generated
@given(
sample_input=st.tuples(st.integers(min_value=5, max_value=199), st.floats(min_value=5.0, max_value=199.0))
) # Generate a pair (integer, float)
@example((2, 3, 2.0, 3.0)) # Ensure this example will be covered
def test_versioned_div_tensor_out(self, sample_input):
def historic_div_out(self, other, out):
if self.is_floating_point() or other.is_floating_point() or out.is_floating_point():
return torch.true_divide(self, other, out=out)
return torch.divide(self, other, out=out, rounding_mode='trunc')
class MyModule(torch.nn.Module):
def __init__(self):
super(MyModule, self).__init__()
def forward(self, a, b, out):
return a.div(b, out=out)
try:
v3_mobile_module = _load_for_lite_interpreter(
pytorch_test_dir + "/cpp/jit/upgrader_models/test_versioned_div_tensor_out_v2.ptl")
except Exception as e:
self.skipTest("Failed to load fixture!")
current_mobile_module = self._save_load_mobile_module(MyModule)
for val_a, val_b in product(sample_input, sample_input):
a = torch.tensor((val_a,))
b = torch.tensor((val_b,))
for out in (torch.empty((1,)), torch.empty((1,), dtype=torch.long)):
def _helper(m, fn):
fn_result = None
if fn is torch.div:
fn_result = self._try_fn(fn, a, b, out=out.clone())
else:
fn_result = self._try_fn(fn, a, b, out.clone())
m_result = self._try_fn(m, a, b, out)
if isinstance(m_result, Exception):
self.assertTrue(fn_result, Exception)
else:
self.assertEqual(m_result, fn_result)
self.assertEqual(m_result, out)
_helper(v3_mobile_module, historic_div_out)
_helper(current_mobile_module, torch.div)
@settings(max_examples=10, deadline=200000) # A total of 10 examples will be generated
@given(
sample_input=st.tuples(st.integers(min_value=5, max_value=199), st.floats(min_value=5.0, max_value=199.0))
) # Generate a pair (integer, float)
@example((2, 3, 2.0, 3.0)) # Ensure this example will be covered
def test_versioned_div_scalar(self, sample_input):
def historic_div_scalar_float(self, other: float):
return torch.true_divide(self, other)
def historic_div_scalar_int(self, other: int):
if self.is_floating_point():
return torch.true_divide(self, other)
return torch.divide(self, other, rounding_mode='trunc')
class MyModuleFloat(torch.nn.Module):
def __init__(self):
super(MyModuleFloat, self).__init__()
def forward(self, a, b: float):
return a / b
class MyModuleInt(torch.nn.Module):
def __init__(self):
super(MyModuleInt, self).__init__()
def forward(self, a, b: int):
return a / b
try:
v3_mobile_module_float = _load_for_lite_interpreter(
pytorch_test_dir + "/jit/fixtures/test_versioned_div_scalar_float_v2.ptl")
v3_mobile_module_int = _load_for_lite_interpreter(
pytorch_test_dir + "/cpp/jit/upgrader_models/test_versioned_div_scalar_int_v2.ptl")
except Exception as e:
self.skipTest("Failed to load fixture!")
current_mobile_module_float = self._save_load_mobile_module(MyModuleFloat)
current_mobile_module_int = self._save_load_mobile_module(MyModuleInt)
for val_a, val_b in product(sample_input, sample_input):
a = torch.tensor((val_a,))
b = val_b
def _helper(m, fn):
m_result = self._try_fn(m, a, b)
fn_result = self._try_fn(fn, a, b)
if isinstance(m_result, Exception):
self.assertTrue(fn_result, Exception)
else:
self.assertEqual(m_result, fn_result)
if isinstance(b, float):
_helper(v3_mobile_module_float, current_mobile_module_float)
_helper(current_mobile_module_float, torch.div)
else:
_helper(v3_mobile_module_int, historic_div_scalar_int)
_helper(current_mobile_module_int, torch.div)
@settings(max_examples=10, deadline=200000) # A total of 10 examples will be generated
@given(
sample_input=st.tuples(st.integers(min_value=5, max_value=199), st.floats(min_value=5.0, max_value=199.0))
) # Generate a pair (integer, float)
@example((2, 3, 2.0, 3.0)) # Ensure this example will be covered
def test_versioned_div_scalar_reciprocal(self, sample_input):
def historic_div_scalar_float_reciprocal(self, other: float):
return other / self
def historic_div_scalar_int_reciprocal(self, other: int):
if self.is_floating_point():
return other / self
return torch.divide(other, self, rounding_mode='trunc')
class MyModuleFloat(torch.nn.Module):
def __init__(self):
super(MyModuleFloat, self).__init__()
def forward(self, a, b: float):
return b / a
class MyModuleInt(torch.nn.Module):
def __init__(self):
super(MyModuleInt, self).__init__()
def forward(self, a, b: int):
return b / a
try:
v3_mobile_module_float = _load_for_lite_interpreter(
pytorch_test_dir + "/cpp/jit/upgrader_models/test_versioned_div_scalar_reciprocal_float_v2.ptl")
v3_mobile_module_int = _load_for_lite_interpreter(
pytorch_test_dir + "/cpp/jit/upgrader_models/test_versioned_div_scalar_reciprocal_int_v2.ptl")
except Exception as e:
self.skipTest("Failed to load fixture!")
current_mobile_module_float = self._save_load_mobile_module(MyModuleFloat)
current_mobile_module_int = self._save_load_mobile_module(MyModuleInt)
for val_a, val_b in product(sample_input, sample_input):
a = torch.tensor((val_a,))
b = val_b
def _helper(m, fn):
m_result = self._try_fn(m, a, b)
fn_result = None
# Reverses argument order for torch.div
if fn is torch.div:
fn_result = self._try_fn(torch.div, b, a)
else:
fn_result = self._try_fn(fn, a, b)
if isinstance(m_result, Exception):
self.assertTrue(isinstance(fn_result, Exception))
elif fn is torch.div or a.is_floating_point():
self.assertEqual(m_result, fn_result)
else:
# Skip when fn is not torch.div and a is integral because
# historic_div_scalar_int performs floored division
pass
if isinstance(b, float):
_helper(v3_mobile_module_float, current_mobile_module_float)
_helper(current_mobile_module_float, torch.div)
else:
_helper(v3_mobile_module_int, current_mobile_module_int)
_helper(current_mobile_module_int, torch.div)
@settings(max_examples=10, deadline=200000) # A total of 10 examples will be generated
@given(
sample_input=st.tuples(st.integers(min_value=5, max_value=199), st.floats(min_value=5.0, max_value=199.0))
) # Generate a pair (integer, float)
@example((2, 3, 2.0, 3.0)) # Ensure this example will be covered
def test_versioned_div_scalar_inplace(self, sample_input):
def historic_div_scalar_float_inplace(self, other: float):
return self.true_divide_(other)
def historic_div_scalar_int_inplace(self, other: int):
if self.is_floating_point():
return self.true_divide_(other)
return self.divide_(other, rounding_mode='trunc')
class MyModuleFloat(torch.nn.Module):
def __init__(self):
super(MyModuleFloat, self).__init__()
def forward(self, a, b: float):
a /= b
return a
class MyModuleInt(torch.nn.Module):
def __init__(self):
super(MyModuleInt, self).__init__()
def forward(self, a, b: int):
a /= b
return a
try:
v3_mobile_module_float = _load_for_lite_interpreter(
pytorch_test_dir + "/cpp/jit/upgrader_models/test_versioned_div_scalar_inplace_float_v2.ptl")
v3_mobile_module_int = _load_for_lite_interpreter(
pytorch_test_dir + "/cpp/jit/upgrader_models/test_versioned_div_scalar_inplace_int_v2.ptl")
except Exception as e:
self.skipTest("Failed to load fixture!")
current_mobile_module_float = self._save_load_module(MyModuleFloat)
current_mobile_module_int = self._save_load_module(MyModuleInt)
for val_a, val_b in product(sample_input, sample_input):
a = torch.tensor((val_a,))
b = val_b
def _helper(m, fn):
m_result = self._try_fn(m, a, b)
fn_result = self._try_fn(fn, a, b)
if isinstance(m_result, Exception):
self.assertTrue(fn_result, Exception)
else:
self.assertEqual(m_result, fn_result)
if isinstance(b, float):
_helper(current_mobile_module_float, torch.Tensor.div_)
else:
_helper(current_mobile_module_int, torch.Tensor.div_)
# NOTE: Scalar division was already true division in op version 3,
# so this test verifies the behavior is unchanged.
def test_versioned_div_scalar_scalar(self):
class MyModule(torch.nn.Module):
def __init__(self):
super(MyModule, self).__init__()
def forward(self, a: float, b: int, c: float, d: int):
result_0 = a / b
result_1 = a / c
result_2 = b / c
result_3 = b / d
return (result_0, result_1, result_2, result_3)
try:
v3_mobile_module = _load_for_lite_interpreter(
pytorch_test_dir + "/cpp/jit/upgrader_models/test_versioned_div_scalar_scalar_v2.ptl")
except Exception as e:
self.skipTest("Failed to load fixture!")
current_mobile_module = self._save_load_mobile_module(MyModule)
def _helper(m, fn):
vals = (5., 3, 2., 7)
m_result = m(*vals)
fn_result = fn(*vals)
for mr, hr in zip(m_result, fn_result):
self.assertEqual(mr, hr)
_helper(v3_mobile_module, current_mobile_module)
def test_versioned_linspace(self):
class Module(torch.nn.Module):
def __init__(self):
super(Module, self).__init__()
def forward(self, a: Union[int, float, complex], b: Union[int, float, complex]):
c = torch.linspace(a, b, steps=5)
d = torch.linspace(a, b, steps=100)
return c, d
scripted_module = torch.jit.load(
pytorch_test_dir + "/jit/fixtures/test_versioned_linspace_v7.ptl")
buffer = io.BytesIO(scripted_module._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
v7_mobile_module = _load_for_lite_interpreter(buffer)
current_mobile_module = self._save_load_mobile_module(Module)
sample_inputs = ((3, 10), (-10, 10), (4.0, 6.0), (3 + 4j, 4 + 5j))
for (a, b) in sample_inputs:
(output_with_step, output_without_step) = v7_mobile_module(a, b)
(current_with_step, current_without_step) = current_mobile_module(a, b)
# when no step is given, should have used 100
self.assertTrue(output_without_step.size(dim=0) == 100)
self.assertTrue(output_with_step.size(dim=0) == 5)
# outputs should be equal to the newest version
self.assertEqual(output_with_step, current_with_step)
self.assertEqual(output_without_step, current_without_step)
def test_versioned_linspace_out(self):
class Module(torch.nn.Module):
def __init__(self):
super(Module, self).__init__()
def forward(self, a: Union[int, float, complex], b: Union[int, float, complex], out: torch.Tensor):
return torch.linspace(a, b, steps=100, out=out)
model_path = pytorch_test_dir + "/jit/fixtures/test_versioned_linspace_out_v7.ptl"
loaded_model = torch.jit.load(model_path)
buffer = io.BytesIO(loaded_model._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
v7_mobile_module = _load_for_lite_interpreter(buffer)
current_mobile_module = self._save_load_mobile_module(Module)
sample_inputs = (
(3, 10, torch.empty((100,), dtype=torch.int64), torch.empty((100,), dtype=torch.int64)),
(-10, 10, torch.empty((100,), dtype=torch.int64), torch.empty((100,), dtype=torch.int64)),
(4.0, 6.0, torch.empty((100,), dtype=torch.float64), torch.empty((100,), dtype=torch.float64)),
(3 + 4j, 4 + 5j, torch.empty((100,), dtype=torch.complex64), torch.empty((100,), dtype=torch.complex64)),
)
for (start, end, out_for_old, out_for_new) in sample_inputs:
output = v7_mobile_module(start, end, out_for_old)
output_current = current_mobile_module(start, end, out_for_new)
# when no step is given, should have used 100
self.assertTrue(output.size(dim=0) == 100)
# "Upgraded" model should match the new version output
self.assertEqual(output, output_current)
``` |
{
"source": "66095021/handy_code",
"score": 2
} |
#### File: 66095021/handy_code/busyserver.py
```python
import time
import json
import tornado.gen
import tornado.tcpserver
import tornado.ioloop
import tornado.options
from tornado.log import app_log
from concurrent.futures import ThreadPoolExecutor
class BoServer(tornado.tcpserver.TCPServer):
executor = ThreadPoolExecutor(max_workers=4)
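    # NOTE: this executor is declared but never used below; busy_work() runs synchronously inside
    # handle_stream, so its time.sleep(3) blocks the IOLoop for every incoming connection.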
def __init__(self, io_loop=None, ssl_options=None, max_buffer_size=None, read_chunk_size=None):
super(BoServer, self).__init__(io_loop, ssl_options, max_buffer_size, read_chunk_size)
def busy_work(self):
time.sleep(3)
print "work now"
return {"a": "b"}
@tornado.gen.coroutine
def handle_stream(self, stream, address):
print "get request {}".format(address)
try:
r = self.busy_work()
stream.write(json.dumps(r))
finally:
stream.close()
def main():
server = BoServer()
server.listen(9999)
tornado.ioloop.IOLoop.current().start()
if __name__ == '__main__':
main()
``` |
{
"source": "66418500/B50",
"score": 3
} |
#### File: b50/Sample/modian_sample.py
```python
import requests
import json
import pymysql
import time
from datetime import datetime
# conn = pymysql.connect(user='root', password='password', database='b50_demo', charset='utf8')
conn = pymysql.Connect(
host="192.168.3.11",
port=3306,
user='B50',
password='<PASSWORD>',
database='B50',
charset='utf8'
)
SAMPLING_DELAY = 20
MODIAN_DELAY = 0.5
def update_modian():
"""
Description:
        This function samples and resolves detailed information for each given Modian crowdfunding project.
Resolved fields include project name, project id, real-time amount, start time, etc.
        For each fan club, all of the projects it issued are sampled. Resolved fields are inserted into the
        database, or updated if the project already exists there.
Parameter: none
Author: <NAME>
Date: September 2019
"""
print('Sampling of Modian started at %s' % datetime.now())
fan_club_list = [] # Active fanclub.
project_list = [] # Projects in table project.
obsolete_project_list = [] # Projects that are obsoleted.
# Request parameters.
url = 'http://orderapi.modian.com/v45/user/build_product_list'
headers = {
'User-Agent': 'Mozilla/5.0',
}
# Connect database.
cursor = conn.cursor() # Create cursor.
# Get modian_id from table fanclubs.
sql = "SELECT modian_id, fanclub, id FROM fanclubs WHERE active = 1"
cursor.execute(sql)
for field in cursor:
if field[0] != '' and field[0] is not None:
fan_club_list.append((field[0], field[1], field[2]))
# Get project_id from table projects.
sql = "SELECT project_id FROM projects WHERE platform = '摩点'"
cursor.execute(sql)
for field in cursor:
if field[0] != '' and field[0] is not None:
project_list.append(field[0])
# Get project_id from table projects which are obsoleted.
sql = "SELECT project_id FROM projects WHERE platform = '摩点' AND is_obsolete = 1"
cursor.execute(sql)
for field in cursor:
if field[0] != '' and field[0] is not None:
obsolete_project_list.append(field[0])
# Sample starts.
for fan_club_tuple in fan_club_list:
# Delay.
time.sleep(MODIAN_DELAY)
# Sampling of one fan club starts here.
print(" Sampling of %s." % fan_club_tuple[1])
# Modian API parameters.
data = {
'to_user_id': fan_club_tuple[0],
'page_index': 0,
'client': 2,
'page_rows': 10,
'user_id': 1085377 # Any user_id is ok.
}
resp = requests.post(url, data=data, headers=headers)
return_dict = resp.json()
# Return data successfully.
if return_dict['status'] == '0':
projects = json.loads(return_dict['data']) # Convert string ro dictionary.
for project in projects:
project_name = project.get('name')
project_id = int(project.get('id'))
amount = 0 if project['backer_money'] == '' else float(project['backer_money'])
# fan_club = project['username']
# fanclub_id = project.get('user_id')
fanclub_id = fan_club_tuple[2]
start_time = project.get('start_time')
end_time = project.get('end_time')
# For new project, insert it into table projects.
if project_id not in project_list:
try:
new_data = (project_name, project_id, '摩点', amount, fanclub_id,
start_time, end_time, datetime.now(), datetime.now())
sql = "INSERT INTO projects(project_name, project_id, platform, amount, fanclub_id, " \
"start_time, end_time, created_at, updated_at)" \
" VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s)"
cursor.execute(sql, new_data)
conn.commit()
print(" Inserting new Modian project 《%s》 finished." % project_name)
                    except pymysql.Error as e:
conn.rollback()
print("Inserting modian project failed. Insert data without project_name. "
"project_id = %s. Error: %s" % (project_id, e))
# Some projects name may include characters which are incompatible with MySQL encoding.
# For such projects, insert data without project_name.
new_data = (project_id, '摩点', amount, fanclub_id,
start_time, end_time, datetime.now(), datetime.now())
sql = "INSERT INTO projects(project_id, platform, amount, fanclub_id, " \
"start_time, end_time, created_at, updated_at)" \
" VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
cursor.execute(sql, new_data)
conn.commit()
# For project already in table projects but not obsoleted, update amount field only.
elif project_id not in obsolete_project_list:
try:
update_data = (amount, datetime.now(), project_id)
sql = "UPDATE projects SET amount = %s, updated_at = %s WHERE project_id = %s"
cursor.execute(sql, update_data)
conn.commit()
print(" Updating Modian project 《%s》 finished." % project_name)
                    except pymysql.Error as e:
conn.rollback()
print("Updating modian project failed. project_id = %s. Error: %s" % (project_id, e))
# Return data failed.
else:
print('Modian returns data failed. Status code: %s.' % return_dict['status'])
# Sampling finished.
print('Sampling of Modian finished at %s.' % datetime.now())
# print("#" * 48)
# print("\n")
def main():
try:
# time.sleep(10)
while True:
update_modian()
time.sleep(SAMPLING_DELAY)
except Exception as e:
print("%s \033[31;0m Something wrong.\033[0m %s" % (datetime.now(), e))
finally:
# print("Restart.")
main()
if __name__ == '__main__':
main()
``` |
{
"source": "664235822/OpenCV-Test",
"score": 3
} |
#### File: OpenCV-Test/template-matching-ocr/ocr_template_match.py
```python
from imutils import contours
import numpy as np
import argparse
import imutils
import cv2
import myutils
# Command-line arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="path to imput image")
ap.add_argument("-t", "--template", required=True, help="path to template OCR-A image")
args = vars(ap.parse_args())
# Helper for displaying an image
def cv_show(name, img):
cv2.imshow(name, img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# Read the template image
img = cv2.imread(args["template"])
cv_show("img", img)
# Convert to grayscale
ref = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv_show("ref", ref)
# Binarize
ref = cv2.threshold(ref, 10, 255, cv2.THRESH_BINARY_INV)[1]
cv_show("ref", ref)
# Find the contours
refCnts, hierarchy = cv2.findContours(ref.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# Draw the contours
cv2.drawContours(img, refCnts, -1, (0, 0, 255), 3)
cv_show("img", img)
print(np.array(refCnts, dtype=object).shape)
# Sort the contours
refCnts = myutils.sort_contours(refCnts, method="left-to-right")[0]
digits = {}
# Iterate over every contour
for (i, c) in enumerate(refCnts):
    # Compute the bounding rectangle and resize it to a uniform size
(x, y, w, h) = cv2.boundingRect(c)
roi = ref[y:y + h, x:x + w]
roi = cv2.resize(roi, (57, 88))
    # Each digit gets its own template
digits[i] = roi
# Initialize the structuring kernels
rectKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (9, 3))
sqKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
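# The wide 9x3 rectangular kernel favours merging the digits horizontally into 4-digit groups,
# while the 5x5 square kernel is used for the later, more general closing step.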
# Read the input image and preprocess it
image = cv2.imread(args["image"])
cv_show("image", image)
image = myutils.resize(image, width=300)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv_show("gray", gray)
# Top-hat operation to highlight the brighter regions
tophat = cv2.morphologyEx(gray, cv2.MORPH_TOPHAT, rectKernel)
cv_show("tophat", tophat)
gradX = cv2.Sobel(tophat, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=-1)
gradX = np.absolute(gradX)
(minVal, maxVal) = (np.min(gradX), np.max(gradX))
gradX = (255 * ((gradX - minVal) / (maxVal - minVal)))
gradX = gradX.astype("uint8")
print(np.array(gradX, dtype=object).shape)
cv_show("gradX", gradX)
# Closing (dilate, then erode) to join the digits together
gradX = cv2.morphologyEx(gradX, cv2.MORPH_CLOSE, rectKernel)
cv_show("gradX", gradX)
# Binarize
thresh = cv2.threshold(gradX, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
cv_show("thresh", thresh)
# One more closing operation, applied to the thresholded image
thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, sqKernel)
cv_show("thresh", thresh)
# Find the contours
threshCnts, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = threshCnts
cur_img = image.copy()
cv2.drawContours(cur_img, cnts, -1, (0, 0, 255), 3)
cv_show("img", cur_img)
locs = []
# Iterate over the contours
for (i, c) in enumerate(cnts):
    # Compute the bounding rectangle
(x, y, w, h) = cv2.boundingRect(c)
ar = w / float(h)
    # Select regions suited to this task: the digits come in groups of four
if ar > 2.5 and ar < 4.0:
if (w > 40 and w < 55) and (h > 10 and h < 20):
            # Keep the ones that match
locs.append((x, y, w, h))
locs = sorted(locs, key=lambda x: x[0])
output = []
# Iterate over the digits in every group
for (i, (gX, gY, gW, gH)) in enumerate(locs):
groupOutput = []
    # Extract each group by its coordinates
group = gray[gY - 5:gY + gH + 5, gX - 5:gX + gW + 5]
cv_show("group", group)
    # Preprocess
group = cv2.threshold(group, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
cv_show("group", group)
    # Find the contours within each group
digitCnts, hierarchy = cv2.findContours(group.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
digitCnts = contours.sort_contours(digitCnts, method="left-to-right")[0]
    # Work out each digit in the group
for c in digitCnts:
        # Find the contour of the current digit and resize it to a uniform size
(x, y, w, h) = cv2.boundingRect(c)
roi = group[y:y + h, x:x + w]
roi = cv2.resize(roi, (57, 88))
cv_show("roi", roi)
        # Compute matching scores
scores = []
        # Score against every template
for (digit, digitROI) in digits.items():
            # Template matching
result = cv2.matchTemplate(roi, digitROI, cv2.TM_CCOEFF)
(_, score, _, _) = cv2.minMaxLoc(result)
scores.append(score)
        # Take the best-matching digit
groupOutput.append(str(np.argmax(scores)))
    # Draw the result
cv2.rectangle(image, (gX - 5, gY - 5), (gX + gW + 5, gY + gH + 5), (0, 0, 255), 1)
cv2.putText(image, "", (gX, gY - 15), (cv2.FONT_HERSHEY_SIMPLEX), 0.65, (0, 0, 255), 2)
    # Collect the results
output.extend(groupOutput)
# Print the result
print("信用卡卡号 # :", format("".join(output)))
cv_show("image", image)
``` |
{
"source": "6666ev/bert_seq2seq",
"score": 2
} |
#### File: bert_seq2seq/bert_seq2seq/bart_chinese.py
```python
import torch
from bert_seq2seq.model.bart_model import BartConfig, BartForConditionalGeneration, BartModel, shift_tokens_right
from bert_seq2seq.tokenizer import Tokenizer,load_chinese_base_vocab
from bert_seq2seq.basic_bert import BasicBart
from bert_seq2seq.seq2seq_model import top_k_top_p_filtering
import torch.nn.functional as F
import torch.nn as nn
class BartGenerationModel(BasicBart):
def __init__(self, word2idx):
super().__init__()
config = BartConfig()
self.config = config
self.model = BartModel(config)
self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
self.word2idx = word2idx
self.tokenizer = Tokenizer(self.word2idx)
self.bos_id = self.word2idx["[CLS]"]
self.eos_id = self.word2idx["[SEP]"]
self.unk_id = self.word2idx["[UNK]"]
def forward(self, input_ids, decoder_input_ids, labels=None):
input_ids = input_ids.to(self.device)
decoder_input_ids = decoder_input_ids.to(self.device)
if labels is not None:
labels = labels.to(self.device)
if labels is not None:
if decoder_input_ids is None:
decoder_input_ids = shift_tokens_right(
labels, self.config.pad_token_id, self.config.decoder_start_token_id
)
decoder_out, _ = self.model(
input_ids,
decoder_input_ids=decoder_input_ids,
)
lm_logits = self.lm_head(decoder_out)
target_mask = (decoder_input_ids > 0).float().view(-1)
masked_lm_loss = None
if labels is not None:
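            # Average the token-level cross entropy over non-padding target positions only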
loss_fct = nn.CrossEntropyLoss()
masked_lm_loss = (loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) * target_mask).sum() / target_mask.sum()
output = (lm_logits,)
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
def sample_generate_encoder_decoder(self, text, input_max_length=256, out_max_length=200, top_k=30, top_p=0.0, add_eos=True):
token_out = self.tokenizer.encode(text, max_length=input_max_length)
if len(token_out) == 2:
token_ids = token_out[0]
else:
token_ids = token_out
if not add_eos:
token_ids = token_ids[:-1]
token_ids = torch.tensor(token_ids, device=self.device, dtype=torch.long).view(1, -1)
output_ids = []
input_decoder_ids = torch.tensor(self.bos_id, device=self.device, dtype=torch.long).view(1, -1)
with torch.no_grad():
for step in range(out_max_length):
scores = self.model(input_ids=token_ids, decoder_input_ids=input_decoder_ids)[0]
logit_score = torch.log_softmax(scores[:, -1], dim=-1).squeeze(0)
logit_score[self.unk_id] = -float('Inf')
filtered_logits = top_k_top_p_filtering(logit_score, top_k=top_k, top_p=top_p)
next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
if self.eos_id == next_token.item():
break
output_ids.append(next_token.item())
input_decoder_ids = torch.cat((input_decoder_ids, next_token.long().unsqueeze(0)), dim=1)
return self.tokenizer.decode(output_ids)
```
#### File: bert_seq2seq/bert_seq2seq/helper.py
```python
from typing import List
import torch
class LogitsProcessor:
"""Abstract base class for all logit processors that can be applied during generation."""
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
"""Torch method for processing logits."""
raise NotImplementedError(
f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
)
class RepetitionPenaltyLogitsProcessor(LogitsProcessor):
r"""
:class:`transformers.LogitsProcessor` enforcing an exponential penalty on repeated sequences.
Args:
repetition_penalty (:obj:`float`):
The parameter for repetition penalty. 1.0 means no penalty. See `this paper
<https://arxiv.org/pdf/1909.05858.pdf>`__ for more details.
"""
def __init__(self, penalty: float):
if not isinstance(penalty, float) or not (penalty > 0):
raise ValueError(f"`penalty` has to be a strictly positive float, but is {penalty}")
self.penalty = penalty
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
score = torch.gather(scores, 1, input_ids)
# if score < 0 then repetition penalty has to be multiplied to reduce the previous token probability
score = torch.where(score < 0, score * self.penalty, score / self.penalty)
scores.scatter_(1, input_ids, score)
return scores
class TemperatureLogitsProcessor(LogitsProcessor):
r"""
:class:`transformers.LogitsWarper` for temperature (exponential scaling output probability distribution).
Args:
temperature (:obj:`float`):
The value used to module the logits distribution.
"""
def __init__(self, temperature: float):
if not isinstance(temperature, float) or not (temperature > 0):
raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
self.temperature = temperature
def __call__(self, input_ids: torch.Tensor, scores: torch.Tensor) -> torch.FloatTensor:
scores = scores / self.temperature
return scores
class TopPLogitsProcessor(LogitsProcessor):
"""
    :class:`transformers.LogitsWarper` that performs top-p filtering, i.e. restricting to the smallest set of the
    most probable tokens whose cumulative probability reaches ``top_p``.
Args:
top_p (:obj:`float`):
If set to < 1, only the most probable tokens with probabilities that add up to top_p or higher are
kept for generation.
filter_value (:obj:`float`, `optional`, defaults to :obj:`-float("Inf")`):
All filtered values will be set to this float value.
min_tokens_to_keep (:obj:`int`, `optional`, defaults to 1):
Minimum number of tokens that cannot be filtered.
"""
def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
top_p = float(top_p)
if top_p < 0 or top_p > 1.0:
raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
self.top_p = top_p
self.filter_value = filter_value
self.min_tokens_to_keep = min_tokens_to_keep
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
sorted_logits, sorted_indices = torch.sort(scores, descending=True)
# print(sorted_logits.softmax(dim=-1))
cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1)
# Remove tokens whose cumulative probability exceeds the threshold (entries marked 0/False are kept)
sorted_indices_to_remove = cumulative_probs > self.top_p
if self.min_tokens_to_keep > 1:
# Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
sorted_indices_to_remove[..., : self.min_tokens_to_keep - 1] = 0
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
# scatter sorted tensors to original indexing
indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
scores = scores.masked_fill(indices_to_remove, self.filter_value)
return scores
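# Worked example (illustrative): with top_p=0.9 and sorted probabilities
# [0.5, 0.3, 0.15, 0.05], the cumulative sums are [0.5, 0.8, 0.95, 1.0]; after the
# one-position shift the remove mask is [False, False, False, True], so the three most
# probable tokens are kept and the last one is set to filter_value.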
class TopKLogitsProcessor(LogitsProcessor):
r"""
:class:`transformers.LogitsWarper` that performs top-k, i.e. restricting to the k highest probability elements.
Args:
top_k (:obj:`int`):
The number of highest probability vocabulary tokens to keep for top-k-filtering.
filter_value (:obj:`float`, `optional`, defaults to :obj:`-float("Inf")`):
All filtered values will be set to this float value.
min_tokens_to_keep (:obj:`int`, `optional`, defaults to 1):
Minimum number of tokens that cannot be filtered.
"""
def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
if not isinstance(top_k, int) or top_k <= 0:
raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")
self.top_k = top_k
self.filter_value = filter_value
self.min_tokens_to_keep = min_tokens_to_keep
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
top_k = min(max(self.top_k, self.min_tokens_to_keep), scores.size(-1)) # Safety check
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = scores < torch.topk(scores, top_k)[0][..., -1, None]
scores = scores.masked_fill(indices_to_remove, self.filter_value)
return scores
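# Worked example (illustrative): with top_k=2 and scores [-1.0, 3.0, 0.5, 2.0], the
# second-highest score is 2.0, so -1.0 and 0.5 are replaced by filter_value.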
class ListProcessor(LogitsProcessor):
def __init__(self, list_processor: List[LogitsProcessor]) -> None:
super().__init__()
self.list_processor = list_processor
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
for processor in self.list_processor:
scores = processor(input_ids, scores)
return scores
if __name__ == "__main__":
print("hello world")
input_ids = torch.tensor([[1, 2, 0, 1]])
scores = torch.tensor([[-10, -5, -3, -1]], dtype=torch.float32)
# temp = TemperatureLogitsProcessor(10.0)
# top_p = TopPLogitsProcessor(top_p=0.5)
# top_k = TopKLogitsProcessor(top_k=1)
# scores = temp(input_ids, scores)
# print(scores)
# scores = top_p(input_ids, scores)
# print(scores)
# scores = top_k(input_ids, scores)
# print(scores)
list_processor = ListProcessor([TemperatureLogitsProcessor(10.0), TopPLogitsProcessor(top_p=0.5), TopKLogitsProcessor(top_k=1)])
scores = list_processor(input_ids, scores)
print(scores)
```
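The `__main__` block above only prints the transformed scores. Below is a minimal sketch of how these processors would typically be chained inside a single sampling step; the token ids, shapes, and vocabulary size are illustrative, and the import path assumes the file header above maps to the `bert_seq2seq` package:
```python
import torch
import torch.nn.functional as F
from bert_seq2seq.helper import (ListProcessor, RepetitionPenaltyLogitsProcessor,
                                 TemperatureLogitsProcessor, TopKLogitsProcessor,
                                 TopPLogitsProcessor)

processors = ListProcessor([
    RepetitionPenaltyLogitsProcessor(1.2),   # discourage tokens that were already generated
    TemperatureLogitsProcessor(0.9),         # slightly sharpen the distribution
    TopKLogitsProcessor(top_k=30),
    TopPLogitsProcessor(top_p=0.95),
])

generated = torch.tensor([[101, 2769, 4263]])  # token ids produced so far (batch=1)
scores = torch.randn(1, 21128)                 # next-token logits from some model
scores = processors(generated, scores)         # apply every processor in order
next_token = torch.multinomial(F.softmax(scores, dim=-1), num_samples=1)
```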
#### File: bert_seq2seq/model/gpt2_model.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
class GPT2Config():
def __init__(
self,
vocab_size=21128,
n_positions=1024,
n_ctx=1024,
n_embd=768,
n_layer=12,
n_head=12,
n_inner=None,
activation_function="gelu_new",
resid_pdrop=0.1,
embd_pdrop=0.1,
attn_pdrop=0.1,
layer_norm_epsilon=1e-5,
initializer_range=0.02,
summary_type="cls_index",
summary_use_proj=True,
summary_activation=None,
summary_proj_to_labels=True,
summary_first_dropout=0.1,
gradient_checkpointing=False,
use_cache=True,
):
self.vocab_size = vocab_size
self.n_ctx = n_ctx
self.n_positions = n_positions
self.n_embd = n_embd
self.n_layer = n_layer
self.n_head = n_head
self.n_inner = n_inner
self.activation_function = activation_function
self.resid_pdrop = resid_pdrop
self.embd_pdrop = embd_pdrop
self.attn_pdrop = attn_pdrop
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_range = initializer_range
self.summary_type = summary_type
self.summary_use_proj = summary_use_proj
self.summary_activation = summary_activation
self.summary_first_dropout = summary_first_dropout
self.summary_proj_to_labels = summary_proj_to_labels
self.gradient_checkpointing = gradient_checkpointing
self.use_cache = use_cache
self.add_cross_attention = False
self.use_return_dict = False
self.output_attentions = False
self.output_hidden_states = False
@property
def max_position_embeddings(self):
return self.n_positions
@property
def hidden_size(self):
return self.n_embd
@property
def num_attention_heads(self):
return self.n_head
@property
def num_hidden_layers(self):
return self.n_layer
def _gelu_python(x):
"""
Original Implementation of the GELU activation function in Google BERT repo when initially created. For
information: OpenAI GPT's GELU is slightly different (and gives slightly different results): 0.5 * x * (1 +
torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) This is now written in C in
torch.nn.functional Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def gelu_new(x):
"""
Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see
the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
"""
return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))
gelu = F.gelu
def gelu_fast(x):
return 0.5 * x * (1.0 + torch.tanh(x * 0.7978845608 * (1.0 + 0.044715 * x * x)))
def _silu_python(x):
"""
See Gaussian Error Linear Units (Hendrycks et al., https://arxiv.org/abs/1606.08415) where the SiLU (Sigmoid Linear
Unit) was originally introduced and coined, and see Sigmoid-Weighted Linear Units for Neural Network Function
Approximation in Reinforcement Learning (Elfwing et al., https://arxiv.org/abs/1702.03118) and Swish: a Self-Gated
Activation Function (Ramachandran et al., https://arxiv.org/abs/1710.05941v1) where the SiLU was experimented with
later.
"""
return x * torch.sigmoid(x)
def mish(x):
return x * torch.tanh(torch.nn.functional.softplus(x))
def linear_act(x):
return x
ACT2FN = {
"relu": F.relu,
"gelu": gelu,
"tanh": torch.tanh,
"gelu_new": gelu_new,
"gelu_fast": gelu_fast,
"mish": mish,
"linear": linear_act,
"sigmoid": torch.sigmoid,
}
class Conv1D(nn.Module):
"""
1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).
Basically works like a linear layer but the weights are transposed.
Args:
nf (:obj:`int`): The number of output features.
nx (:obj:`int`): The number of input features.
"""
def __init__(self, nf, nx):
super().__init__()
self.nf = nf
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.weight = nn.Parameter(w)
self.bias = nn.Parameter(torch.zeros(nf))
def forward(self, x):
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
x = x.view(*size_out)
return x
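# Shape note (illustrative): Conv1D(nf=2304, nx=768) maps (batch, seq, 768) -> (batch, seq, 2304).
# Unlike nn.Linear, the weight is stored as (nx, nf), i.e. transposed, to match the original
# TF/OpenAI GPT-2 checkpoints.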
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k > 0: keep only top k tokens with highest probability (top-k filtering).
top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
assert logits.dim() == 1 # batch size 1 for now - could be updated for more but the code would be less clear
top_k = min(top_k, logits.size(-1)) # Safety check
if top_k > 0:
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
return logits
class Attention(nn.Module):
def __init__(self, nx, n_ctx, config, scale=False, is_cross_attention=False):
super().__init__()
n_state = nx # in Attention: n_state=768 (nx=n_embd)
# [switch nx => n_state from Block to Attention to keep identical to TF implem]
assert n_state % config.n_head == 0
self.register_buffer(
"bias", torch.tril(torch.ones((n_ctx, n_ctx), dtype=torch.uint8)).view(1, 1, n_ctx, n_ctx)
)
self.register_buffer("masked_bias", torch.tensor(-1e4))
self.n_head = config.n_head
self.split_size = n_state
self.scale = scale
self.is_cross_attention = is_cross_attention
if self.is_cross_attention:
self.c_attn = Conv1D(2 * n_state, nx)
self.q_attn = Conv1D(n_state, nx)
else:
self.c_attn = Conv1D(3 * n_state, nx)
self.c_proj = Conv1D(n_state, nx)
self.attn_dropout = nn.Dropout(config.attn_pdrop)
self.resid_dropout = nn.Dropout(config.resid_pdrop)
self.pruned_heads = set()
def _attn(self, q, k, v, attention_mask=None, head_mask=None, output_attentions=False):
w = torch.matmul(q, k)
if self.scale:
w = w / (float(v.size(-1)) ** 0.5)
nd, ns = w.size(-2), w.size(-1)
if not self.is_cross_attention:
# only the "normal" (non-cross) attention layer implements the causal mask
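# (ns - nd) handles incremental decoding with past key/values: the query covers only the
# nd newest positions, while the keys/values cover all ns positions seen so far.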
mask = self.bias[:, :, ns - nd : ns, :ns]
w = torch.where(mask.bool(), w, self.masked_bias.to(w.dtype))
if attention_mask is not None:
# Apply the attention mask
w = w + attention_mask
w = nn.Softmax(dim=-1)(w)
w = self.attn_dropout(w)
# Mask heads if we want to
if head_mask is not None:
w = w * head_mask
outputs = (torch.matmul(w, v),)
if output_attentions:
outputs += (w,)
return outputs
def merge_heads(self, x):
x = x.permute(0, 2, 1, 3).contiguous()
new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
return x.view(*new_x_shape) # in Tensorflow implem: fct merge_states
def split_heads(self, x, k=False):
new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
x = x.view(*new_x_shape) # in Tensorflow implem: fct split_states
if k:
return x.permute(0, 2, 3, 1) # (batch, head, head_features, seq_length)
else:
return x.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
def forward(
self,
hidden_states,
layer_past=None,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
use_cache=False,
output_attentions=False,
):
if encoder_hidden_states is not None:
assert hasattr(
self, "q_attn"
), "If class is used as cross attention, the weights `q_attn` have to be defined. Please make sure to instantiate class with `Attention(..., is_cross_attention=True)`."
query = self.q_attn(hidden_states)
key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)
attention_mask = encoder_attention_mask
else:
query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)
query = self.split_heads(query)
key = self.split_heads(key, k=True)
value = self.split_heads(value)
if layer_past is not None:
past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1] # transpose back cf below
key = torch.cat((past_key, key), dim=-1)
value = torch.cat((past_value, value), dim=-2)
if use_cache is True:
present = (key.transpose(-2, -1), value) # transpose to have same shapes
else:
present = None
attn_outputs = self._attn(query, key, value, attention_mask, head_mask, output_attentions)
a = attn_outputs[0]
a = self.merge_heads(a)
a = self.c_proj(a)
a = self.resid_dropout(a)
return (a, present) + attn_outputs[1:] # a, present, (attentions)
class MLP(nn.Module):
def __init__(self, n_state, config): # in MLP: n_state=3072 (4 * n_embd)
super().__init__()
nx = config.n_embd
self.c_fc = Conv1D(n_state, nx)
self.c_proj = Conv1D(nx, n_state)
self.act = ACT2FN[config.activation_function]
self.dropout = nn.Dropout(config.resid_pdrop)
def forward(self, x):
h = self.act(self.c_fc(x))
h2 = self.c_proj(h)
return self.dropout(h2)
class Block(nn.Module):
def __init__(self, n_ctx, config, scale=False):
super().__init__()
hidden_size = config.n_embd
inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.attn = Attention(hidden_size, n_ctx, config, scale)
self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
if config.add_cross_attention:
self.crossattention = Attention(hidden_size, n_ctx, config, scale, is_cross_attention=True)
self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
self.mlp = MLP(inner_dim, config)
def forward(
self,
hidden_states,
layer_past=None,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
use_cache=False,
output_attentions=False,
):
attn_outputs = self.attn(
self.ln_1(hidden_states),
layer_past=layer_past,
attention_mask=attention_mask,
head_mask=head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
)
attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
outputs = attn_outputs[1:]
# residual connection
hidden_states = attn_output + hidden_states
if encoder_hidden_states is not None:
# add one self-attention block for cross-attention
assert hasattr(
self, "crossattention"
), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
cross_attn_outputs = self.crossattention(
self.ln_cross_attn(hidden_states),
attention_mask=attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
)
attn_output = cross_attn_outputs[0]
# residual connection
hidden_states = hidden_states + attn_output
outputs = outputs + cross_attn_outputs[2:] # add cross attentions if we output attention weights
feed_forward_hidden_states = self.mlp(self.ln_2(hidden_states))
# residual connection
hidden_states = hidden_states + feed_forward_hidden_states
if use_cache:
outputs = (hidden_states,) + outputs
else:
outputs = (hidden_states,) + outputs[1:]
return outputs # hidden_states, present, (attentions, cross_attentions)
class GPT2Model(nn.Module):
def __init__(self, config):
super().__init__()
self.wte = nn.Embedding(config.vocab_size, config.n_embd)
self.wpe = nn.Embedding(config.n_positions, config.n_embd)
self.drop = nn.Dropout(config.embd_pdrop)
self.h = nn.ModuleList([Block(config.n_ctx, config, scale=True) for _ in range(config.n_layer)])
self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
self.config = config
self.device_map = None
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, new_embeddings):
self.wte = new_embeddings
def forward(
self,
input_ids=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
batch_size = input_ids.shape[0]
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
batch_size = inputs_embeds.shape[0]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if token_type_ids is not None:
token_type_ids = token_type_ids.view(-1, input_shape[-1])
if position_ids is not None:
position_ids = position_ids.view(-1, input_shape[-1])
if past_key_values is None:
past_length = 0
past_key_values = tuple([None] * len(self.h))
else:
past_length = past_key_values[0][0].size(-2)
if position_ids is None:
device = input_ids.device if input_ids is not None else inputs_embeds.device
position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
# Attention mask.
if attention_mask is not None:
assert batch_size > 0, "batch_size has to be defined and > 0"
# attention_mask = attention_mask.view(batch_size, -1)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
# attention_mask = attention_mask[:, None, None, :]
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
# attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
attention_mask = (1.0 - attention_mask) * -10000.0
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.add_cross_attention and encoder_hidden_states is not None:
encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_attention_mask = None
if inputs_embeds is None:
inputs_embeds = self.wte(input_ids)
position_embeds = self.wpe(position_ids)
hidden_states = inputs_embeds + position_embeds
if token_type_ids is not None:
token_type_embeds = self.wte(token_type_ids)
hidden_states = hidden_states + token_type_embeds
hidden_states = self.drop(hidden_states)
output_shape = input_shape + (hidden_states.size(-1),)
presents = () if use_cache else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
all_hidden_states = () if output_hidden_states else None
for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = block(
hidden_states,
layer_past=layer_past,
attention_mask=attention_mask,
head_mask=None,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = outputs[0]
if use_cache is True:
presents = presents + (outputs[1],)
if output_attentions:
all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)
hidden_states = self.ln_f(hidden_states)
hidden_states = hidden_states.view(*output_shape)
# Add last hidden state
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
return hidden_states
class GPT2LMHeadModel(nn.Module):
_keys_to_ignore_on_load_missing = [r"h\.\d+\.attn\.masked_bias", r"lm_head\.weight"]
def __init__(self, config):
super().__init__()
self.config = config
self.transformer = GPT2Model(config)
self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
# Model parallel
self.model_parallel = False
self.device_map = None
def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
token_type_ids = kwargs.get("token_type_ids", None)
# only last token for inputs_ids if past is defined in kwargs
if past:
input_ids = input_ids[:, -1].unsqueeze(-1)
if token_type_ids is not None:
token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
attention_mask = kwargs.get("attention_mask", None)
position_ids = kwargs.get("position_ids", None)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if past:
position_ids = position_ids[:, -1].unsqueeze(-1)
else:
position_ids = None
return {
"input_ids": input_ids,
"past_key_values": past,
"use_cache": kwargs.get("use_cache"),
"position_ids": position_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
def forward(
self,
input_ids=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
``labels = input_ids``. Indices are selected in ``[-100, 0, ..., config.vocab_size]``. All labels set to
``-100`` are ignored (masked); the loss is only computed for labels in ``[0, ..., config.vocab_size]``
"""
transformer_outputs = self.transformer(
input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = transformer_outputs
lm_logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
# Shift so that tokens < n predict n
shift_logits = lm_logits[..., :-1, :].contiguous()
shift_labels = labels[..., 1:].contiguous()
# Flatten the tokens
loss_fct = nn.CrossEntropyLoss()
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
# print(attention_mask)
return loss, lm_logits
# if __name__ == "__main__":
# config = GPT2Config()
# model = GPT2LMHeadModel(config=config)
# model.eval()
# t1 = torch.randint(1, 1000, (2, 10))
# print(t1.shape)
# for k, v in model.named_parameters():
# print(k)
# if "lm_head" in k:
# print(v.shape)
# model.load_state_dict(torch.load("./state_dict/gpt_pytorch_model.bin"))
# loss, out = model(t1)
# print(out.shape)
# print(sample_generate(model, "今天天气好", out_max_length=100))
```
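A minimal sketch of driving the classes in this file end to end with randomly initialized weights (the outputs are meaningless, this only exercises the API); it assumes the definitions above are in scope:
```python
import torch
import torch.nn.functional as F

config = GPT2Config()                 # defaults: 12 layers, 768 hidden, vocab of 21128
model = GPT2LMHeadModel(config)
model.eval()

input_ids = torch.randint(1, 1000, (1, 10))
loss, lm_logits = model(input_ids, labels=input_ids)   # forward always returns (loss, logits)
print(loss.item(), lm_logits.shape)                    # logits: (1, 10, 21128)

# one filtered sampling step on the last position, using the standalone helper above
last_logits = lm_logits[0, -1, :]
filtered = top_k_top_p_filtering(last_logits, top_k=30, top_p=0.95)
next_token = torch.multinomial(F.softmax(filtered, dim=-1), num_samples=1)
```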
#### File: bert_seq2seq/model/t5_model.py
```python
import copy
import math
import os
import warnings
import torch
import torch.nn.functional as F
from torch import nn
from torch.nn import CrossEntropyLoss
class T5Config:
model_type = "t5"
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
self,
vocab_size=50000,
d_model=768,
d_kv=64,
d_ff=2048,
num_layers=12,
num_decoder_layers=12,
num_heads=12,
relative_attention_num_buckets=32,
dropout_rate=0.1,
layer_norm_epsilon=1e-6,
initializer_factor=1.0,
feed_forward_proj="gated-gelu",
is_encoder_decoder=True,
use_cache=True,
pad_token_id=0,
eos_token_id=1,
is_decoder=False,
):
self.is_decoder = is_decoder
self.vocab_size = vocab_size
self.d_model = d_model
self.d_kv = d_kv
self.d_ff = d_ff
self.num_layers = num_layers
self.num_decoder_layers = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
self.num_heads = num_heads
self.relative_attention_num_buckets = relative_attention_num_buckets
self.dropout_rate = dropout_rate
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_factor = initializer_factor
self.feed_forward_proj = feed_forward_proj
self.use_cache = use_cache
@property
def hidden_size(self):
return self.d_model
@property
def num_attention_heads(self):
return self.num_heads
@property
def num_hidden_layers(self):
return self.num_layers
class T5SmallConfig:
model_type = "t5"
keys_to_ignore_at_inference = ["past_key_values"]
def __init__(
self,
vocab_size=50000,
d_model=512,
d_kv=64,
d_ff=1024,
num_layers=8,
num_decoder_layers=8,
num_heads=6,
relative_attention_num_buckets=32,
dropout_rate=0.1,
layer_norm_epsilon=1e-6,
initializer_factor=1.0,
feed_forward_proj="gated-gelu",
is_encoder_decoder=True,
use_cache=True,
pad_token_id=0,
eos_token_id=1,
is_decoder=False,
):
self.is_decoder = is_decoder
self.vocab_size = vocab_size
self.d_model = d_model
self.d_kv = d_kv
self.d_ff = d_ff
self.num_layers = num_layers
self.num_decoder_layers = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
self.num_heads = num_heads
self.relative_attention_num_buckets = relative_attention_num_buckets
self.dropout_rate = dropout_rate
self.layer_norm_epsilon = layer_norm_epsilon
self.initializer_factor = initializer_factor
self.feed_forward_proj = feed_forward_proj
self.use_cache = use_cache
@property
def hidden_size(self):
return self.d_model
@property
def num_attention_heads(self):
return self.num_heads
@property
def num_hidden_layers(self):
return self.num_layers
def _gelu_python(x):
"""
Original Implementation of the GELU activation function in Google BERT repo when initially created. For
information: OpenAI GPT's GELU is slightly different (and gives slightly different results): 0.5 * x * (1 +
torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) This is now written in C in
torch.nn.functional Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def gelu_new(x):
"""
Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see
the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
"""
return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))
gelu = F.gelu
def gelu_fast(x):
return 0.5 * x * (1.0 + torch.tanh(x * 0.7978845608 * (1.0 + 0.044715 * x * x)))
def _silu_python(x):
"""
See Gaussian Error Linear Units (Hendrycks et al., https://arxiv.org/abs/1606.08415) where the SiLU (Sigmoid Linear
Unit) was originally introduced and coined, and see Sigmoid-Weighted Linear Units for Neural Network Function
Approximation in Reinforcement Learning (Elfwing et al., https://arxiv.org/abs/1702.03118) and Swish: a Self-Gated
Activation Function (Ramachandran et al., https://arxiv.org/abs/1710.05941v1) where the SiLU was experimented with
later.
"""
return x * torch.sigmoid(x)
def mish(x):
return x * torch.tanh(torch.nn.functional.softplus(x))
def linear_act(x):
return x
ACT2FN = {
"relu": F.relu,
"gelu": gelu,
"tanh": torch.tanh,
"gelu_new": gelu_new,
"gelu_fast": gelu_fast,
"mish": mish,
"linear": linear_act,
"sigmoid": torch.sigmoid,
}
class T5LayerNorm(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
Construct a layernorm module in the T5 style: no bias and no subtraction of the mean.
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
# layer norm should always be calculated in float32
variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
# convert into float16 if necessary
if self.weight.dtype == torch.float16:
hidden_states = hidden_states.to(torch.float16)
return self.weight * hidden_states
class T5DenseReluDense(nn.Module):
def __init__(self, config):
super().__init__()
self.wi = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states):
hidden_states = self.wi(hidden_states)
hidden_states = F.relu(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.wo(hidden_states)
return hidden_states
class T5DenseGatedGeluDense(nn.Module):
def __init__(self, config):
super().__init__()
self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
self.gelu_act = ACT2FN["gelu_new"]
def forward(self, hidden_states):
hidden_gelu = self.gelu_act(self.wi_0(hidden_states))
hidden_linear = self.wi_1(hidden_states)
hidden_states = hidden_gelu * hidden_linear
hidden_states = self.dropout(hidden_states)
hidden_states = self.wo(hidden_states)
return hidden_states
class T5LayerFF(nn.Module):
def __init__(self, config):
super().__init__()
if config.feed_forward_proj == "relu":
self.DenseReluDense = T5DenseReluDense(config)
elif config.feed_forward_proj == "gated-gelu":
self.DenseReluDense = T5DenseGatedGeluDense(config)
else:
raise ValueError(
f"{self.config.feed_forward_proj} is not supported. Choose between `relu` and `gated-gelu`"
)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(self, hidden_states):
forwarded_states = self.layer_norm(hidden_states)
forwarded_states = self.DenseReluDense(forwarded_states)
hidden_states = hidden_states + self.dropout(forwarded_states)
return hidden_states
class T5Attention(nn.Module):
def __init__(self, config: T5Config, has_relative_attention_bias=False):
super().__init__()
self.is_decoder = config.is_decoder
self.has_relative_attention_bias = has_relative_attention_bias
self.relative_attention_num_buckets = config.relative_attention_num_buckets
self.d_model = config.d_model
self.key_value_proj_dim = config.d_kv
self.n_heads = config.num_heads
self.dropout = config.dropout_rate
self.inner_dim = self.n_heads * self.key_value_proj_dim
# Mesh TensorFlow initialization to avoid scaling before softmax
self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
if self.has_relative_attention_bias:
self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
self.pruned_heads = set()
@staticmethod
def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
"""
Adapted from Mesh Tensorflow:
https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
Translate relative position to a bucket number for relative attention. The relative position is defined as
memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
This should allow for more graceful generalization to longer sequences than the model has been trained on
Args:
relative_position: an int32 Tensor
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
"""
relative_buckets = 0
if bidirectional:
num_buckets //= 2
relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
relative_position = torch.abs(relative_position)
else:
relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
# now relative_position is in the range [0, inf)
# half of the buckets are for exact increments in positions
max_exact = num_buckets // 2
is_small = relative_position < max_exact
# The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
relative_position_if_large = max_exact + (
torch.log(relative_position.float() / max_exact)
/ math.log(max_distance / max_exact)
* (num_buckets - max_exact)
).to(torch.long)
relative_position_if_large = torch.min(
relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
)
relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
return relative_buckets
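# Rough worked example (bidirectional, num_buckets=32, max_distance=128): a relative
# position of -3 lands in bucket 3, +3 in bucket 16 + 3 = 19 (the upper half of the buckets
# is reserved for positive offsets), and a larger offset such as +20 falls into one of the
# logarithmically spaced buckets, here 26.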
def compute_bias(self, query_length, key_length):
""" Compute binned relative position bias """
context_position = torch.arange(query_length, dtype=torch.long)[:, None]
memory_position = torch.arange(key_length, dtype=torch.long)[None, :]
relative_position = memory_position - context_position # shape (query_length, key_length)
relative_position_bucket = self._relative_position_bucket(
relative_position, # shape (query_length, key_length)
bidirectional=(not self.is_decoder),
num_buckets=self.relative_attention_num_buckets,
)
relative_position_bucket = relative_position_bucket.to(self.relative_attention_bias.weight.device)
values = self.relative_attention_bias(relative_position_bucket) # shape (query_length, key_length, num_heads)
values = values.permute([2, 0, 1]).unsqueeze(0) # shape (1, num_heads, query_length, key_length)
return values
def forward(
self,
hidden_states,
mask=None,
key_value_states=None,
position_bias=None,
past_key_value=None,
layer_head_mask=None,
query_length=None,
use_cache=False,
output_attentions=False,
):
"""
Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).
"""
# Input is (batch_size, seq_length, dim)
# Mask is (batch_size, key_length) (non-causal) or (batch_size, key_length, key_length)
# past_key_value[0] is (batch_size, n_heads, q_len - 1, dim_per_head)
batch_size, seq_length = hidden_states.shape[:2]
real_seq_length = seq_length
if past_key_value is not None:
assert (
len(past_key_value) == 2
), "past_key_value should have 2 past states: keys and values. Got {} past states".format(
len(past_key_value)
)
real_seq_length += past_key_value[0].shape[2] if query_length is None else query_length
key_length = real_seq_length if key_value_states is None else key_value_states.shape[1]
def shape(states):
""" projection """
return states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
def unshape(states):
""" reshape """
return states.transpose(1, 2).contiguous().view(batch_size, -1, self.inner_dim)
def project(hidden_states, proj_layer, key_value_states, past_key_value):
""" projects hidden states correctly to key/query states """
if key_value_states is None:
# self-attn
# (batch_size, n_heads, seq_length, dim_per_head)
hidden_states = shape(proj_layer(hidden_states))
elif past_key_value is None:
# cross-attn
# (batch_size, n_heads, seq_length, dim_per_head)
hidden_states = shape(proj_layer(key_value_states))
if past_key_value is not None:
if key_value_states is None:
# self-attn
# (batch_size, n_heads, key_length, dim_per_head)
hidden_states = torch.cat([past_key_value, hidden_states], dim=2)
else:
# cross-attn
hidden_states = past_key_value
return hidden_states
# get query states
query_states = shape(self.q(hidden_states)) # (batch_size, n_heads, seq_length, dim_per_head)
# get key/value states
key_states = project(
hidden_states, self.k, key_value_states, past_key_value[0] if past_key_value is not None else None
)
value_states = project(
hidden_states, self.v, key_value_states, past_key_value[1] if past_key_value is not None else None
)
# compute scores
scores = torch.matmul(
query_states, key_states.transpose(3, 2)
) # equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
if position_bias is None:
if not self.has_relative_attention_bias:
position_bias = torch.zeros(
(1, self.n_heads, real_seq_length, key_length), device=scores.device, dtype=scores.dtype
)
else:
position_bias = self.compute_bias(real_seq_length, key_length)
# if key and values are already calculated
# we want only the last query position bias
if past_key_value is not None:
position_bias = position_bias[:, :, -seq_length:, :]
if mask is not None:
position_bias = position_bias + mask # (batch_size, n_heads, seq_length, key_length)
scores += position_bias
attn_weights = F.softmax(scores.float(), dim=-1).type_as(
scores
) # (batch_size, n_heads, seq_length, key_length)
attn_weights = F.dropout(
attn_weights, p=self.dropout, training=self.training
) # (batch_size, n_heads, seq_length, key_length)
# Mask heads if we want to
if layer_head_mask is not None:
attn_weights = attn_weights * layer_head_mask
attn_output = unshape(torch.matmul(attn_weights, value_states)) # (batch_size, seq_length, dim)
attn_output = self.o(attn_output)
present_key_value_state = (key_states, value_states) if (self.is_decoder and use_cache) else None
outputs = (attn_output,) + (present_key_value_state,) + (position_bias,)
if output_attentions:
outputs = outputs + (attn_weights,)
return outputs
class T5LayerSelfAttention(nn.Module):
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.SelfAttention = T5Attention(config, has_relative_attention_bias=has_relative_attention_bias)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
layer_head_mask=None,
past_key_value=None,
use_cache=False,
output_attentions=False,
):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.SelfAttention(
normed_hidden_states,
mask=attention_mask,
position_bias=position_bias,
layer_head_mask=layer_head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = hidden_states + self.dropout(attention_output[0])
outputs = (hidden_states,) + attention_output[1:] # add attentions if we output them
return outputs
class T5LayerCrossAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.EncDecAttention = T5Attention(config, has_relative_attention_bias=False)
self.layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
def forward(
self,
hidden_states,
key_value_states,
attention_mask=None,
position_bias=None,
layer_head_mask=None,
past_key_value=None,
use_cache=False,
query_length=None,
output_attentions=False,
):
normed_hidden_states = self.layer_norm(hidden_states)
attention_output = self.EncDecAttention(
normed_hidden_states,
mask=attention_mask,
key_value_states=key_value_states,
position_bias=position_bias,
layer_head_mask=layer_head_mask,
past_key_value=past_key_value,
use_cache=use_cache,
query_length=query_length,
output_attentions=output_attentions,
)
layer_output = hidden_states + self.dropout(attention_output[0])
outputs = (layer_output,) + attention_output[1:] # add attentions if we output them
return outputs
class T5Block(nn.Module):
def __init__(self, config, has_relative_attention_bias=False):
super().__init__()
self.is_decoder = config.is_decoder
self.layer = nn.ModuleList()
self.layer.append(T5LayerSelfAttention(config, has_relative_attention_bias=has_relative_attention_bias))
if self.is_decoder:
self.layer.append(T5LayerCrossAttention(config))
self.layer.append(T5LayerFF(config))
def forward(
self,
hidden_states,
attention_mask=None,
position_bias=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
encoder_decoder_position_bias=None,
layer_head_mask=None,
encoder_layer_head_mask=None,
past_key_value=None,
use_cache=False,
output_attentions=False,
return_dict=True,
):
if past_key_value is not None:
assert self.is_decoder, "Only decoder can use `past_key_values`"
expected_num_past_key_values = 2 if encoder_hidden_states is None else 4
error_message = "There should be {} past states. 2 (past / key) for self attention.{} Got {} past key / value states".format(
expected_num_past_key_values,
"2 (past / key) for cross attention" if expected_num_past_key_values == 4 else "",
len(past_key_value),
)
assert len(past_key_value) == expected_num_past_key_values, error_message
self_attn_past_key_value = past_key_value[:2]
cross_attn_past_key_value = past_key_value[2:]
else:
self_attn_past_key_value, cross_attn_past_key_value = None, None
self_attention_outputs = self.layer[0](
hidden_states,
attention_mask=attention_mask,
position_bias=position_bias,
layer_head_mask=layer_head_mask,
past_key_value=self_attn_past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states, present_key_value_state = self_attention_outputs[:2]
attention_outputs = self_attention_outputs[2:] # Keep self-attention outputs and relative position weights
# clamp inf values to enable fp16 training
if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
do_cross_attention = self.is_decoder and encoder_hidden_states is not None
if do_cross_attention:
# the actual query length is unknown for cross attention
# if using past key value states. Need to inject it here
if present_key_value_state is not None:
query_length = present_key_value_state[0].shape[2]
else:
query_length = None
cross_attention_outputs = self.layer[1](
hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
position_bias=encoder_decoder_position_bias,
layer_head_mask=encoder_layer_head_mask,
past_key_value=cross_attn_past_key_value,
query_length=query_length,
use_cache=use_cache,
output_attentions=output_attentions,
)
hidden_states = cross_attention_outputs[0]
# clamp inf values to enable fp16 training
if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
# Combine self attn and cross attn key value states
if present_key_value_state is not None:
present_key_value_state = present_key_value_state + cross_attention_outputs[1]
# Keep cross-attention outputs and relative position weights
attention_outputs = attention_outputs + cross_attention_outputs[2:]
# Apply Feed Forward layer
hidden_states = self.layer[-1](hidden_states)
# clamp inf values to enable fp16 training
if hidden_states.dtype == torch.float16 and torch.isinf(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
outputs = outputs + (present_key_value_state,) + attention_outputs
return outputs # hidden-states, present_key_value_states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)
class T5Stack(nn.Module):
def __init__(self, config, embed_tokens=None):
super().__init__()
self.config = config
self.embed_tokens = embed_tokens
self.is_decoder = config.is_decoder
self.block = nn.ModuleList(
[T5Block(config, has_relative_attention_bias=bool(i == 0)) for i in range(config.num_layers)]
)
self.final_layer_norm = T5LayerNorm(config.d_model, eps=config.layer_norm_epsilon)
self.dropout = nn.Dropout(config.dropout_rate)
self.model_parallel = False
self.device_map = None
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, new_embeddings):
self.embed_tokens = new_embeddings
def get_extended_attention_mask(self, attention_mask, input_shape, device):
"""
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
Arguments:
attention_mask (:obj:`torch.Tensor`):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
input_shape (:obj:`Tuple[int]`):
The shape of the input to the model.
device: (:obj:`torch.device`):
The device of the input to the model.
Returns:
:obj:`torch.Tensor` The extended attention mask, with a the same dtype as :obj:`attention_mask.dtype`.
"""
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder:
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length, device=device)
causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
# in case past_key_values are used we need to add a prefix ones mask to the causal mask
# causal and attention masks must have same type with pytorch version < 1.3
causal_mask = causal_mask.to(attention_mask.dtype)
if causal_mask.shape[1] < attention_mask.shape[1]:
prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
causal_mask = torch.cat(
[
torch.ones(
(batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype
),
causal_mask,
],
axis=-1,
)
extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
else:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
"Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
input_shape, attention_mask.shape
)
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
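# Illustrative example: a padding mask [1, 1, 0] for one sequence becomes [0., 0., -10000.]
# broadcast to shape (batch, 1, 1, seq), so padded positions are effectively removed from
# the softmax.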
return extended_attention_mask
def invert_attention_mask(self, encoder_attention_mask):
"""
Invert an attention mask (e.g., switches 0. and 1.).
Args:
encoder_attention_mask (:obj:`torch.Tensor`): An attention mask.
Returns:
:obj:`torch.Tensor`: The inverted attention mask.
"""
if encoder_attention_mask.dim() == 3:
encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
# T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
# Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
# /transformer/transformer_layers.py#L270
# encoder_extended_attention_mask = (encoder_extended_attention_mask ==
# encoder_extended_attention_mask.transpose(-1, -2))
encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e9
return encoder_extended_attention_mask
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
inputs_embeds=None,
head_mask=None,
encoder_head_mask=None,
past_key_values=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
# Model parallel
if self.model_parallel:
torch.cuda.set_device(self.first_device)
self.embed_tokens = self.embed_tokens.to(self.first_device)
use_cache = use_cache if use_cache is not None else self.config.use_cache
# if input_ids is not None and inputs_embeds is not None:
# err_msg_prefix = "decoder_" if self.is_decoder else ""
# raise ValueError(
# f"You cannot specify both {err_msg_prefix}inputs and {err_msg_prefix}inputs_embeds at the same time"
# )
# elif input_ids is not None:
input_shape = input_ids.size()
# input_ids = input_ids.view(-1, input_shape[-1])
# elif inputs_embeds is not None:
# input_shape = inputs_embeds.size()[:-1]
# else:
# err_msg_prefix = "decoder_" if self.is_decoder else ""
# raise ValueError(f"You have to specify either {err_msg_prefix}inputs or {err_msg_prefix}inputs_embeds")
if inputs_embeds is None:
assert self.embed_tokens is not None, "You have to initialize the model with valid token embeddings"
inputs_embeds = self.embed_tokens(input_ids)
batch_size, seq_length = input_shape
# required mask seq length can be calculated via length of past
mask_seq_length = past_key_values[0][0].shape[2] + seq_length if past_key_values is not None else seq_length
if use_cache is True:
assert self.is_decoder, ":obj:`use_cache` can only be set to `True` if {} is used as a decoder".format(
self
)
if attention_mask is None:
attention_mask = torch.ones(batch_size, mask_seq_length).to(inputs_embeds.device)
if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None:
encoder_seq_length = encoder_hidden_states.shape[1]
encoder_attention_mask = torch.ones(
batch_size, encoder_seq_length, device=inputs_embeds.device, dtype=torch.long
)
# initialize past_key_values with `None` if past does not exist
if past_key_values is None:
past_key_values = [None] * len(self.block)
# ourselves in which case we just need to make it broadcastable to all heads.
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, inputs_embeds.device)
if self.is_decoder and encoder_attention_mask is not None:
encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
else:
encoder_extended_attention_mask = None
# Prepare head mask if needed
present_key_value_states = () if use_cache else None
all_hidden_states = None
all_attentions = () if output_attentions else None
all_cross_attentions = () if (output_attentions and self.is_decoder) else None
position_bias = None
encoder_decoder_position_bias = None
hidden_states = self.dropout(inputs_embeds)
for i, (layer_module, past_key_value) in enumerate(zip(self.block, past_key_values)):
layer_head_mask = None
encoder_layer_head_mask = None
# Model parallel
if self.model_parallel:
torch.cuda.set_device(hidden_states.device)
# Ensure that attention_mask is always on the same device as hidden_states
if attention_mask is not None:
attention_mask = attention_mask.to(hidden_states.device)
if position_bias is not None:
position_bias = position_bias.to(hidden_states.device)
if encoder_hidden_states is not None:
encoder_hidden_states = encoder_hidden_states.to(hidden_states.device)
if encoder_extended_attention_mask is not None:
encoder_extended_attention_mask = encoder_extended_attention_mask.to(hidden_states.device)
if encoder_decoder_position_bias is not None:
encoder_decoder_position_bias = encoder_decoder_position_bias.to(hidden_states.device)
if layer_head_mask is not None:
layer_head_mask = layer_head_mask.to(hidden_states.device)
if encoder_layer_head_mask is not None:
encoder_layer_head_mask = encoder_layer_head_mask.to(hidden_states.device)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states,
attention_mask=extended_attention_mask,
position_bias=position_bias,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
encoder_decoder_position_bias=encoder_decoder_position_bias,
layer_head_mask=None,
encoder_layer_head_mask=None,
past_key_value=past_key_value,
use_cache=use_cache,
output_attentions=output_attentions,
)
# layer_outputs is a tuple with:
# hidden-states, key-value-states, (self-attention weights), (self-attention position bias), (cross-attention weights), (cross-attention position bias)
hidden_states, present_key_value_state = layer_outputs[:2]
# We share the position biases between the layers - the first layer store them
# layer_outputs = hidden-states, key-value-states (self-attention weights),
# (self-attention position bias), (cross-attention weights), (cross-attention position bias)
position_bias = layer_outputs[2]
if self.is_decoder and encoder_hidden_states is not None:
encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3]
# append next layer key value states
if use_cache:
present_key_value_states = present_key_value_states + (present_key_value_state,)
if output_attentions:
all_attentions = all_attentions + (layer_outputs[3],)
if self.is_decoder:
all_cross_attentions = all_cross_attentions + (layer_outputs[5],)
# Model Parallel: If it's the last layer for that device, put things on the next device
if self.model_parallel:
for k, v in self.device_map.items():
if i == v[-1] and "cuda:" + str(k) != self.last_device:
hidden_states = hidden_states.to("cuda:" + str(k + 1))
hidden_states = self.final_layer_norm(hidden_states)
hidden_states = self.dropout(hidden_states)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
return (hidden_states,)
class T5Model(nn.Module):
_keys_to_ignore_on_load_missing = [
r"encoder\.embed_tokens\.weight",
r"decoder\.embed_tokens\.weight",
]
_keys_to_ignore_on_load_unexpected = [
r"decoder\.block\.0\.layer\.1\.EncDecAttention\.relative_attention_bias\.weight",
]
def __init__(self, config: T5Config):
super().__init__()
self.config = config
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.is_decoder = False
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = T5Stack(encoder_config, self.shared)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.is_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = T5Stack(decoder_config, self.shared)
# Model parallel
self.model_parallel = False
self.device_map = None
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(heads)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Returns:
Example::
>>> from transformers import T5Tokenizer, T5Model
>>> tokenizer = T5Tokenizer.from_pretrained('t5-small')
>>> model = T5Model.from_pretrained('t5-small')
>>> input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1
>>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
>>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
>>> last_hidden_states = outputs.last_hidden_state
"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = encoder_outputs[0]
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
past_key_values=past_key_values,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
encoder_head_mask=head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
return (decoder_outputs[0], )
class T5ForConditionalGeneration(nn.Module):
_keys_to_ignore_on_load_missing = [
r"encoder\.embed_tokens\.weight",
r"decoder\.embed_tokens\.weight",
r"lm_head\.weight",
]
_keys_to_ignore_on_load_unexpected = [
r"decoder\.block\.0\.layer\.1\.EncDecAttention\.relative_attention_bias\.weight",
]
def __init__(self, config):
super().__init__()
self.model_dim = config.d_model
self.config = config
self.shared = nn.Embedding(config.vocab_size, config.d_model)
encoder_config = copy.deepcopy(config)
encoder_config.is_decoder = False
encoder_config.use_cache = False
encoder_config.is_encoder_decoder = False
self.encoder = T5Stack(encoder_config, self.shared)
decoder_config = copy.deepcopy(config)
decoder_config.is_decoder = True
decoder_config.is_encoder_decoder = False
decoder_config.num_layers = config.num_decoder_layers
self.decoder = T5Stack(decoder_config, self.shared)
self.lm_head = nn.Linear(config.d_model, config.vocab_size, bias=False)
# Model parallel
self.model_parallel = False
self.device_map = None
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, new_embeddings):
self.shared = new_embeddings
self.encoder.set_input_embeddings(new_embeddings)
self.decoder.set_input_embeddings(new_embeddings)
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def get_output_embeddings(self):
return self.lm_head
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[-100, 0, ...,
config.vocab_size - 1]`. All labels set to ``-100`` are ignored (masked), the loss is only computed for
labels in ``[0, ..., config.vocab_size]``
Returns:
Examples::
>>> from transformers import T5Tokenizer, T5ForConditionalGeneration
>>> tokenizer = T5Tokenizer.from_pretrained('t5-small')
>>> model = T5ForConditionalGeneration.from_pretrained('t5-small')
>>> input_ids = tokenizer('The <extra_id_0> walks in <extra_id_1> park', return_tensors='pt').input_ids
>>> labels = tokenizer('<extra_id_0> cute dog <extra_id_1> the <extra_id_2> </s>', return_tensors='pt').input_ids
>>> outputs = model(input_ids=input_ids, labels=labels)
>>> loss = outputs.loss
>>> logits = outputs.logits
>>> input_ids = tokenizer("summarize: studies have shown that owning a dog is good for you ", return_tensors="pt").input_ids # Batch size 1
>>> outputs = model.generate(input_ids)
"""
use_cache = use_cache if use_cache is not None else self.config.use_cache
# FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask
# Encode if needed (training, first prediction pass)
if encoder_outputs is None:
# Convert encoder inputs in embeddings if needed
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
inputs_embeds=inputs_embeds,
head_mask=head_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
hidden_states = encoder_outputs[0]
if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:
# get decoder inputs from shifting lm labels to the right
decoder_input_ids = self._shift_right(labels)
# If decoding with past key value states, only the last tokens
# should be given as an input
if past_key_values is not None:
assert labels is None, "Decoder should not use cached key value states when training."
if decoder_input_ids is not None:
decoder_input_ids = decoder_input_ids[:, -1:]
if decoder_inputs_embeds is not None:
decoder_inputs_embeds = decoder_inputs_embeds[:, -1:]
# Decode
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
inputs_embeds=decoder_inputs_embeds,
past_key_values=past_key_values,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
encoder_head_mask=head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = decoder_outputs[0]
lm_logits = self.lm_head(sequence_output)
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-100)
loss = loss_fct(lm_logits.view(-1, lm_logits.size(-1)), labels.view(-1))
# TODO(thom): Add z_loss https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/layers.py#L666
return (loss, lm_logits)
return (lm_logits, )
def prepare_inputs_for_generation(
self, input_ids, past=None, attention_mask=None, use_cache=None, encoder_outputs=None, **kwargs
):
# cut decoder_input_ids if past is used
if past is not None:
input_ids = input_ids[:, -1:]
return {
"decoder_input_ids": input_ids,
"past_key_values": past,
"encoder_outputs": encoder_outputs,
"attention_mask": attention_mask,
"use_cache": use_cache,
}
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return self._shift_right(labels)
def get_extended_attention_mask(attention_mask, input_shape, device, is_decoder=False):
"""
Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
Arguments:
attention_mask (:obj:`torch.Tensor`):
Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
input_shape (:obj:`Tuple[int]`):
The shape of the input to the model.
device: (:obj:`torch.device`):
The device of the input to the model.
Returns:
        :obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
"""
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
# Provided a padding mask of dimensions [batch_size, seq_length]
# - if the model is a decoder, apply a causal mask in addition to the padding mask
# - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
if is_decoder:
batch_size, seq_length = input_shape
seq_ids = torch.arange(seq_length, device=device)
causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
# in case past_key_values are used we need to add a prefix ones mask to the causal mask
# causal and attention masks must have same type with pytorch version < 1.3
causal_mask = causal_mask.to(attention_mask.dtype)
if causal_mask.shape[1] < attention_mask.shape[1]:
prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
causal_mask = torch.cat(
[
torch.ones(
(batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype
),
causal_mask,
],
axis=-1,
)
extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
else:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
"Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
input_shape, attention_mask.shape
)
)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
if __name__ == '__main__':
config = T5Config()
model = T5Model(config)
t1 = (torch.rand(1, 10)*10).long()
t2 = (torch.rand(1, 12) * 10).long()
out = model(input_ids=t1, decoder_input_ids=t2)
print(len(out))
print(out[0].shape)
# attention_mask = torch.ones(1, 5)
# out = get_extended_attention_mask(attention_mask, attention_mask.shape, device="cpu", is_decoder=True)
#
# print(out)
# print(out.shape)
```
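Since the trimmed `T5Stack.forward` above returns only `(hidden_states,)`, no `past_key_values` are handed back to the caller, so the simplest way to decode with this port is to re-feed the growing decoder prefix at every step. The sketch below is a minimal greedy loop under that assumption (it relies on `T5Config` and `T5ForConditionalGeneration` defined above being in scope); the decoder start id `0` and EOS id `1` are the usual T5 conventions, not values read from this file.

```python
# Minimal greedy decoding sketch for the simplified T5ForConditionalGeneration above.
# Assumptions: decoder start token id 0 and EOS id 1 (standard T5 conventions); the
# encoder is re-run every step because the trimmed stack does not return past_key_values.
import torch

config = T5Config()
model = T5ForConditionalGeneration(config)
model.eval()

input_ids = (torch.rand(1, 10) * 10).long()               # toy source ids, as in __main__
decoder_input_ids = torch.zeros(1, 1, dtype=torch.long)   # assumed decoder start id = 0

with torch.no_grad():
    for _ in range(20):                                    # generate at most 20 tokens
        lm_logits = model(input_ids=input_ids,
                          decoder_input_ids=decoder_input_ids)[0]
        next_token = lm_logits[:, -1].argmax(dim=-1, keepdim=True)
        decoder_input_ids = torch.cat([decoder_input_ids, next_token], dim=1)
        if next_token.item() == 1:                         # assumed EOS id = 1
            break

print(decoder_input_ids)
```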
#### File: bert_seq2seq/bert_seq2seq/simbert_model.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import random
from bert_seq2seq.tokenizer import Tokenizer, load_chinese_base_vocab
import time
from bert_seq2seq.config import yayun_list
import os
from bert_seq2seq.basic_bert import BasicBert
import numpy as np
from bert_seq2seq.helper import RepetitionPenaltyLogitsProcessor, TemperatureLogitsProcessor, TopKLogitsProcessor, \
TopPLogitsProcessor, ListProcessor
def top_k_top_p_filtering(logits, top_k=0, top_p=0.0, filter_value=-float('Inf')):
""" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k > 0: keep only top k tokens with highest probability (top-k filtering).
top_p > 0.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
assert logits.dim() == 1 # batch size 1 for now - could be updated for more but the code would be less clear
top_k = min(top_k, logits.size(-1)) # Safety check
if top_k > 0:
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probs > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
return logits
class SimBertModel(BasicBert):
"""
"""
def __init__(self, word2ix, model_name="roberta", tokenizer=None):
super(SimBertModel, self).__init__(word2ix=word2ix, model_name=model_name, tokenizer=tokenizer)
self.word2ix = word2ix
self.hidden_dim = self.config.hidden_size
self.vocab_size = len(word2ix)
def compute_loss(self, cls_token_state, predictions, labels, target_mask):
loss1 = self.compute_loss_of_seq2seq(predictions, labels, target_mask)
loss2 = self.compute_loss_of_similarity(cls_token_state) ## 拿出cls向量
return loss1 + loss2
def compute_loss_of_seq2seq(self, predictions, labels, target_mask):
predictions = predictions.view(-1, self.vocab_size)
labels = labels.view(-1)
target_mask = target_mask.view(-1).float()
loss = nn.CrossEntropyLoss(ignore_index=0, reduction="none")
return (loss(predictions, labels) * target_mask).sum() / target_mask.sum() ## 通过mask 取消 pad 和句子a部分预测的影响
def compute_loss_of_similarity(self, y_pred):
y_true = self.get_labels_of_similarity(y_pred) # 构建标签
y_true = y_true.to(self.device)
norm_a = torch.nn.functional.normalize(y_pred, dim=-1, p=2)
similarities = norm_a.matmul(norm_a.t())
similarities = similarities - (torch.eye(y_pred.shape[0]) * 1e12).to(self.device) # 排除对角线
similarities = similarities * 20 # scale
loss_f = nn.CrossEntropyLoss()
loss = loss_f(similarities, y_true)
return loss
def get_labels_of_similarity(self, y_pred):
idxs = torch.arange(0, y_pred.shape[0])
idxs_1 = idxs[None, :]
idxs_2 = (idxs + 1 - idxs % 2 * 2)[:, None]
labels = (idxs_1 == idxs_2).float().argmax(dim=-1).long()
return labels
def forward(self, input_tensor, token_type_id, position_enc=None, labels=None):
## 传入输入,位置编码,token type id ,还有句子a 和句子b的长度,注意都是传入一个batch数据
## 传入的几个值,在seq2seq 的batch iter 函数里面都可以返回
input_tensor = input_tensor.to(self.device)
token_type_id = token_type_id.to(self.device)
if position_enc is not None:
position_enc = position_enc.to(self.device)
if labels is not None :
labels = labels.to(self.device)
input_shape = input_tensor.shape
batch_size = input_shape[0]
seq_len = input_shape[1]
## 构建特殊的mask
ones = torch.ones((1, 1, seq_len, seq_len), dtype=torch.float32, device=self.device)
a_mask = ones.tril() # 下三角矩阵
s_ex12 = token_type_id.unsqueeze(1).unsqueeze(2).float()
s_ex13 = token_type_id.unsqueeze(1).unsqueeze(3).float()
a_mask = (1.0 - s_ex12) * (1.0 - s_ex13) + s_ex13 * a_mask
enc_layers, _ = self.bert(input_tensor, position_ids=position_enc, token_type_ids=token_type_id, attention_mask=a_mask,
output_all_encoded_layers=True)
        sequence_out = enc_layers[-1]  ## take the output of the last encoder layer
        sequence_hidden, predictions = self.cls(sequence_out)
if labels is not None:
## 计算loss
## 需要构建特殊的输出mask 才能计算正确的loss
# 预测的值不用取最后sep符号的结果 因此是到-1
predictions = predictions[:, :-1].contiguous()
target_mask = token_type_id[:, 1:].contiguous()
loss = self.compute_loss(sequence_hidden[0], predictions, labels, target_mask)
return predictions, loss
else :
return predictions
def generate(self, text, out_max_length=40, beam_size=1, max_length=256):
# 对 一个 句子生成相应的结果
## 通过输出最大长度得到输入的最大长度,这里问题不大,如果超过最大长度会进行截断
self.out_max_length = out_max_length
input_max_length = max_length - out_max_length
# print(text)
try:
token_ids, token_type_ids = self.tokenizer.encode(text, max_length=input_max_length)
except:
# 可能是transformer的tokenizer
tokenizer_out = self.tokenizer.encode_plus(text, max_length=input_max_length, truncation=True)
token_ids = tokenizer_out["input_ids"]
token_type_ids = tokenizer_out["token_type_ids"]
token_ids = torch.tensor(token_ids, device=self.device).view(1, -1)
token_type_ids = torch.tensor(token_type_ids, device=self.device).view(1, -1)
out_puts_ids = self.beam_search(token_ids, token_type_ids, self.word2ix, beam_size=beam_size, device=self.device)
return self.tokenizer.decode(out_puts_ids.cpu().numpy())
def random_sample(
self,
inputs,
n,
topk=None,
topp=None,
states=None,
temperature=1,
min_ends=1
):
"""随机采样n个结果
说明:非None的topk表示每一步只从概率最高的topk个中采样;而非None的topp
表示每一步只从概率最高的且概率之和刚好达到topp的若干个token中采样。
返回:n个解码序列组成的list。
"""
inputs = [np.array([i]) for i in inputs]
output_ids = self.first_output_ids
results = []
for step in range(self.maxlen):
probas, states = self.predict(
inputs, output_ids, states, temperature, 'probas'
) # 计算当前概率
probas /= probas.sum(axis=1, keepdims=True) # 确保归一化
if step == 0: # 第1步预测后将结果重复n次
probas = np.repeat(probas, n, axis=0)
inputs = [np.repeat(i, n, axis=0) for i in inputs]
output_ids = np.repeat(output_ids, n, axis=0)
if topk is not None:
k_indices = probas.argpartition(-topk,
axis=1)[:, -topk:] # 仅保留topk
probas = np.take_along_axis(probas, k_indices, axis=1) # topk概率
probas /= probas.sum(axis=1, keepdims=True) # 重新归一化
if topp is not None:
p_indices = probas.argsort(axis=1)[:, ::-1] # 从高到低排序
probas = np.take_along_axis(probas, p_indices, axis=1) # 排序概率
cumsum_probas = np.cumsum(probas, axis=1) # 累积概率
flag = np.roll(cumsum_probas >= topp, 1, axis=1) # 标记超过topp的部分
flag[:, 0] = False # 结合上面的np.roll,实现平移一位的效果
probas[flag] = 0 # 后面的全部置零
probas /= probas.sum(axis=1, keepdims=True) # 重新归一化
sample_func = lambda p: np.random.choice(len(p), p=p) # 按概率采样函数
sample_ids = np.apply_along_axis(sample_func, 1, probas) # 执行采样
sample_ids = sample_ids.reshape((-1, 1)) # 对齐形状
if topp is not None:
sample_ids = np.take_along_axis(
p_indices, sample_ids, axis=1
) # 对齐原id
if topk is not None:
sample_ids = np.take_along_axis(
k_indices, sample_ids, axis=1
) # 对齐原id
output_ids = np.concatenate([output_ids, sample_ids], 1) # 更新输出
is_end = output_ids[:, -1] == self.end_id # 标记是否以end标记结束
end_counts = (output_ids == self.end_id).sum(1) # 统计出现的end标记
if output_ids.shape[1] >= self.minlen: # 最短长度判断
flag = is_end & (end_counts >= min_ends) # 标记已完成序列
if flag.any(): # 如果有已完成的
for ids in output_ids[flag]: # 存好已完成序列
results.append(ids)
flag = (flag == False) # 标记未完成序列
inputs = [i[flag] for i in inputs] # 只保留未完成部分输入
output_ids = output_ids[flag] # 只保留未完成部分候选集
end_counts = end_counts[flag] # 只保留未完成部分end计数
if len(output_ids) == 0:
break
# 如果还有未完成序列,直接放入结果
for ids in output_ids:
results.append(ids)
# 返回结果
return results
def sample_generate(self, text, out_max_length=40, top_k=30,
top_p=0.0, max_length=256, repetition_penalty=1.0,
temperature=1.0, sample_num=1):
input_max_length = max_length - out_max_length
token_ids, token_type_ids = self.tokenizer.encode(text, max_length=input_max_length)
result_list = []
lp = [RepetitionPenaltyLogitsProcessor(penalty=repetition_penalty),
TemperatureLogitsProcessor(temperature=temperature),
TopKLogitsProcessor(top_k=top_k),
TopPLogitsProcessor(top_p=top_p),
]
list_processor = ListProcessor(lp)
token_ids = torch.tensor(token_ids, device=self.device, dtype=torch.long).view(1, -1)
token_type_ids = torch.tensor(token_type_ids, device=self.device, dtype=torch.long).view(1, -1)
device = self.device
output_ids = []
sep_id = self.word2ix["[SEP]"]
with torch.no_grad():
for step in range(out_max_length):
if step == 0:
token_ids = token_ids.repeat((sample_num, 1))
token_type_ids = token_type_ids.repeat((sample_num, 1))
scores = self.forward(token_ids, token_type_ids)
logit_score = torch.log_softmax(scores[:, -1], dim=-1)
logit_score[:, self.word2ix["[UNK]"]] = -float('Inf')
filtered_logits = list_processor(token_ids, logit_score)
# filtered_logits = top_k_top_p_filtering(logit_score, top_k=top_k, top_p=top_p)
next_token = torch.multinomial(F.softmax(filtered_logits, dim=-1), num_samples=1)
if step == 0:
output_ids = next_token.view((sample_num, 1))
else :
output_ids = torch.cat([output_ids, next_token.view((sample_num, 1))], dim=1)
token_ids = torch.cat([token_ids, next_token.view((sample_num, 1)).long()], dim=1)
# token_ids = torch.cat((token_ids, next_token.long()), dim=1)
token_type_ids = torch.cat([token_type_ids, torch.ones((sample_num, 1), device=device, dtype=torch.long)], dim=1)
is_end = (output_ids[:, -1] == sep_id)
if is_end.any():
for ids in output_ids[is_end]:
# 保存输出结果
sample_num -= 1
result_list.append(self.tokenizer.decode(ids.cpu().numpy()[:-1]))
is_end = (is_end == False) # 标记未完成序列
token_ids = token_ids[is_end] # 保留未完成的输入
output_ids = output_ids[is_end] # 只保留未完成部分候选集
if len(output_ids) == 0:
break
token_type_ids = token_type_ids[is_end] # 保留未完成的输入
return result_list
def beam_search(self, token_ids, token_type_ids, word2ix, beam_size=1, device="cpu"):
"""
beam-search操作
"""
sep_id = word2ix["[SEP]"]
# 用来保存输出序列
output_ids = torch.empty(1, 0, device=device, dtype=torch.long)
# 用来保存累计得分
with torch.no_grad():
output_scores = torch.zeros(token_ids.shape[0], device=device)
for step in range(self.out_max_length):
if step == 0:
scores = self.forward(token_ids, token_type_ids)
# 重复beam-size次 输入ids
token_ids = token_ids.view(1, -1).repeat(beam_size, 1)
token_type_ids = token_type_ids.view(1, -1).repeat(beam_size, 1)
else:
scores = self.forward(new_input_ids, new_token_type_ids)
logit_score = torch.log_softmax(scores[:, -1], dim=-1)
logit_score = output_scores.view(-1, 1) + logit_score # 累计得分
## 取topk的时候我们是展平了然后再去调用topk函数
# 展平
logit_score = logit_score.view(-1)
hype_score, hype_pos = torch.topk(logit_score, beam_size)
indice1 = (hype_pos // scores.shape[-1]) # 行索引
indice2 = (hype_pos % scores.shape[-1]).long().reshape(-1, 1) # 列索引
# 更新得分
output_scores = hype_score
output_ids = torch.cat([output_ids[indice1], indice2], dim=1).long()
new_input_ids = torch.cat([token_ids, output_ids], dim=1)
new_token_type_ids = torch.cat([token_type_ids, torch.ones_like(output_ids)], dim=1)
end_counts = (output_ids == sep_id).sum(1) # 统计出现的end标记
best_one = output_scores.argmax()
if end_counts[best_one] == 1:
# 说明出现终止了~
return output_ids[best_one][:-1]
else :
# 保留未完成部分
flag = (end_counts < 1) # 标记未完成序列
if not flag.all(): # 如果有已完成的
token_ids = token_ids[flag]
token_type_ids = token_type_ids[flag]
new_input_ids = new_input_ids[flag]
new_token_type_ids = new_token_type_ids[flag]
output_ids = output_ids[flag] # 扔掉已完成序列
output_scores = output_scores[flag] # 扔掉已完成序列
end_counts = end_counts[flag] # 扔掉已完成end计数
beam_size = flag.sum() # topk相应变化
return output_ids[output_scores.argmax()]
```
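A small usage sketch for the module-level `top_k_top_p_filtering` helper above (it is not wired into `sample_generate`, which uses the logits-processor list instead). The import path assumes the file is importable as `bert_seq2seq.simbert_model`, as its header suggests.

```python
# Filter a toy next-token distribution, then sample only from the surviving tokens.
import torch
import torch.nn.functional as F
from bert_seq2seq.simbert_model import top_k_top_p_filtering  # assumed import path

logits = torch.tensor([2.0, 1.0, 0.5, -1.0, -3.0])   # scores for a 5-token "vocabulary"
filtered = top_k_top_p_filtering(logits.clone(), top_k=3, top_p=0.9)
print(filtered)                                       # pruned entries are set to -inf
next_token = torch.multinomial(F.softmax(filtered, dim=-1), num_samples=1)
print(next_token)
```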
#### File: bert_seq2seq/examples/gpt2_generate_article.py
```python
from torch.utils import data
from bert_seq2seq import load_gpt, tokenizer
import torch
from tqdm import tqdm
import os
import time
import glob
from torch.utils.data import Dataset, DataLoader
from bert_seq2seq import Tokenizer, load_chinese_base_vocab
vocab_path = "./state_dict/gpt2/vocab.txt"
model_path = "./state_dict/gpt2/pytorch_model.bin"
model_save_path = "./gpt2_article_gen.bin"
batch_size = 4
lr = 2e-5
word2idx = load_chinese_base_vocab(vocab_path)
data_path = glob.glob("./corpus/THUCNews/*/*.txt")
class SeqDataset(Dataset):
"""
针对特定数据集,定义一个相关的取数据的方式
"""
def __init__(self):
## 一般init函数是加载所有数据
super(SeqDataset, self).__init__()
# 读原始数据
self.idx2word = {k: v for v, k in word2idx.items()}
self.tokenizer = Tokenizer(word2idx)
def __getitem__(self, i):
## 得到单个数据
file_path = data_path[i]
with open(file_path, "r", encoding="utf-8") as f:
lines = f.readlines()
title = lines[0].strip("\n")
content = lines[1:]
content = "".join(content)
content = content.replace("\n", "&").replace(" ", "").replace("&&", "").replace("”", "").replace("“", "")
content = content.split("&")
cons_text = ""
index = 0
while len(cons_text) < 900 and index < len(content):
cons_text += content[index] + "&"
index += 1
# print(title)
# print(cons_text)
        # # print(content)
if len(title) + len(content) > 1024:
if i == 0:
return self.__getitem__(i + 1)
else :
return self.__getitem__(i - 1)
if len(cons_text) == 0:
if i == 0:
return self.__getitem__(i + 1)
else :
return self.__getitem__(i - 1)
token_ids, _ = self.tokenizer.encode(title + "&" + cons_text, max_length=1000)
output = {
"token_ids": token_ids,
}
return output
def __len__(self):
return len(data_path)
def collate_fn(batch):
"""
动态padding, batch为一部分sample
"""
def padding(indice, max_length, pad_idx=0):
"""
pad 函数
"""
pad_indice = [item + [pad_idx] * max(0, max_length - len(item)) for item in indice]
return torch.tensor(pad_indice)
token_ids = [data["token_ids"] for data in batch]
max_length = max([len(t) for t in token_ids])
token_ids_padded = padding(token_ids, max_length)
target_ids_padded = token_ids_padded.clone()
target_ids_padded[target_ids_padded == 0] = -100
return token_ids_padded, target_ids_padded
class Trainer:
def __init__(self):
# 加载数据
# 判断是否有可用GPU
self.device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
print("device: " + str(self.device))
# 定义模型
# self.gpt_model = AutoModelWithLMHead.from_pretrained(model_path)
# self.gpt_model.to(self.device)
# self.gpt_model = transformers.modeling_gpt2.GPT2LMHeadModel.from_pretrained(model_path)
self.gpt_model = load_gpt(word2idx)
## 加载预训练的模型参数~
self.gpt_model.load_pretrain_params(model_path)
# 将模型发送到计算设备(GPU或CPU)
self.gpt_model.set_device(self.device)
# 声明需要优化的参数
self.optim_parameters = list(self.gpt_model.parameters())
self.optimizer = torch.optim.Adam(self.optim_parameters, lr=lr, weight_decay=1e-5)
# 声明自定义的数据加载器
dataset = SeqDataset()
self.dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
def train(self, epoch):
# 一个epoch的训练
self.gpt_model.train()
self.iteration(epoch, dataloader=self.dataloader, train=True)
def save(self, save_path):
"""
保存模型
"""
self.gpt_model.save_all_params(save_path)
print("{} saved!".format(save_path))
def iteration(self, epoch, dataloader, train=True):
total_loss = 0
report_loss = 0
start_time = time.time() ## 得到当前时间
step = 0
for token_ids, target_ids in tqdm(dataloader, total=len(dataloader)):
        # for token_ids, target_ids in tqdm(dataloader):
step += 1
if step % 1000 == 0:
print(f"epoch is {epoch}")
self.gpt_model.eval()
# self.gpt_model.to(torch.device("cpu"))
# text_generator = TextGenerationPipeline(self.gpt_model, tokenizer)
test_data = ["尚品宅配:家具定制模范生。", "今天的天气还不错。", "受双十一影响,阿里巴巴股票今天大涨。"]
for text in test_data:
# out = text_generator(text, max_length=300, do_sample=True)
# print(out)
print(self.gpt_model.sample_generate(text, add_eos=False, top_k=10, out_max_length=900, top_p=0.7, temperature=3.0, repetition_penalty=1.5))
self.gpt_model.train()
# self.gpt_model.to(self.device)
print("report loss is " + str(report_loss))
report_loss = 0.0
self.gpt_model.save_all_params(model_save_path)
print("模型保存完毕")
# 因为传入了target标签,因此会计算loss并且返回
loss, _ = self.gpt_model(token_ids,
labels=target_ids,
)
# 反向传播
if train:
# 清空之前的梯度
self.optimizer.zero_grad()
# 反向传播, 获取新的梯度
loss.backward()
# 用获取的梯度更新模型参数
self.optimizer.step()
# 为计算当前epoch的平均loss
total_loss += loss.item()
report_loss += loss.item()
end_time = time.time()
spend_time = end_time - start_time
# 打印训练信息
print("epoch is " + str(epoch) + ". loss is " + str(total_loss) + ". spend time is " + str(spend_time))
# 保存模型
self.save(model_save_path)
if __name__ == '__main__':
trainer = Trainer()
train_epoches = 5
for epoch in range(train_epoches):
# 训练一个epoch
trainer.train(epoch)
```
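The `collate_fn` above relies on `-100` as the ignore index so that padded positions contribute nothing to the language-modelling loss. Below is a self-contained illustration of that masking with toy logits; it is not the library's loss code, just the same trick in isolation.

```python
import torch
import torch.nn as nn

vocab_size = 8
logits = torch.randn(2, 5, vocab_size)        # (batch, seq_len, vocab)
targets = torch.tensor([[3, 1, 4, 0, 0],      # id 0 marks padding, as in padding()
                        [2, 2, 5, 6, 0]])
targets[targets == 0] = -100                  # same masking as target_ids_padded above

loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
loss = loss_fct(logits.view(-1, vocab_size), targets.view(-1))
print(loss)                                   # padded positions are ignored entirely
```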
#### File: bert_seq2seq/examples/roberta_coarsness_NER_CRF_train.py
```python
import torch
from tqdm import tqdm
import unicodedata
import os
import time
from torch.utils.data import Dataset, DataLoader
from bert_seq2seq import Tokenizer, load_chinese_base_vocab
from bert_seq2seq import load_bert
data_path = "./state_dict/corase_train_update.txt"
vocab_path = "./state_dict/roberta_wwm_vocab.txt" # roberta模型字典的位置
model_name = "roberta" # 选择模型名字
model_path = "./state_dict/roberta_wwm_pytorch_model.bin" # roberta模型位置
recent_model_path = "" # 用于把已经训练好的模型继续训练
model_save_path = "./bert_粗粒度ner_crf.bin"
batch_size = 4
lr = 1e-5
word2idx = load_chinese_base_vocab(vocab_path)
target = ["O", "B-LOC", "I-LOC", "B-PER", "I-PER", "B-ORG", "I-ORG"]
def _is_punctuation(ch):
"""标点符号类字符判断(全/半角均在此内)
"""
code = ord(ch)
return 33 <= code <= 47 or \
58 <= code <= 64 or \
91 <= code <= 96 or \
123 <= code <= 126 or \
unicodedata.category(ch).startswith('P')
def _cjk_punctuation():
return u'\uff02\uff03\uff04\uff05\uff06\uff07\uff08\uff09\uff0a\uff0b\uff0c\uff0d\uff0f\uff1a\uff1b\uff1c\uff1d\uff1e\uff20\uff3b\uff3c\uff3d\uff3e\uff3f\uff40\uff5b\uff5c\uff5d\uff5e\uff5f\uff60\uff62\uff63\uff64\u3000\u3001\u3003\u3008\u3009\u300a\u300b\u300c\u300d\u300e\u300f\u3010\u3011\u3014\u3015\u3016\u3017\u3018\u3019\u301a\u301b\u301c\u301d\u301e\u301f\u3030\u303e\u303f\u2013\u2014\u2018\u2019\u201b\u201c\u201d\u201e\u201f\u2026\u2027\ufe4f\ufe51\ufe54\xb7\uff01\uff1f\uff61\u3002'
def _is_cjk_character(ch):
"""CJK类字符判断(包括中文字符也在此列)
参考:https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
"""
code = ord(ch)
return 0x4E00 <= code <= 0x9FFF or \
0x3400 <= code <= 0x4DBF or \
0x20000 <= code <= 0x2A6DF or \
0x2A700 <= code <= 0x2B73F or \
0x2B740 <= code <= 0x2B81F or \
0x2B820 <= code <= 0x2CEAF or \
0xF900 <= code <= 0xFAFF or \
0x2F800 <= code <= 0x2FA1F
def _is_control(ch):
"""控制类字符判断
"""
return unicodedata.category(ch) in ('Cc', 'Cf')
def word_piece_tokenize(word):
"""word内分成subword
"""
if word in word2idx:
return [word]
tokens = []
start, stop = 0, 0
while start < len(word):
stop = len(word)
while stop > start:
sub = word[start:stop]
if start > 0:
sub = '##' + sub
if sub in word2idx:
break
stop -= 1
if start == stop:
stop += 1
tokens.append(sub)
start = stop
return tokens
def read_corpus(data_path):
"""
读原始数据
"""
sents_src = []
sents_tgt = []
with open(data_path, encoding="utf-8") as f:
lines = f.readlines()
row = ""
t = []
for line in lines:
if line == "\n":
if len(row) < 300:
sents_src.append(row)
sents_tgt.append(t)
row = ""
t = []
continue
line = line.split(" ")
row = row + line[0]
t.append(line[1].strip("\n"))
return sents_src, sents_tgt
## 自定义dataset
class NERDataset(Dataset):
"""
针对特定数据集,定义一个相关的取数据的方式
"""
def __init__(self, sents_src, sents_tgt) :
## 一般init函数是加载所有数据
super(NERDataset, self).__init__()
# 读原始数据
# self.sents_src, self.sents_tgt = read_corpus(poem_corpus_dir)
self.sents_src = sents_src
self.sents_tgt = sents_tgt
self.idx2word = {k: v for v, k in word2idx.items()}
self.tokenizer = Tokenizer(word2idx)
def __getitem__(self, i):
## 得到单个数据
# print(i)
src = self.sents_src[i]
tgt = self.sents_tgt[i]
tgt = ["O"] + tgt + ["O"]
tgt = [target.index(i) for i in tgt ]
token_ids, token_type_ids = self.tokenizer.encode(src)
if len(token_ids) != len(tgt):
print("not equal")
os._exit(0)
output = {
"token_ids": token_ids,
"token_type_ids": token_type_ids,
"target_id": tgt
}
return output
def __len__(self):
return len(self.sents_src)
def collate_fn(batch):
"""
动态padding, batch为一部分sample
"""
def padding(indice, max_length, pad_idx=0):
"""
pad 函数
"""
pad_indice = [item + [pad_idx] * max(0, max_length - len(item)) for item in indice]
return torch.tensor(pad_indice)
token_ids = [data["token_ids"] for data in batch]
max_length = max([len(t) for t in token_ids])
token_type_ids = [data["token_type_ids"] for data in batch]
target_ids = [data["target_id"] for data in batch]
token_ids_padded = padding(token_ids, max_length)
token_type_ids_padded = padding(token_type_ids, max_length)
target_ids_padded = padding(target_ids, max_length)
return token_ids_padded, token_type_ids_padded, target_ids_padded
def viterbi_decode(nodes, trans):
"""
维特比算法 解码
nodes: (seq_len, target_size)
trans: (target_size, target_size)
"""
scores = nodes[0]
scores[1:] -= 100000 # 刚开始标签肯定是"O"
target_size = nodes.shape[1]
seq_len = nodes.shape[0]
labels = torch.arange(0, target_size).view(1, -1)
path = labels
for l in range(1, seq_len):
scores = scores.view(-1, 1)
M = scores + trans + nodes[l].view(1, -1)
scores, ids = M.max(0)
path = torch.cat((path[:, ids], labels), dim=0)
# print(scores)
# print(scores)
return path[:, scores.argmax()]
def ner_print(model, test_data, device="cpu"):
model.eval()
idxtword = {v: k for k, v in word2idx.items()}
tokenier = Tokenizer(word2idx)
trans = model.state_dict()["crf_layer.trans"]
for text in test_data:
decode = []
text_encode, text_ids = tokenier.encode(text)
text_tensor = torch.tensor(text_encode, device=device).view(1, -1)
out = model(text_tensor).squeeze(0) # 其实是nodes
labels = viterbi_decode(out, trans)
starting = False
for l in labels:
if l > 0:
label = target[l.item()]
if label[0] == "B":
decode.append(label[2: ])
starting = True
elif starting:
decode.append(label[2: ])
else:
starting = False
decode.append("O")
else :
decode.append("O")
flag = 0
res = {}
text_decode = [idxtword[i] for i in text_encode]
for index, each_entity in enumerate(decode):
if each_entity != "O":
if flag != each_entity:
# cur_text = "".join([text[t] for t in mapping[index]])
cur_text = text_decode[index]
if each_entity in res.keys():
res[each_entity].append(cur_text)
else :
res[each_entity] = [cur_text]
flag = each_entity
elif flag == each_entity:
res[each_entity][-1] += text_decode[index]
# res[each_entity][-1] += "".join([text[t] for t in mapping[index]])
else :
flag = 0
print(res)
class Trainer:
def __init__(self):
# 加载数据
self.sents_src, self.sents_tgt = read_corpus(data_path)
self.tokenier = Tokenizer(word2idx)
# 判断是否有可用GPU
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("device: " + str(self.device))
# 定义模型
self.bert_model = load_bert(word2idx, model_name=model_name, model_class="sequence_labeling_crf", target_size=len(target))
## 加载预训练的模型参数~
self.bert_model.load_pretrain_params(model_path)
# 将模型发送到计算设备(GPU或CPU)
self.bert_model.set_device(self.device)
# 声明需要优化的参数
self.optim_parameters = list(self.bert_model.parameters())
self.optimizer = torch.optim.Adam(self.optim_parameters, lr=lr, weight_decay=1e-3)
# 声明自定义的数据加载器
dataset = NERDataset(self.sents_src, self.sents_tgt)
self.dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
def train(self, epoch):
# 一个epoch的训练
self.bert_model.train()
self.iteration(epoch, dataloader=self.dataloader, train=True)
def save(self, save_path):
"""
保存模型
"""
self.bert_model.save_all_params(save_path)
print("{} saved!".format(save_path))
def iteration(self, epoch, dataloader, train=True):
total_loss = 0
start_time = time.time() ## 得到当前时间
step = 0
for token_ids, token_type_ids, target_ids in tqdm(dataloader,position=0, leave=True):
# print(target_ids.shape)
step += 1
if step % 500 == 0:
test_data = ["日寇在京掠夺文物详情。", "以书结缘,把欧美,港台流行的食品类食谱汇集一堂。", "明天天津下雨,不知道主任还能不能来学校吃个饭。"]
ner_print(self.bert_model, test_data, device=self.device)
self.bert_model.train()
# 因为传入了target标签,因此会计算loss并且返回
predictions, loss = self.bert_model(token_ids,
labels=target_ids
)
# 反向传播
if train:
# 清空之前的梯度
self.optimizer.zero_grad()
# 反向传播, 获取新的梯度
loss.backward()
# 用获取的梯度更新模型参数
self.optimizer.step()
# 为计算当前epoch的平均loss
total_loss += loss.item()
end_time = time.time()
spend_time = end_time - start_time
# 打印训练信息
print("epoch is " + str(epoch)+". loss is " + str(total_loss) + ". spend time is "+ str(spend_time))
# 保存模型
self.save(model_save_path)
if __name__ == '__main__':
trainer = Trainer()
train_epoches = 25
for epoch in range(train_epoches):
# 训练一个epoch
trainer.train(epoch)
# with open("./state_dict/corase_train_update.txt", "a+") as f:
# with open("./corpus/粗粒度NER/人民日报ner数据.txt", "r", encoding="utf-8") as f1 :
# lines = f1.readlines()
# start = 1
# string = ""
# label = ""
# for line in lines:
# if line == "\n":
# f.write("\n")
# continue
# line = line.strip("\n")
# line = line.split(" ")
# if _is_punctuation(line[0]) or _is_cjk_character(line[0]):
# if string != "":
# string = string.lower()
# tokens = word_piece_tokenize(string) # 子词
# for t in tokens:
# if "##" in t:
# f.write(t[2:] + " " + label + "\n")
# else :
# f.write(t + " " + label + "\n")
# # f.write(string + " " + label + "\n")
# string = ""
# label = ""
# f.write(line[0] + " " + line[1] + "\n")
# else :
# string += line[0]
# label = line[1]
```
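To see what `viterbi_decode` above is doing, here is the same recursion run on a made-up 4-step, 3-tag example; the emission and transition scores are arbitrary, so the numbers are only for illustration.

```python
import torch

nodes = torch.tensor([[4.0, 0.0, 0.0],     # emission scores, (seq_len, target_size)
                      [0.5, 3.0, 0.0],
                      [0.2, 0.1, 2.5],
                      [3.0, 0.0, 0.0]])
trans = torch.tensor([[0.5, 0.1, 0.1],     # transition scores, (target_size, target_size)
                      [0.1, 0.5, 0.4],
                      [0.1, 0.1, 0.5]])

scores = nodes[0].clone()
scores[1:] -= 100000                       # force the first tag towards index 0 ("O")
labels = torch.arange(nodes.shape[1]).view(1, -1)
path = labels
for step in range(1, nodes.shape[0]):
    M = scores.view(-1, 1) + trans + nodes[step].view(1, -1)
    scores, ids = M.max(0)                 # best previous tag for each current tag
    path = torch.cat((path[:, ids], labels), dim=0)

print(path[:, scores.argmax()])            # tensor([0, 1, 2, 0]) for these scores
```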
#### File: bert_seq2seq/examples/roberta_math_ques_train.py
```python
import torch
from tqdm import tqdm
import json
import time
from torch.utils.data import Dataset, DataLoader
from bert_seq2seq import Tokenizer, load_chinese_base_vocab
from bert_seq2seq import load_bert
import re
vocab_path = "./state_dict/roberta_wwm_vocab.txt" # roberta模型字典的位置
word2idx = load_chinese_base_vocab(vocab_path)
model_name = "roberta" # 选择模型名字
model_path = "./state_dict/roberta_wwm_pytorch_model.bin" # 模型位置
recent_model_path = "./state_dict/bert_math_ques_model.bin" # 用于把已经训练好的模型继续训练
model_save_path = "./state_dict/bert_math_ques_model.bin"
batch_size = 16
lr = 1e-5
maxlen = 256
train_data_path = "./state_dict/train.ape.json"
val_data_path = "./state_dict/test.ape.json"
def remove_bucket(equation):
"""去掉冗余的括号
"""
l_buckets, buckets = [], []
for i, c in enumerate(equation):
if c == '(':
l_buckets.append(i)
elif c == ')':
buckets.append((l_buckets.pop(), i))
eval_equation = eval(equation)
for l, r in buckets:
new_equation = '%s %s %s' % (
equation[:l], equation[l + 1:r], equation[r + 1:]
)
try:
if is_equal(eval(new_equation.replace(' ', '')), eval_equation):
equation = new_equation
except:
pass
return equation.replace(' ', '')
def is_equal(a, b):
"""比较两个结果是否相等
"""
a = round(float(a), 6)
b = round(float(b), 6)
return a == b
## 苏神baseline 读取数据
def load_data(filename):
"""读取训练数据,并做一些标准化,保证equation是可以eval的
参考:https://kexue.fm/archives/7809
"""
D = []
# index = 0
for l in open(filename):
# index += 1
# if index == 100:
# break
l = json.loads(l)
# print(l)
question, equation, answer = l['original_text'], l['equation'], l['ans']
# 处理带分数
question = re.sub('(\d+)\((\d+/\d+)\)', '(\\1+\\2)', question)
equation = re.sub('(\d+)\((\d+/\d+)\)', '(\\1+\\2)', equation)
answer = re.sub('(\d+)\((\d+/\d+)\)', '(\\1+\\2)', answer)
equation = re.sub('(\d+)\(', '\\1+(', equation)
answer = re.sub('(\d+)\(', '\\1+(', answer)
# 分数去括号
question = re.sub('\((\d+/\d+)\)', '\\1', question)
# 处理百分数
equation = re.sub('([\.\d]+)%', '(\\1/100)', equation)
answer = re.sub('([\.\d]+)%', '(\\1/100)', answer)
# 冒号转除号、剩余百分号处理
equation = equation.replace(':', '/').replace('%', '/100')
answer = answer.replace(':', '/').replace('%', '/100')
if equation[:2] == 'x=':
equation = equation[2:]
try:
# print(equation)
# print(answer)
# print("~~~~~~~`")
if is_equal(eval(equation), eval(answer)):
D.append((question, remove_bucket(equation), answer))
except Exception as e:
print(e)
continue
return D
class BertDataset(Dataset):
"""
针对特定数据集,定义一个相关的取数据的方式
"""
def __init__(self, data) :
## 一般init函数是加载所有数据
super(BertDataset, self).__init__()
self.data = data
print("data size is " + str(len(data)))
self.idx2word = {k: v for v, k in word2idx.items()}
self.tokenizer = Tokenizer(word2idx)
def __getitem__(self, i):
## 得到单个数据
# print(i)
single_data = self.data[i]
original_text = single_data[0]
ans_text = single_data[1]
token_ids, token_type_ids = self.tokenizer.encode(
original_text, ans_text, max_length=maxlen
)
output = {
"token_ids": token_ids,
"token_type_ids": token_type_ids,
}
return output
def __len__(self):
return len(self.data)
def collate_fn(batch):
"""
动态padding, batch为一部分sample
"""
def padding(indice, max_length, pad_idx=0):
"""
pad 函数
"""
pad_indice = [item + [pad_idx] * max(0, max_length - len(item)) for item in indice]
return torch.tensor(pad_indice)
token_ids = [data["token_ids"] for data in batch]
max_length = max([len(t) for t in token_ids])
token_type_ids = [data["token_type_ids"] for data in batch]
token_ids_padded = padding(token_ids, max_length)
token_type_ids_padded = padding(token_type_ids, max_length)
target_ids_padded = token_ids_padded[:, 1:].contiguous()
return token_ids_padded, token_type_ids_padded, target_ids_padded
class Trainer:
def __init__(self):
# 判断是否有可用GPU
data = load_data(train_data_path)
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("device: " + str(self.device))
# 定义模型
self.bert_model = load_bert(word2idx, model_name=model_name)
## 加载预训练的模型参数~
self.bert_model.load_pretrain_params(model_path)
# 将模型发送到计算设备(GPU或CPU)
self.bert_model.set_device(self.device)
# 声明需要优化的参数
self.optim_parameters = list(self.bert_model.parameters())
self.optimizer = torch.optim.Adam(self.optim_parameters, lr=lr, weight_decay=1e-5)
# 声明自定义的数据加载器
dataset = BertDataset(data)
self.dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
self.best_acc = 0.0
def train(self, epoch):
# 一个epoch的训练
self.bert_model.train()
self.iteration(epoch, dataloader=self.dataloader, train=True)
def save(self, save_path):
"""
保存模型
"""
self.bert_model.save_all_params(save_path)
print("{} saved!".format(save_path))
def iteration(self, epoch, dataloader, train=True):
total_loss = 0
start_time = time.time() ## 得到当前时间
step = 0
report_loss = 0
print("starting train.......")
# for token_ids, token_type_ids, target_ids in tqdm(dataloader,position=0, leave=True):
for token_ids, token_type_ids, target_ids in dataloader:
step += 1
if step % 3000 == 0:
self.bert_model.eval()
test_data = ["王艳家买了一台洗衣机和一台电冰箱,一共花了6000元,电冰箱的价钱是洗衣机的3/5,求洗衣机的价钱.",
"六1班原来男生占总数的2/5,又转来5名男生,现在男生占总数的5/11,女生有多少人?",
"两个相同的数相乘,积是3600,这个数是多少."]
for text in test_data:
print(self.bert_model.generate(text, beam_size=3, device=self.device))
print("loss is " + str(report_loss))
report_loss = 0
self.bert_model.train()
if step % 10000 == 0:
## 2000步集中测试一下
print("validing..........")
acc = self.validation()
print("valid acc is " + str(acc))
if acc > self.best_acc:
self.best_acc = acc
self.save(model_save_path)
token_ids = token_ids.to(self.device)
token_type_ids = token_type_ids.to(self.device)
target_ids = target_ids.to(self.device)
# 因为传入了target标签,因此会计算loss并且返回
predictions, loss = self.bert_model(token_ids,
token_type_ids,
labels=target_ids,
)
report_loss += loss.item()
# 反向传播
if train:
# 清空之前的梯度
self.optimizer.zero_grad()
# 反向传播, 获取新的梯度
loss.backward()
# 用获取的梯度更新模型参数
self.optimizer.step()
# 为计算当前epoch的平均loss
total_loss += loss.item()
end_time = time.time()
spend_time = end_time - start_time
# 打印训练信息
print("epoch is " + str(epoch)+". loss is " + str(total_loss) + ". spend time is "+ str(spend_time))
# 保存模型
# self.save(model_save_path)
def eval_equation(self, equation):
ans = -10000
try:
ans = eval(equation)
except:
pass
return ans
def validation(self):
val_data = load_data(val_data_path)
# 用0 和 2
self.bert_model.eval()
right = 0.0
num = len(val_data)
# for each_data in tqdm(val_data, total=num):
for each_data in val_data:
equation = self.bert_model.generate(each_data[0], beam_size=3, device=self.device)
pred_ans = self.eval_equation(equation.replace(" ", ""))
ans1 = each_data[2]
try :
if "/" in each_data[2] or "+" in each_data[2] or "-" in each_data[2] or "*" in each_data[2]:
# print(each_data[2])
# equation1 = re.sub('\((\d+/\d+)\)', '\\1', str(each_data[2]))
ans1 = eval(each_data[2])
if abs(float(pred_ans) - float(ans1)) < 0.01:
right += 1
# print("right! pred is " + str(pred_ans) + " ans is " + str(each_data[2]))
else:
pass
# print("err! pred is " + str(pred_ans) + " ans is " + str(each_data[2]))
except Exception as e:
print(e)
self.bert_model.train()
return right / num
if __name__ == '__main__':
trainer = Trainer()
train_epoches = 25
for epoch in range(train_epoches):
# 训练一个epoch
trainer.train(epoch)
```
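The heart of `load_data` above is the chain of regular expressions that turns the dataset's equation strings into something `eval()` can score. Here is a self-contained walk-through on one made-up equation containing a mixed fraction, a percentage and a ratio.

```python
import re

equation = 'x=3(1/2)*80%:(1/5)'                                  # made-up example
equation = re.sub(r'(\d+)\((\d+/\d+)\)', '(\\1+\\2)', equation)  # mixed fraction a(b/c) -> (a+b/c)
equation = re.sub(r'(\d+)\(', '\\1+(', equation)                 # leftover "n(" cases
equation = re.sub(r'([\.\d]+)%', '(\\1/100)', equation)          # percentages -> /100
equation = equation.replace(':', '/').replace('%', '/100')       # ratios and stray %
if equation[:2] == 'x=':
    equation = equation[2:]

print(equation)        # (3+1/2)*(80/100)/(1/5)
print(eval(equation))  # 14.0
```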
#### File: bert_seq2seq/examples/roberta_medical_ner_train.py
```python
import torch
import codecs
from tqdm import tqdm
import time
import unicodedata
from torch.utils.data import Dataset, DataLoader
from bert_seq2seq import Tokenizer, load_chinese_base_vocab
from bert_seq2seq import load_bert
# target = ["O", "B-DRUG", "B-DRUG_INGREDIENT", "B-DISEASE", "B-SYMPTOM", "B-SYNDROME", "B-DISEASE_GROUP",
# "B-FOOD", "B-FOOD_GROUP", "B-PERSON_GROUP", "B-DRUG_GROUP", "B-DRUG_DOSAGE", "B-DRUG_TASTE",
# "B-DRUG_EFFICACY", "I-DRUG", "I-DRUG_INGREDIENT", "I-DISEASE", "I-SYMPTOM", "I-SYNDROME", "I-DISEASE_GROUP",
# "I-FOOD", "I-FOOD_GROUP", "I-PERSON_GROUP", "I-DRUG_GROUP", "I-DRUG_DOSAGE", "I-DRUG_TASTE",
# "I-DRUG_EFFICACY"]
target = ["O", "DRUG", "DRUG_INGREDIENT", "DISEASE", "SYMPTOM", "SYNDROME", "DISEASE_GROUP",
"FOOD", "FOOD_GROUP", "PERSON_GROUP", "DRUG_GROUP", "DRUG_DOSAGE", "DRUG_TASTE",
"DRUG_EFFICACY"]
labels2id = {k: v for v, k in enumerate(target)}
vocab_path = "./state_dict/roberta_wwm_vocab.txt" # roberta模型字典的位置
model_name = "roberta" # 选择模型名字
model_path = "./state_dict/roberta_wwm_pytorch_model.bin" # roberta模型位置
recent_model_path = "" # 用于把已经训练好的模型继续训练
model_save_path = "./state_dict/bert_medical_ner_model_crf.bin"
batch_size = 8
lr = 1e-5
crf_lr = 1e-2 ## crf层学习率为0.01
# 加载字典
word2idx = load_chinese_base_vocab(vocab_path)
def _is_punctuation(ch):
"""标点符号类字符判断(全/半角均在此内)
"""
code = ord(ch)
return 33 <= code <= 47 or \
58 <= code <= 64 or \
91 <= code <= 96 or \
123 <= code <= 126 or \
unicodedata.category(ch).startswith('P')
def _cjk_punctuation():
return u'\uff02\uff03\uff04\uff05\uff06\uff07\uff08\uff09\uff0a\uff0b\uff0c\uff0d\uff0f\uff1a\uff1b\uff1c\uff1d\uff1e\uff20\uff3b\uff3c\uff3d\uff3e\uff3f\uff40\uff5b\uff5c\uff5d\uff5e\uff5f\uff60\uff62\uff63\uff64\u3000\u3001\u3003\u3008\u3009\u300a\u300b\u300c\u300d\u300e\u300f\u3010\u3011\u3014\u3015\u3016\u3017\u3018\u3019\u301a\u301b\u301c\u301d\u301e\u301f\u3030\u303e\u303f\u2013\u2014\u2018\u2019\u201b\u201c\u201d\u201e\u201f\u2026\u2027\ufe4f\ufe51\ufe54\xb7\uff01\uff1f\uff61\u3002'
def _is_cjk_character(ch):
"""CJK类字符判断(包括中文字符也在此列)
参考:https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
"""
code = ord(ch)
return 0x4E00 <= code <= 0x9FFF or \
0x3400 <= code <= 0x4DBF or \
0x20000 <= code <= 0x2A6DF or \
0x2A700 <= code <= 0x2B73F or \
0x2B740 <= code <= 0x2B81F or \
0x2B820 <= code <= 0x2CEAF or \
0xF900 <= code <= 0xFAFF or \
0x2F800 <= code <= 0x2FA1F
def _is_control(ch):
"""控制类字符判断
"""
return unicodedata.category(ch) in ('Cc', 'Cf')
def word_piece_tokenize(word):
"""word内分成subword
"""
if word in word2idx:
return [word]
tokens = []
start, stop = 0, 0
while start < len(word):
stop = len(word)
while stop > start:
sub = word[start:stop]
if start > 0:
sub = '##' + sub
if sub in word2idx:
break
stop -= 1
if start == stop:
stop += 1
tokens.append(sub)
start = stop
return tokens
def from_ann2dic(w_path):
for i in range(1000):
print(i)
r_ann_path = "./corpus/医学NER/train/" + str(i) + ".ann"
r_txt_path = "./corpus/医学NER/train/" + str(i) + ".txt"
q_dic = {}
print("开始读取文件:%s" % r_ann_path)
with codecs.open(r_ann_path, "r", encoding="utf-8") as f:
line = f.readline()
line = line.strip("\n\r")
while line != "":
line_arr = line.split()
# print(line_arr)
cls = line_arr[1]
start_index = int(line_arr[2])
end_index = int(line_arr[3])
length = end_index - start_index
for r in range(length):
if r == 0:
q_dic[start_index] = ("%s" % cls)
else:
q_dic[start_index + r] = ("%s" % cls)
line = f.readline()
line = line.strip("\n\r")
print("开始读取文件:%s" % r_txt_path)
with codecs.open(r_txt_path, "r", encoding="utf-8") as f:
content_str = f.read()
content_str = content_str.replace("、", ",")
print("开始写入文本%s" % w_path)
with codecs.open(w_path, encoding="utf-8", mode="a+") as w:
for i, char in enumerate(content_str):
if char == " " or char == "" or char == "\n" or char == "\r" or char == "<" or char == ">" or char == "b" or char == "r" or char == "/" or unicodedata.category(char) == 'Zs' or char == "-":
continue
else:
if i in q_dic:
tag = q_dic[i]
else:
tag = "O" # 大写字母O
w.write('%s %s\n' % (char, tag))
# w.write('%s\n' % "END O")
def load_data(path: str):
"""
加载数据
"""
src_data = []
labels_data = []
with open(path) as f :
line = f.readline()
line = line.strip("\n")
temp_list = ""
temp_label_list = [0]
# index = 0
while line != "":
##开始一行一行读数据
line_split = line.split(" ")
# print(line_split)
if line_split[0] == "。":
temp_label_list.append(0)
src_data.append(temp_list)
labels_data.append(temp_label_list)
temp_list = ""
temp_label_list = [0]
else :
temp_list += (line_split[0])
temp_label_list.append(labels2id[line_split[1]])
line = f.readline()
line = line.strip("\n")
print("原始数据大小为:" + str(len(src_data)))
save_src_data = []
save_label_data = []
for src, label in zip(src_data, labels_data):
if len(src) < 5:
# print(src)
continue
save_src_data.append(src)
save_label_data.append(label)
# retain = 0
print("清洗后数据大小为:" + str(len(save_src_data)))
return save_src_data, save_label_data
## 自定义dataset
class NERDataset(Dataset):
"""
针对特定数据集,定义一个相关的取数据的方式
"""
def __init__(self, sents_src, sents_tgt) :
## 一般init函数是加载所有数据
super(NERDataset, self).__init__()
# 读原始数据
# self.sents_src, self.sents_tgt = read_corpus(poem_corpus_dir)
self.sents_src = sents_src
self.sents_tgt = sents_tgt
self.idx2word = {k: v for v, k in word2idx.items()}
self.tokenizer = Tokenizer(word2idx)
def __getitem__(self, i):
## 得到单个数据
# print(i)
src = self.sents_src[i]
tgt = self.sents_tgt[i]
token_ids, token_type_ids = self.tokenizer.encode(src)
if len(token_ids) != len(tgt):
print(len(token_ids))
print(len(tgt))
print(src)
print(self.tokenizer.decode(token_ids))
print(tgt)
            return self.__getitem__(i + 1)
output = {
"token_ids": token_ids,
"token_type_ids": token_type_ids,
"target_id": tgt
}
return output
def __len__(self):
return len(self.sents_src)
def collate_fn(batch):
"""
动态padding, batch为一部分sample
"""
def padding(indice, max_length, pad_idx=0):
"""
pad 函数
"""
pad_indice = [item + [pad_idx] * max(0, max_length - len(item)) for item in indice]
return torch.tensor(pad_indice)
token_ids = [data["token_ids"] for data in batch]
max_length = max([len(t) for t in token_ids])
token_type_ids = [data["token_type_ids"] for data in batch]
target_ids = [data["target_id"] for data in batch]
token_ids_padded = padding(token_ids, max_length)
token_type_ids_padded = padding(token_type_ids, max_length)
target_ids_padded = padding(target_ids, max_length)
return token_ids_padded, token_type_ids_padded, target_ids_padded
def viterbi_decode(nodes, trans):
"""
维特比算法 解码
nodes: (seq_len, target_size)
trans: (target_size, target_size)
"""
scores = nodes[0]
scores[1:] -= 100000 # 刚开始标签肯定是"O"
target_size = nodes.shape[1]
seq_len = nodes.shape[0]
labels = torch.arange(0, target_size).view(1, -1)
path = labels
for l in range(1, seq_len):
scores = scores.view(-1, 1)
M = scores + trans + nodes[l].view(1, -1)
scores, ids = M.max(0)
path = torch.cat((path[:, ids], labels), dim=0)
# print(scores)
# print(scores)
return path[:, scores.argmax()]
def ner_print(model, test_data):
model.eval()
idxtword = {v: k for k, v in word2idx.items()}
tokenier = Tokenizer(word2idx)
trans = model.state_dict()["crf_layer.trans"]
for text in test_data:
decode = []
text_encode, text_ids = tokenier.encode(text)
text_tensor = torch.tensor(text_encode, device=model.device).view(1, -1)
out = model(text_tensor).squeeze(0) # 其实是nodes
labels = viterbi_decode(out, trans)
starting = False
for l in labels:
if l > 0:
label = target[l.item()]
decode.append(label)
else :
decode.append("O")
flag = 0
res = {}
# print(decode)
# print(text)
decode_text = [idxtword[i] for i in text_encode]
for index, each_entity in enumerate(decode):
if each_entity != "O":
if flag != each_entity:
cur_text = decode_text[index]
if each_entity in res.keys():
res[each_entity].append(cur_text)
else :
res[each_entity] = [cur_text]
flag = each_entity
elif flag == each_entity:
res[each_entity][-1] += decode_text[index]
else :
flag = 0
print(res)
class Trainer:
def __init__(self):
# 加载数据
self.sents_src, self.sents_tgt = load_data("./state_dict/medical_ner_update.txt")
self.tokenier = Tokenizer(word2idx)
# 判断是否有可用GPU
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("device: " + str(self.device))
# 定义模型
self.bert_model = load_bert(word2idx, model_name=model_name, model_class="sequence_labeling_crf", target_size=len(target))
## 加载预训练的模型参数~
self.bert_model.load_pretrain_params(model_path)
# 将模型发送到计算设备(GPU或CPU)
self.bert_model.set_device(self.device)
# 声明需要优化的参数
crf_params = list(map(id, self.bert_model.crf_layer.parameters())) ## 单独把crf层参数拿出来
base_params = filter(lambda p: id(p) not in crf_params, self.bert_model.parameters())
self.optimizer = torch.optim.Adam([
{"params": base_params},
{"params": self.bert_model.crf_layer.parameters(), "lr": crf_lr}], lr=lr, weight_decay=1e-5)
# 声明自定义的数据加载器
dataset = NERDataset(self.sents_src, self.sents_tgt)
self.dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
def train(self, epoch):
# 一个epoch的训练
self.bert_model.train()
self.iteration(epoch, dataloader=self.dataloader, train=True)
def save(self, save_path):
"""
保存模型
"""
self.bert_model.save_all_params(save_path)
print("{} saved!".format(save_path))
def iteration(self, epoch, dataloader, train=True):
total_loss = 0
start_time = time.time() ## 得到当前时间
step = 0
for token_ids, token_type_ids, target_ids in tqdm(dataloader,position=0, leave=True):
# print(target_ids.shape)
step += 1
if step % 300 == 0:
test_data = ["补气养血,调经止带,用于月经不调经期腹痛,非处方药物甲类,国家基本药物目录2012如果服用任何其他药品请告知医师或药师包括任何从药房超市或保健品商店购买的非处方药品。",
"月经过多孕妇忌服。黑褐色至黑色的小蜜丸味甜微苦。",
"红虎灌肠液50毫升装,安徽天洋药业清热解毒,化湿除带,祛瘀止痛,散结消癥,用于慢性盆腔炎所致小腹疼痛腰,骶酸痛带下量多或有发热。"]
ner_print(self.bert_model, test_data)
self.bert_model.train()
# 因为传入了target标签,因此会计算loss并且返回
predictions, loss = self.bert_model(token_ids,
labels=target_ids,
# use_layer_num=3
)
# 反向传播
if train:
# 清空之前的梯度
self.optimizer.zero_grad()
# 反向传播, 获取新的梯度
loss.backward()
# 用获取的梯度更新模型参数
self.optimizer.step()
# 为计算当前epoch的平均loss
total_loss += loss.item()
end_time = time.time()
spend_time = end_time - start_time
# 打印训练信息
print("epoch is " + str(epoch)+". loss is " + str(total_loss) + ". spend time is "+ str(spend_time))
# 保存模型
self.save(model_save_path)
if __name__ == '__main__':
trainer = Trainer()
train_epoches = 50
for epoch in range(train_epoches):
# 训练一个epoch
trainer.train(epoch)
# from_ann2dic("./state_dict/medical_ner.txt")
# with open("./state_dict/medical_ner_update.txt", "a+") as f:
# with open("./state_dict/medical_ner.txt", "r", encoding="utf-8") as f1 :
# lines = f1.readlines()
# start = 1
# string = ""
# label = ""
# for line in lines:
# if line == "\n":
# f.write("\n")
# continue
# line = line.strip("\n")
# line = line.split(" ")
# if _is_punctuation(line[0]) or _is_cjk_character(line[0]):
# if string != "":
# string = string.lower()
# tokens = word_piece_tokenize(string) # 子词
# for t in tokens:
# if "##" in t:
# f.write(t[2:] + " " + label + "\n")
# else :
# f.write(t + " " + label + "\n")
# # f.write(string + " " + label + "\n")
# string = ""
# label = ""
# f.write(line[0] + " " + line[1] + "\n")
# else :
# string += line[0]
# label = line[1]
```
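`from_ann2dic` above converts BRAT-style `.ann` span annotations into per-character tags before training. The essential step, isolated on a toy sentence; the `.ann` field layout (id, type, start, end, mention) is assumed from the parsing code.

```python
text = "服用板蓝根可缓解症状"
ann_lines = ["T1\tDRUG 2 5\t板蓝根",
             "T2\tSYMPTOM 8 10\t症状"]

q_dic = {}
for line in ann_lines:
    line_arr = line.split()                   # same whitespace split as the script
    cls, start_index, end_index = line_arr[1], int(line_arr[2]), int(line_arr[3])
    for r in range(end_index - start_index):
        q_dic[start_index + r] = cls

for i, char in enumerate(text):
    print(char, q_dic.get(i, "O"))            # 板/蓝/根 -> DRUG, 症/状 -> SYMPTOM, rest O
```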
#### File: bert_seq2seq/examples/roberta_poem_train.py
```python
import torch
from tqdm import tqdm
import pandas as pd
import os
import time
from torch.utils.data import Dataset, DataLoader
from bert_seq2seq import Tokenizer, load_chinese_base_vocab
from bert_seq2seq import load_bert
vocab_path = "./state_dict/roberta_wwm_vocab.txt" # roberta模型字典的位置
model_name = "roberta" # 选择模型名字
model_path = "./state_dict/roberta_wwm_pytorch_model.bin" # roberta模型位置
recent_model_path = "./bert_model_poem.bin" # 用于把已经训练好的模型继续训练
model_save_path = "./bert_model_poem.bin"
batch_size = 16
lr = 1e-5
word2idx, keep_tokens = load_chinese_base_vocab(vocab_path, simplfied=True)
def read_corpus(dir_path):
"""
读原始数据
"""
sents_src = []
sents_tgt = []
tokenizer = Tokenizer(word2idx)
files= os.listdir(dir_path) #得到文件夹下的所有文件名称
for file1 in files: #遍历文件夹
if not os.path.isdir(file1): #判断是否是文件夹,不是文件夹才打开
file_path = dir_path + "/" + file1
print(file_path)
if file_path[-3:] != "csv":
continue
df = pd.read_csv(file_path)
# 先判断诗句的类型 再确定是否要构造数据
for index, row in df.iterrows():
if type(row[0]) is not str or type(row[3]) is not str:
continue
if len(row[0].split(" ")) > 1:
# 说明题目里面存在空格,只要空格前面的数据
row[0] = row[0].split(" ")[0]
if len(row[0]) > 10 or len(row[0]) < 1:
# 过滤掉题目长度过长和过短的诗句
continue
encode_text = tokenizer.encode(row[3])[0]
if word2idx["[UNK]"] in encode_text:
# 过滤unk字符
continue
if len(row[3]) == 24 and (row[3][5] == "," or row[3][5] == "。"):
# 五言绝句
sents_src.append(row[0] + "##" + "五言绝句")
sents_tgt.append(row[3])
elif len(row[3]) == 32 and (row[3][7] == "," or row[3][7] == "。"):
# 七言绝句
sents_src.append(row[0] + "##" + "七言绝句")
sents_tgt.append(row[3])
elif len(row[3]) == 48 and (row[3][5] == "," or row[3][5] == "。"):
# 五言律诗
sents_src.append(row[0] + "##" + "五言律诗")
sents_tgt.append(row[3])
elif len(row[3]) == 64 and (row[3][7] == "," or row[3][7] == "。"):
# 七言律诗
sents_src.append(row[0] + "##" + "七言律诗")
sents_tgt.append(row[3])
# print("第一次诗句共: " + str(len(sents_src)) + "篇")
return sents_src, sents_tgt
class BertDataset(Dataset):
"""
针对特定数据集,定义一个相关的取数据的方式
"""
def __init__(self, sents_src, sents_tgt) :
## 一般init函数是加载所有数据
super(BertDataset, self).__init__()
# 读原始数据
# self.sents_src, self.sents_tgt = read_corpus(poem_corpus_dir)
self.sents_src = sents_src
self.sents_tgt = sents_tgt
self.idx2word = {k: v for v, k in word2idx.items()}
self.tokenizer = Tokenizer(word2idx)
def __getitem__(self, i):
## 得到单个数据
src = self.sents_src[i]
tgt = self.sents_tgt[i]
token_ids, token_type_ids = self.tokenizer.encode(src, tgt)
output = {
"token_ids": token_ids,
"token_type_ids": token_type_ids,
}
return output
def __len__(self):
return len(self.sents_src)
def collate_fn(batch):
"""
动态padding, batch为一部分sample
"""
def padding(indice, max_length, pad_idx=0):
"""
pad 函数
"""
pad_indice = [item + [pad_idx] * max(0, max_length - len(item)) for item in indice]
return torch.tensor(pad_indice)
token_ids = [data["token_ids"] for data in batch]
max_length = max([len(t) for t in token_ids])
token_type_ids = [data["token_type_ids"] for data in batch]
token_ids_padded = padding(token_ids, max_length)
token_type_ids_padded = padding(token_type_ids, max_length)
target_ids_padded = token_ids_padded[:, 1:].contiguous()
return token_ids_padded, token_type_ids_padded, target_ids_padded
class PoemTrainer:
def __init__(self):
# 加载数据
data_dir = "./corpus/Poetry"
self.sents_src, self.sents_tgt = read_corpus(data_dir)
# 判断是否有可用GPU
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("device: " + str(self.device))
# 定义模型
self.bert_model = load_bert(word2idx, model_name=model_name)
## 加载预训练的模型参数~
self.bert_model.load_pretrain_params(model_path, keep_tokens=keep_tokens)
# 将模型发送到计算设备(GPU或CPU)
self.bert_model.set_device(self.device)
# 声明需要优化的参数
self.optim_parameters = list(self.bert_model.parameters())
self.optimizer = torch.optim.Adam(self.optim_parameters, lr=lr, weight_decay=1e-3)
# 声明自定义的数据加载器
dataset = BertDataset(self.sents_src, self.sents_tgt)
self.dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
def train(self, epoch):
# train for one epoch
self.bert_model.train()
self.iteration(epoch, dataloader=self.dataloader, train=True)
def save(self, save_path):
"""
Save the model.
"""
self.bert_model.save_all_params(save_path)
print("{} saved!".format(save_path))
def iteration(self, epoch, dataloader, train=True):
total_loss = 0
start_time = time.time()  ## record the start time
step = 0
for token_ids, token_type_ids, target_ids in tqdm(dataloader,position=0, leave=True):
step += 1
if step % 3000 == 0:
self.bert_model.eval()
test_data = ["北国风光##五言绝句", "题西林壁##七言绝句", "长安早春##五言律诗"]
for text in test_data:
print(self.bert_model.generate(text, beam_size=3, is_poem=True))
self.bert_model.train()
token_ids = token_ids.to(self.device)
token_type_ids = token_type_ids.to(self.device)
target_ids = target_ids.to(self.device)
# because target labels are passed in, the model computes and returns the loss
predictions, loss = self.bert_model(token_ids,
token_type_ids,
labels=target_ids,
)
# backpropagation
if train:
# clear the previous gradients
self.optimizer.zero_grad()
# backpropagate to obtain new gradients
loss.backward()
# update the model parameters with the new gradients
self.optimizer.step()
# accumulate the loss to compute this epoch's total
total_loss += loss.item()
end_time = time.time()
spend_time = end_time - start_time
# print training info
print("epoch is " + str(epoch)+". loss is " + str(total_loss) + ". spend time is "+ str(spend_time))
# save the model
self.save(model_save_path)
if __name__ == '__main__':
trainer = PoemTrainer()
train_epoches = 50
for epoch in range(train_epoches):
# train one epoch
trainer.train(epoch)
```
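The `collate_fn` above is what makes variable-length poems batchable: every sample is right-padded to the longest sequence in the batch, and the decoder targets are simply the padded inputs shifted left by one position. Below is a minimal, self-contained sketch of that behaviour using plain PyTorch; the helper name `pad_batch` and the example ids are illustrative and not part of bert_seq2seq.

```python
import torch

def pad_batch(seqs, pad_idx=0):
    """Right-pad id lists to the batch maximum, mirroring collate_fn above."""
    max_len = max(len(s) for s in seqs)
    padded = torch.tensor([s + [pad_idx] * (max_len - len(s)) for s in seqs])
    # targets are the inputs shifted left by one token (next-token prediction)
    targets = padded[:, 1:].contiguous()
    return padded, targets

if __name__ == "__main__":
    batch = [[101, 8, 9, 102], [101, 8, 102]]
    inputs, targets = pad_batch(batch)
    print(inputs.shape, targets.shape)  # torch.Size([2, 4]) torch.Size([2, 3])
```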
#### File: bert_seq2seq/examples/roberta_relation_extract_train.py
```python
import random
import torch
from tqdm import tqdm
import json
from torch.utils.data import Dataset, DataLoader
from bert_seq2seq import Tokenizer, load_chinese_base_vocab
from bert_seq2seq import load_bert
import numpy as np
import time
vocab_path = "./state_dict/roberta_wwm_vocab.txt" # roberta模型字典的位置
model_name = "roberta" # 选择模型名字
model_path = "./state_dict/roberta_wwm_pytorch_model.bin" # roberta模型位置
recent_model_path = "" # 用于把已经训练好的模型继续训练
model_save_path = "./state_dict/bert_model_relation_extrac.bin"
all_p_path = "./corpus/三元组抽取/all_50_schemas" # 穷举所有p。
data_path = "./corpus/三元组抽取/train_data.json" # 训练集
data_dev = "./corpus/三元组抽取/dev_data.json" # 验证集
batch_size = 16
lr = 1e-5
word2idx = load_chinese_base_vocab(vocab_path)
idx2word = {v: k for k, v in word2idx.items()}
tokenizer = Tokenizer(word2idx)
def load_data(filename):
D = []
with open(filename, encoding='utf-8') as f:
for l in f:
l = json.loads(l)
D.append({
'text': l['text'],
'spo_list': [(spo['subject'], spo['predicate'], spo['object'])
for spo in l['spo_list']]
})
return D
predicate2id, id2predicate = {}, {}
with open(all_p_path, encoding="utf-8") as f:
for l in f:
l = json.loads(l)
if l['predicate'] not in predicate2id:
id2predicate[len(predicate2id)] = l['predicate']
predicate2id[l['predicate']] = len(predicate2id)
def search(pattern, sequence):
"""从sequence中寻找子串pattern
如果找到,返回第一个下标;否则返回-1。
"""
n = len(pattern)
for i in range(len(sequence)):
if sequence[i:i + n] == pattern:
return i
return -1
def search_subject(token_ids, subject_labels):
# subject_labels: (lens, 2)
if type(subject_labels) is torch.Tensor:
subject_labels = subject_labels.numpy()
if type(token_ids) is torch.Tensor:
token_ids = token_ids.cpu().numpy()
subjects = []
subject_ids = []
start = -1
end = -1
for i in range(len(token_ids)):
if subject_labels[i, 0] > 0.5:
start = i
for j in range(len(token_ids)):
if subject_labels[j, 1] > 0.5:
subject_labels[j, 1] = 0
end = j
break
if start == -1 or end == -1:
continue
subject = ""
for k in range(start, end + 1):
subject += idx2word[token_ids[k]]
# print(subject)
subject_ids.append([start, end])
start = -1
end = -1
subjects.append(subject)
return subjects, subject_ids
def search_object(token_ids, object_labels):
objects = []
if type(object_labels) is torch.Tensor:
object_labels = object_labels.numpy()
if type(token_ids) is torch.Tensor:
token_ids = token_ids.cpu().numpy()
# print(object_labels.sum())
start = np.where(object_labels[:, :, 0] > 0.5)
end = np.where(object_labels[:, :, 1] > 0.5)
for _start, predicate1 in zip(*start):
for _end, predicate2 in zip(*end):
if _start <= _end and predicate1 == predicate2:
object_text = ""
for k in range(_start, _end + 1):
# print(token_ids(k))
object_text += idx2word[token_ids[k]]
objects.append(
(id2predicate[predicate1], object_text)
)
break
return objects
class ExtractDataset(Dataset):
"""
Dataset wrapper defining how samples are fetched for this corpus.
"""
def __init__(self, data):
## __init__ usually loads all of the data
super(ExtractDataset, self).__init__()
# read the raw data
self.data = data
self.idx2word = {k: v for v, k in word2idx.items()}
def __getitem__(self, i):
## fetch a single sample
# print(i)
d = self.data[i]
token_ids, segment_ids = tokenizer.encode(d["text"], max_length=256)
spoes = {}
for s, p, o in d['spo_list']:
s = tokenizer.encode(s)[0][1:-1]
p = predicate2id[p]
o = tokenizer.encode(o)[0][1:-1]
s_idx = search(s, token_ids)
o_idx = search(o, token_ids)
if s_idx != -1 and o_idx != -1:
s = (s_idx, s_idx + len(s) - 1)
o = (o_idx, o_idx + len(o) - 1, p)
if s not in spoes:
spoes[s] = []
spoes[s].append(o)
if spoes:
# subject labels
subject_labels = np.zeros((len(token_ids), 2))
for s in spoes:
subject_labels[s[0], 0] = 1
subject_labels[s[1], 1] = 1
# randomly pick one subject
start, end = random.choice(list(spoes.keys()))
subject_ids = (start, end)
# the corresponding object labels
object_labels = np.zeros((len(token_ids), len(predicate2id), 2))
for o in spoes.get(subject_ids, []):
object_labels[o[0], o[2], 0] = 1
object_labels[o[1], o[2], 1] = 1
output = {
"token_ids": token_ids,
"token_type_ids": segment_ids,
"subject_labels": subject_labels,
"subject_ids": subject_ids,
"object_labels": object_labels,
}
return output
else:
return self.__getitem__(i + 1)
def __len__(self):
return len(self.data)
def collate_fn(batch):
"""
Dynamic padding; `batch` is a list of samples.
"""
def padding(inputs, max_length=None, padding=0):
"""Numpy函数,将序列padding到同一长度
"""
if max_length is None:
max_length = max([len(x) for x in inputs])
pad_width = [(0, 0) for _ in np.shape(inputs[0])]
outputs = []
for x in inputs:
x = x[:max_length]
pad_width[0] = (0, max_length - len(x))
x = np.pad(x, pad_width, 'constant', constant_values=padding)
outputs.append(x)
return np.array(outputs)
token_ids = [data["token_ids"] for data in batch]
max_length = max([len(t) for t in token_ids])
token_type_ids = [data["token_type_ids"] for data in batch]
subject_labels = [data["subject_labels"] for data in batch]
object_labels = [data["object_labels"] for data in batch]
subject_ids = [data["subject_ids"] for data in batch]
token_ids_padded = padding(token_ids, max_length)
token_type_ids_padded = padding(token_type_ids, max_length)
subject_labels_padded = padding(subject_labels, max_length)
object_labels_padded = padding(object_labels, max_length)
subject_ids = np.array(subject_ids)
return torch.tensor(token_ids_padded, dtype=torch.long), torch.tensor(token_type_ids_padded, dtype=torch.float32), \
torch.tensor(subject_labels_padded, dtype=torch.long), torch.tensor(object_labels_padded, dtype=torch.long), \
torch.tensor(subject_ids, dtype=torch.long)
class ExtractTrainer:
def __init__(self):
# load the data
self.data = load_data(data_path)
self.data_dev = load_data(data_dev)
# check whether a GPU is available
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("device: " + str(self.device))
# build the model
self.bert_model = load_bert(word2idx, model_name=model_name, model_class="relation_extrac",
target_size=len(predicate2id))
# load the pretrained parameters
self.bert_model.load_pretrain_params(model_path)
# move the model to the compute device (GPU or CPU)
self.bert_model.set_device(self.device)
# declare the parameters to optimize
self.optim_parameters = list(self.bert_model.parameters())
self.optimizer = torch.optim.Adam(self.optim_parameters, lr=lr, weight_decay=1e-3)
# build the custom data loader
dataset = ExtractDataset(self.data)
self.dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)
self.best_f1 = 0.0
def train(self, epoch):
# train for one epoch
self.bert_model.train()
self.iteration(epoch, dataloader=self.dataloader, train=True)
def save(self, save_path):
"""
Save the model.
"""
self.bert_model.save_all_params(save_path)
print("{} saved!".format(save_path))
def test(self, data_dev):
self.bert_model.eval()
f = open('./state_dict/dev_pred.json', 'w', encoding='utf-8')
X, Y, Z = 1e-10, 1e-10, 1e-10
for tspo in tqdm(data_dev):
text = tspo["text"]
spo = tspo["spo_list"]
token_ids, segment_ids = tokenizer.encode(text, max_length=256)
token_ids = torch.tensor(token_ids, device=self.device).view(1, -1)
# predict subject spans
subject_preds = self.bert_model.predict_subject(token_ids)
# GPU-style access: move predictions to CPU numpy
s = np.where(subject_preds.cuda().data.cpu().numpy()[0].T[0] > 0.5)[0]
e = np.where(subject_preds.cuda().data.cpu().numpy()[0].T[1] > 0.5)[0]
subject_ix = []
for i in s:
end = e[e > i]
if len(end) > 0:
subject_ix.append((i, end[0]))
# for i,j in subject_ix:
# print(tokenizer.decode(token_ids[0][i:j+1].numpy()))
spoes = []
for i in subject_ix:
subject_id = np.array([i])
object_predicate = self.bert_model.predict_object_predicate(token_ids,
torch.tensor(subject_id,device=self.device, dtype=torch.long))
for object_pred in object_predicate:
start = np.where(object_pred.cuda().data.cpu().numpy()[:, :, 0] > 0.5)
end = np.where(object_pred.cuda().data.cpu().numpy()[:, :, 1] > 0.5)
for _start, predicate1 in zip(*start):
for _end, predicate2 in zip(*end):
if _start <= _end and predicate1 == predicate2:
spoes.append(
(i, predicate1,
(_start, _end))
)
break
spoes = [(tokenizer.decode(token_ids.cuda().data.cpu().numpy()[0][i[0]:i[1] + 1]).replace(" ", ""), id2predicate[p],
tokenizer.decode(token_ids.cuda().data.cpu().numpy()[0][j[0]:j[1] + 1]).replace(" ", "")) for i, p, j in spoes]
R = set(spoes)
T = set(spo)
X += len(R & T)
Y += len(R)
Z += len(T)
s = json.dumps({
'text': tspo['text'],
'spo_list': list(spo),
'spo_list_pred': list(spoes),
'new': list(R - T),
'lack': list(T - R),
},
ensure_ascii=False,
indent=4)
f.write(s + '\n')
f1, precision, recall = 2 * X / (Y + Z), X / Y, X / Z
f.close()
self.bert_model.train()
return f1, recall, precision
def iteration(self, epoch, dataloader, train=True):
total_loss = 0
start_time = time.time()  # record the start time
step = 0
report_loss = 0.0
for token_ids, token_type_ids, subject_lables, object_labels, subject_ids in tqdm(dataloader):
step += 1
if step % 300 == 0:
print("report loss is " + str(report_loss))
report_loss = 0.0
text = ["查尔斯·阿兰基斯(<NAME>),1989年4月17日出生于智利圣地亚哥,智利职业足球运动员,司职中场,效力于德国足球甲级联赛勒沃库森足球俱乐部",
"李治即位后,萧淑妃受宠,王皇后为了排挤萧淑妃,答应李治让身在感业寺的武则天续起头发,重新纳入后宫",
"《星空黑夜传奇》是连载于起点中文网的网络小说,作者是啤酒的罪孽"]
for d in text:
with torch.no_grad():
token_ids_test, segment_ids = tokenizer.encode(d, max_length=256)
token_ids_test = torch.tensor(token_ids_test, device=self.device).view(1, -1)
# predict subjects first
pred_subject = self.bert_model.predict_subject(token_ids_test)
pred_subject = pred_subject.squeeze(0)
subject_texts, subject_idss = search_subject(token_ids_test[0], pred_subject.cpu())
if len(subject_texts) == 0:
print("no subject predicted~")
for sub_text, sub_ids in zip(subject_texts, subject_idss):
print("subject is " + str(sub_text))
sub_ids = torch.tensor(sub_ids, device=self.device).view(1, -1)
# print("sub_ids shape is " + str(sub_ids))
object_p_pred = self.bert_model.predict_object_predicate(token_ids_test, sub_ids)
res = search_object(token_ids_test[0], object_p_pred.squeeze(0).cpu())
print("p and obj is " + str(res))
if step % 2000 == 0:
f1, recall, acc = self.test(self.data_dev)
if f1 > self.best_f1:
self.best_f1 = f1
# save the model
self.save(model_save_path)
print("dev f1: " + str(f1) + " .acc: " + str(acc) + " .recall: " + str(recall) + " best_f1:" + str(self.best_f1))
# because target labels are passed in, the model computes and returns the loss
predictions, loss = self.bert_model(token_ids,
subject_ids,
subject_labels=subject_lables,
object_labels=object_labels,
)
# backpropagation
if train:
# clear the previous gradients
self.optimizer.zero_grad()
# backpropagate to obtain new gradients
loss.backward()
torch.nn.utils.clip_grad_norm_(self.bert_model.parameters(), 5.0)
# update the model parameters with the new gradients
self.optimizer.step()
# accumulate the loss to compute this epoch's total
total_loss += loss.item()
report_loss += loss.item()
end_time = time.time()
spend_time = end_time - start_time
# print training info
print("epoch is " + str(epoch) + ". loss is " + str(total_loss) + ". spend time is " + str(spend_time))
# f1, recall, acc = self.test(self.data_dev)
# if f1 > self.best_f1:
# self.best_f1 = f1
# # save the model
# self.save(model_save_path)
# print("dev f1: " + str(f1) + " .acc: " + str(acc) + " .recall: " + str(recall) + " best_f1:" + str(self.best_f1))
if __name__ == "__main__":
trainer = ExtractTrainer()
train_epoches = 50
for epoch in range(train_epoches):
# train one epoch
trainer.train(epoch)
``` |
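The decoding logic in `test` above pairs thresholded start and end probabilities into subject spans (the `s`/`e` arrays and `e[e > i]`). The following is a small, self-contained sketch of that pairing rule using numpy only; the function name and the toy probabilities are illustrative, not part of the original script.

```python
import numpy as np

def decode_spans(start_probs, end_probs, threshold=0.5):
    """Pair each start position with the nearest end position at or after it."""
    spans = []
    starts = np.where(start_probs > threshold)[0]
    ends = np.where(end_probs > threshold)[0]
    for s in starts:
        later = ends[ends >= s]
        if len(later) > 0:
            spans.append((int(s), int(later[0])))
    return spans

if __name__ == "__main__":
    start = np.array([0.9, 0.1, 0.1, 0.8, 0.0])
    end = np.array([0.0, 0.7, 0.0, 0.0, 0.9])
    print(decode_spans(start, end))  # [(0, 1), (3, 4)]
```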
{
"source": "6666ev/CDial-GPT",
"score": 3
} |
#### File: CDial-GPT/data/process.py
```python
import json
import os
def get_set(data_path="data/legal/dev.json"):
devset=[]
with open(data_path,"r") as f:
json_obj=json.load(f)
for obj in json_obj:
article=obj["article"].replace("\n","")
summarization=obj["summarization"].replace("\n","")
devset.append([article,summarization])
return devset
dataset={}
dataset["train"]=get_set("data/legal/train.json")
dataset["valid"]=get_set("data/legal/dev.json")
dataset["test"]=get_set("data/legal/test.json")
with open("data/legal/legal_data.json","w") as f:
json.dump(dataset,f,ensure_ascii=False)
``` |
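A quick way to sanity-check the converted file is to read it back and look at one (article, summarization) pair; this sketch assumes the script above has already written data/legal/legal_data.json.

```python
import json

with open("data/legal/legal_data.json", "r", encoding="utf-8") as f:
    dataset = json.load(f)

# each split holds [article, summarization] pairs
for split in ("train", "valid", "test"):
    print(split, len(dataset[split]))

article, summarization = dataset["train"][0]
print(article[:50], "->", summarization[:50])
```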
{
"source": "666asd/pfp",
"score": 2
} |
#### File: pfp/native/compat_io.py
```python
import six
import sys
from pfp.native import native
import pfp.fields
import pfp.errors as errors
import pfp.bitwrap as bitwrap
# http://www.sweetscape.com/010editor/manual/FuncIO.htm
# void BigEndian()
@native(name="BigEndian", ret=pfp.fields.Void)
def BigEndian(params, ctxt, scope, stream, coord):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
pfp.fields.NumberBase.endian = pfp.fields.BIG_ENDIAN
# void BitfieldDisablePadding()
@native(name="BitfieldDisablePadding", ret=pfp.fields.Void, send_interp=True)
def BitfieldDisablePadding(params, ctxt, scope, stream, coord, interp):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
interp.set_bitfield_padded(False)
# void BitfieldEnablePadding()
@native(name="BitfieldEnablePadding", ret=pfp.fields.Void, send_interp=True)
def BitfieldEnablePadding(params, ctxt, scope, stream, coord, interp):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
interp.set_bitfield_padded(True)
# void BitfieldLeftToRight()
@native(name="BitfieldLeftToRight", ret=pfp.fields.Void, send_interp=True)
def BitfieldLeftToRight(params, ctxt, scope, stream, coord, interp):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
interp.set_bitfield_direction(interp.BITFIELD_DIR_LEFT_RIGHT)
# void BitfieldRightToLeft()
@native(name="BitfieldRightToLeft", ret=pfp.fields.Void, send_interp=True)
def BitfieldRightToLeft(params, ctxt, scope, stream, coord, interp):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
interp.set_bitfield_direction(interp.BITFIELD_DIR_RIGHT_LEFT)
# double ConvertBytesToDouble( uchar byteArray[] )
@native(name="ConvertBytesToDouble", ret=pfp.fields.Double)
def ConvertBytesToDouble(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# float ConvertBytesToFloat( uchar byteArray[] )
@native(name="ConvertBytesToFloat", ret=pfp.fields.Float)
def ConvertBytesToFloat(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# hfloat ConvertBytesToHFloat( uchar byteArray[] )
@native(name="ConvertBytesToHFloat", ret=pfp.fields.Float)
def ConvertBytesToHFloat(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# int ConvertDataToBytes( data_type value, uchar byteArray[] )
@native(name="ConvertDataToBytes", ret=pfp.fields.Int)
def ConvertDataToBytes(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void DeleteBytes( int64 start, int64 size )
@native(name="DeleteBytes", ret=pfp.fields.Void)
def DeleteBytes(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# int DirectoryExists( string dir )
@native(name="DirectoryExists", ret=pfp.fields.Int)
def DirectoryExists(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# int FEof()
@native(name="FEof", ret=pfp.fields.Int)
def FEof(params, ctxt, scope, stream, coord):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
# now that streams are _ALL_ BitwrappedStreams, we can use BitwrappedStream-specific
# functions
if stream.is_eof():
return 1
else:
return 0
# int64 FileSize()
@native(name="FileSize", ret=pfp.fields.Int64)
def FileSize(params, ctxt, scope, stream, coord):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
return stream.size()
# TFileList FindFiles( string dir, string filter )
@native(name="FindFiles", ret=pfp.fields.Void)
def FindFiles(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# int FPrintf( int fileNum, char format[], ... )
@native(name="FPrintf", ret=pfp.fields.Int)
def FPrintf(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# int FSeek( int64 pos )
@native(name="FSeek", ret=pfp.fields.Int)
def FSeek(params, ctxt, scope, stream, coord):
"""Returns 0 if successful or -1 if the address is out of range
"""
if len(params) != 1:
raise errors.InvalidArguments(
coord,
"{} args".format(len(params)),
"FSeek accepts only one argument",
)
pos = PYVAL(params[0])
curr_pos = stream.tell()
fsize = stream.size()
if pos > fsize:
stream.seek(fsize)
return -1
elif pos < 0:
stream.seek(0)
return -1
diff = pos - curr_pos
if diff < 0:
stream.seek(pos)
return 0
data = stream.read(diff)
# let the ctxt automatically append numbers, as needed, unless the previous
# child was also a skipped field
skipped_name = "_skipped"
if len(ctxt._pfp__children) > 0 and ctxt._pfp__children[
-1
]._pfp__name.startswith("_skipped"):
old_name = ctxt._pfp__children[-1]._pfp__name
data = ctxt._pfp__children[-1].raw_data + data
skipped_name = old_name
ctxt._pfp__children = ctxt._pfp__children[:-1]
del ctxt._pfp__children_map[old_name]
tmp_stream = bitwrap.BitwrappedStream(six.BytesIO(data))
new_field = pfp.fields.Array(len(data), pfp.fields.Char, tmp_stream)
ctxt._pfp__add_child(skipped_name, new_field, stream)
scope.add_var(skipped_name, new_field)
return 0
# int FSkip( int64 offset )
@native(name="FSkip", ret=pfp.fields.Int)
def FSkip(params, ctxt, scope, stream, coord):
"""Returns 0 if successful or -1 if the address is out of range
"""
if len(params) != 1:
raise errors.InvalidArguments(
coord,
"{} args".format(len(params)),
"FSkip accepts only one argument",
)
skip_amt = PYVAL(params[0])
pos = skip_amt + stream.tell()
return FSeek([pos], ctxt, scope, stream, coord)
# int64 FTell()
@native(name="FTell", ret=pfp.fields.Int64)
def FTell(params, ctxt, scope, stream, coord):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
return stream.tell()
# void InsertBytes( int64 start, int64 size, uchar value=0 )
@native(name="InsertBytes", ret=pfp.fields.Void)
def InsertBytes(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# int IsBigEndian()
@native(name="IsBigEndian", ret=pfp.fields.Int)
def IsBigEndian(params, ctxt, scope, stream, coord):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
if pfp.fields.NumberBase.endian == pfp.fields.BIG_ENDIAN:
return 1
else:
return 0
# int IsLittleEndian()
@native(name="IsLittleEndian", ret=pfp.fields.Int)
def IsLittleEndian(params, ctxt, scope, stream, coord):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
if pfp.fields.NumberBase.endian == pfp.fields.LITTLE_ENDIAN:
return 1
else:
return 0
# void LittleEndian()
@native(name="LittleEndian", ret=pfp.fields.Void)
def LittleEndian(params, ctxt, scope, stream, coord):
if len(params) > 0:
raise errors.InvalidArguments(
coord, "0 arguments", "{} args".format(len(params))
)
pfp.fields.NumberBase.endian = pfp.fields.LITTLE_ENDIAN
# int MakeDir( string dir )
@native(name="MakeDir", ret=pfp.fields.Int)
def MakeDir(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void OverwriteBytes( int64 start, int64 size, uchar value=0 )
@native(name="OverwriteBytes", ret=pfp.fields.Void)
def OverwriteBytes(params, ctxt, scope, stream, coord):
raise NotImplementedError()
def _read_data(params, stream, cls, coord):
bits = stream._bits
curr_pos = stream.tell()
if len(params) == 1:
pos = PYVAL(params[0])
stream.seek(pos, 0)
elif len(params) > 1:
raise errors.InvalidArguments(
coord, "at most 1 arguments", "{} args".format(len(params))
)
res = cls(stream=stream)
# reset the stream
stream.seek(curr_pos, 0)
stream._bits = bits
return res
# char ReadByte( int64 pos=FTell() )
@native(name="ReadByte", ret=pfp.fields.Char)
def ReadByte(params, ctxt, scope, stream, coord):
return _read_data(params, stream, pfp.fields.Char, coord)
# double ReadDouble( int64 pos=FTell() )
@native(name="ReadDouble", ret=pfp.fields.Double)
def ReadDouble(params, ctxt, scope, stream, coord):
return _read_data(params, stream, pfp.fields.Double, coord)
# float ReadFloat( int64 pos=FTell() )
@native(name="ReadFloat", ret=pfp.fields.Float)
def ReadFloat(params, ctxt, scope, stream, coord):
return _read_data(params, stream, pfp.fields.Float, coord)
# hfloat ReadHFloat( int64 pos=FTell() )
@native(name="ReadHFloat", ret=pfp.fields.Float)
def ReadHFloat(params, ctxt, scope, stream, coord):
return _read_data(params, stream, pfp.fields.Float, coord)
# int ReadInt( int64 pos=FTell() )
@native(name="ReadInt", ret=pfp.fields.Int)
def ReadInt(params, ctxt, scope, stream, coord):
return _read_data(params, stream, pfp.fields.Int, coord)
# int64 ReadInt64( int64 pos=FTell() )
@native(name="ReadInt64", ret=pfp.fields.Int64)
def ReadInt64(params, ctxt, scope, stream, coord):
return _read_data(params, stream, pfp.fields.Int64, coord)
# int64 ReadQuad( int64 pos=FTell() )
@native(name="ReadQuad", ret=pfp.fields.Int64)
def ReadQuad(params, ctxt, scope, stream, coord):
return _read_data(params, stream, pfp.fields.Int64, coord)
# short ReadShort( int64 pos=FTell() )
@native(name="ReadShort", ret=pfp.fields.Short)
def ReadShort(params, ctxt, scope, stream, coord):
return _read_data(params, stream, pfp.fields.Short, coord)
# uchar ReadUByte( int64 pos=FTell() )
@native(name="ReadUByte", ret=pfp.fields.UChar)
def ReadUByte(params, ctxt, scope, stream, coord):
return _read_data(params, stream, pfp.fields.UChar, coord)
# uint ReadUInt( int64 pos=FTell() )
@native(name="ReadUInt", ret=pfp.fields.UInt)
def ReadUInt(params, ctxt, scope, stream, coord):
return _read_data(params, stream, pfp.fields.UInt, coord)
# uint64 ReadUInt64( int64 pos=FTell() )
@native(name="ReadUInt64", ret=pfp.fields.UInt64)
def ReadUInt64(params, ctxt, scope, stream, coord):
return _read_data(params, stream, pfp.fields.UInt64, coord)
# uint64 ReadUQuad( int64 pos=FTell() )
@native(name="ReadUQuad", ret=pfp.fields.UInt64)
def ReadUQuad(params, ctxt, scope, stream, coord):
return _read_data(params, stream, pfp.fields.UInt64, coord)
# ushort ReadUShort( int64 pos=FTell() )
@native(name="ReadUShort", ret=pfp.fields.UShort)
def ReadUShort(params, ctxt, scope, stream, coord):
return _read_data(params, stream, pfp.fields.UShort, coord)
# char[] ReadLine( int64 pos, int maxLen=-1, int includeLinefeeds=true )
@native(name="ReadLine", ret=pfp.fields.String)
def ReadLine(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void ReadBytes( uchar buffer[], int64 pos, int n )
@native(name="ReadBytes", ret=pfp.fields.Void)
def ReadBytes(params, ctxt, scope, stream, coord):
if len(params) != 3:
raise errors.InvalidArguments(
coord,
"3 arguments (buffer, pos, n)",
"{} args".format(len(params)),
)
if not isinstance(params[0], pfp.fields.Array):
raise errors.InvalidArguments(
coord, "buffer must be an array", params[0].__class__.__name__
)
if params[0].field_cls not in [pfp.fields.UChar, pfp.fields.Char]:
raise errors.InvalidArguments(
coord,
"buffer must be an array of uchar or char",
params[0].field_cls.__name__,
)
if not isinstance(params[1], pfp.fields.IntBase):
raise errors.InvalidArguments(
coord, "pos must be an integer", params[1].__class__.__name__
)
if not isinstance(params[2], pfp.fields.IntBase):
raise errors.InvalidArguments(
coord, "n must be an integer", params[2].__class__.__name__
)
bits = stream._bits
curr_pos = stream.tell()
vals = [
params[0].field_cls(stream) for x in six.moves.range(PYVAL(params[2]))
]
stream.seek(curr_pos, 0)
stream._bits = bits
params[0]._pfp__set_value(vals)
# char[] ReadString( int64 pos, int maxLen=-1 )
@native(name="ReadString", ret=pfp.fields.String)
def ReadString(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# int ReadStringLength( int64 pos, int maxLen=-1 )
@native(name="ReadStringLength", ret=pfp.fields.Int)
def ReadStringLength(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# wstring ReadWLine( int64 pos, int maxLen=-1 )
@native(name="ReadWLine", ret=pfp.fields.WString)
def ReadWLine(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# wstring ReadWString( int64 pos, int maxLen=-1 )
@native(name="ReadWString", ret=pfp.fields.WString)
def ReadWString(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# int ReadWStringLength( int64 pos, int maxLen=-1 )
@native(name="ReadWStringLength", ret=pfp.fields.Int)
def ReadWStringLength(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# int64 TextAddressToLine( int64 address )
@native(name="TextAddressToLine", ret=pfp.fields.Int64)
def TextAddressToLine(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# int TextAddressToColumn( int64 address )
@native(name="TextAddressToColumn", ret=pfp.fields.Int)
def TextAddressToColumn(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# int64 TextColumnToAddress( int64 line, int column )
@native(name="TextColumnToAddress", ret=pfp.fields.Int64)
def TextColumnToAddress(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# int64 TextGetNumLines()
@native(name="TextGetNumLines", ret=pfp.fields.Int64)
def TextGetNumLines(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# int TextGetLineSize( int64 line, int includeLinefeeds=true )
@native(name="TextGetLineSize", ret=pfp.fields.Int)
def TextGetLineSize(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# int64 TextLineToAddress( int64 line )
@native(name="TextLineToAddress", ret=pfp.fields.Int64)
def TextLineToAddress(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# int TextReadLine( char buffer[], int64 line, int maxsize, int includeLinefeeds=true )
@native(name="TextReadLine", ret=pfp.fields.Int)
def TextReadLine(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# int TextReadLineW( wchar_t buffer[], int64 line, int maxsize, int includeLinefeeds=true )
@native(name="TextReadLineW", ret=pfp.fields.Int)
def TextReadLineW(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void TextWriteLine( const char buffer[], int64 line, int includeLinefeeds=true )
@native(name="TextWriteLine", ret=pfp.fields.Void)
def TextWriteLine(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void TextWriteLineW( const wchar_t buffer[], int64 line, int includeLinefeeds=true )
@native(name="TextWriteLineW", ret=pfp.fields.Void)
def TextWriteLineW(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void WriteByte( int64 pos, char value )
@native(name="WriteByte", ret=pfp.fields.Void)
def WriteByte(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void WriteDouble( int64 pos, double value )
@native(name="WriteDouble", ret=pfp.fields.Void)
def WriteDouble(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void WriteFloat( int64 pos, float value )
@native(name="WriteFloat", ret=pfp.fields.Void)
def WriteFloat(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void WriteHFloat( int64 pos, float value )
@native(name="WriteHFloat", ret=pfp.fields.Void)
def WriteHFloat(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void WriteInt( int64 pos, int value )
@native(name="WriteInt", ret=pfp.fields.Void)
def WriteInt(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void WriteInt64( int64 pos, int64 value )
@native(name="WriteInt64", ret=pfp.fields.Void)
def WriteInt64(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void WriteQuad( int64 pos, int64 value )
@native(name="WriteQuad", ret=pfp.fields.Void)
def WriteQuad(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void WriteShort( int64 pos, short value )
@native(name="WriteShort", ret=pfp.fields.Void)
def WriteShort(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void WriteUByte( int64 pos, uchar value )
@native(name="WriteUByte", ret=pfp.fields.Void)
def WriteUByte(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void WriteUInt( int64 pos, uint value )
@native(name="WriteUInt", ret=pfp.fields.Void)
def WriteUInt(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void WriteUInt64( int64 pos, uint64 value )
@native(name="WriteUInt64", ret=pfp.fields.Void)
def WriteUInt64(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void WriteUQuad( int64 pos, uint64 value )
@native(name="WriteUQuad", ret=pfp.fields.Void)
def WriteUQuad(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void WriteUShort( int64 pos, ushort value )
@native(name="WriteUShort", ret=pfp.fields.Void)
def WriteUShort(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void WriteBytes( const uchar buffer[], int64 pos, int n )
@native(name="WriteBytes", ret=pfp.fields.Void)
def WriteBytes(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void WriteString( int64 pos, const char value[] )
@native(name="WriteString", ret=pfp.fields.Void)
def WriteString(params, ctxt, scope, stream, coord):
raise NotImplementedError()
# void WriteWString( int64 pos, const wstring value )
@native(name="WriteWString", ret=pfp.fields.Void)
def WriteWString(params, ctxt, scope, stream, coord):
raise NotImplementedError()
```
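All of the natives above operate on a `BitwrappedStream`, so the read/seek/tell bookkeeping performed by `FSeek`, `FTell`, `FileSize` and `FEof` can be exercised directly. A minimal sketch, assuming pfp is installed and that `BitwrappedStream` exposes the same methods these natives rely on:

```python
import six
import pfp.bitwrap as bitwrap

# wrap a byte buffer the same way FSeek above wraps the skipped data
stream = bitwrap.BitwrappedStream(six.BytesIO(b"\x01\x02\x03\x04\x05\x06"))

print(stream.size())    # total number of bytes, as used by FileSize/FSeek
print(stream.tell())    # current position, as returned by FTell
stream.seek(4)          # absolute seek, as FSeek does for an in-range position
print(stream.read(2))   # b'\x05\x06'
print(stream.is_eof())  # True once everything has been consumed, as in FEof
```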
#### File: pfp/native/compat_tools.py
```python
import binascii
import re
import six
import sys
from pfp.native import native, predefine
import pfp.errors as errors
import pfp.fields
import pfp.utils as utils
import zlib
# http://www.sweetscape.com/010editor/manual/FuncTools.htm
predefine(
"""
const int CHECKSUM_BYTE = 0; // Treats the file as a set of unsigned bytes
const int CHECKSUM_SHORT_LE = 1; // Treats the file as a set of unsigned little-endian shorts
const int CHECKSUM_SHORT_BE = 2; // Treats the file as a set of unsigned big-endian shorts
const int CHECKSUM_INT_LE = 3; // Treats the file as a set of unsigned little-endian ints
const int CHECKSUM_INT_BE = 4; // Treats the file as a set of unsigned big-endian ints
const int CHECKSUM_INT64_LE = 5; // Treats the file as a set of unsigned little-endian int64s
const int CHECKSUM_INT64_BE = 6; // Treats the file as a set of unsigned big-endian int64s
const int CHECKSUM_SUM8 = 7; // Same as CHECKSUM_BYTE except result output as 8-bits
const int CHECKSUM_SUM16 = 8; // Same as CHECKSUM_BYTE except result output as 16-bits
const int CHECKSUM_SUM32 = 9; // Same as CHECKSUM_BYTE except result output as 32-bits
const int CHECKSUM_SUM64 = 10; // Same as CHECKSUM_BYTE
const int CHECKSUM_CRC16 = 11;
const int CHECKSUM_CRCCCITT = 12;
const int CHECKSUM_CRC32 = 13;
const int CHECKSUM_ADLER32 = 14;
"""
)
# int64 Checksum(
# int algorithm,
# int64 start=0,
# int64 size=0,
# int64 crcPolynomial=-1,
# int64 crcInitValue=-1 )
@native(name="Checksum", ret=pfp.fields.Int64)
def Checksum(params, ctxt, scope, stream, coord):
"""
Runs a simple checksum on a file and returns the result as a int64. The
algorithm can be one of the following constants:
CHECKSUM_BYTE - Treats the file as a set of unsigned bytes
CHECKSUM_SHORT_LE - Treats the file as a set of unsigned little-endian shorts
CHECKSUM_SHORT_BE - Treats the file as a set of unsigned big-endian shorts
CHECKSUM_INT_LE - Treats the file as a set of unsigned little-endian ints
CHECKSUM_INT_BE - Treats the file as a set of unsigned big-endian ints
CHECKSUM_INT64_LE - Treats the file as a set of unsigned little-endian int64s
CHECKSUM_INT64_BE - Treats the file as a set of unsigned big-endian int64s
CHECKSUM_SUM8 - Same as CHECKSUM_BYTE except result output as 8-bits
CHECKSUM_SUM16 - Same as CHECKSUM_BYTE except result output as 16-bits
CHECKSUM_SUM32 - Same as CHECKSUM_BYTE except result output as 32-bits
CHECKSUM_SUM64 - Same as CHECKSUM_BYTE
CHECKSUM_CRC16
CHECKSUM_CRCCCITT
CHECKSUM_CRC32
CHECKSUM_ADLER32
If start and size are zero, the algorithm is run on the whole file. If
they are not zero then the algorithm is run on size bytes starting at
address start. See the ChecksumAlgBytes and ChecksumAlgStr functions
to run more complex algorithms. crcPolynomial and crcInitValue
can be used to set a custom polynomial and initial value for the
CRC functions. A value of -1 for these parameters uses the default
values as described in the Check Sum/Hash Algorithms topic. A negative
number is returned on error.
"""
checksum_types = {
0: "CHECKSUM_BYTE", # Treats the file as a set of unsigned bytes
1: "CHECKSUM_SHORT_LE", # Treats the file as a set of unsigned little-endian shorts
2: "CHECKSUM_SHORT_BE", # Treats the file as a set of unsigned big-endian shorts
3: "CHECKSUM_INT_LE", # Treats the file as a set of unsigned little-endian ints
4: "CHECKSUM_INT_BE", # Treats the file as a set of unsigned big-endian ints
5: "CHECKSUM_INT64_LE", # Treats the file as a set of unsigned little-endian int64s
6: "CHECKSUM_INT64_BE", # Treats the file as a set of unsigned big-endian int64s
7: "CHECKSUM_SUM8", # Same as CHECKSUM_BYTE except result output as 8-bits
8: "CHECKSUM_SUM16", # Same as CHECKSUM_BYTE except result output as 16-bits
9: "CHECKSUM_SUM32", # Same as CHECKSUM_BYTE except result output as 32-bits
10: "CHECKSUM_SUM64", # Same as CHECKSUM_BYTE
11: "CHECKSUM_CRC16",
12: "CHECKSUM_CRCCCITT",
13: _crc32,
14: _checksum_Adler32,
}
if len(params) < 1:
raise errors.InvalidArguments(
coord, "at least 1 argument", "{} args".format(len(params))
)
alg = PYVAL(params[0])
if alg not in checksum_types:
raise errors.InvalidArguments(
coord, "checksum alg must be one of (0-14)", "{}".format(alg)
)
# only CRC32 (13) and Adler32 (14) are implemented below; the other entries are still name placeholders
if not callable(checksum_types[alg]):
raise NotImplementedError("Checksum algorithm %d is not implemented" % alg)
start = 0
if len(params) > 1:
start = PYVAL(params[1])
size = 0
if len(params) > 2:
size = PYVAL(params[2])
crc_poly = -1
if len(params) > 3:
crc_poly = PYVAL(params[3])
crc_init = -1
if len(params) > 4:
crc_init = PYVAL(params[4])
stream_pos = stream.tell()
if start + size == 0:
stream.seek(0, 0)
data = stream.read()
else:
stream.seek(start, 0)
data = stream.read(size)
try:
return checksum_types[alg](data, crc_init, crc_poly)
finally:
# yes, this does execute even though a return statement
# exists within the try
stream.seek(stream_pos, 0)
def _checksum_Adler32(data, crc_init=-1, crc_poly=-1):
return zlib.adler32(data)
def _crc32(data, crc_init=-1, crc_poly=-1):
if crc_init == -1:
return binascii.crc32(data)
else:
return binascii.crc32(data, crc_init)
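# Illustration (not part of the original file): the two algorithms implemented
# above are thin wrappers over the standard library, so for any byte buffer
#
#   data = b"hello world"
#   _crc32(data)            == binascii.crc32(data)
#   _checksum_Adler32(data) == zlib.adler32(data)
#
# i.e. Checksum(CHECKSUM_CRC32) / Checksum(CHECKSUM_ADLER32) reproduce
# binascii.crc32 / zlib.adler32 over the selected byte range of the stream.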
# int ChecksumAlgArrayStr(
# int algorithm,
# char result[],
# uchar *buffer,
# int64 size,
# char ignore[]="",
# int64 crcPolynomial=-1,
# int64 crcInitValue=-1 )
@native(name="ChecksumAlgArrayStr", ret=pfp.fields.Int)
def ChecksumAlgArrayStr(params, ctxt, scope, stream, coord):
"""
Similar to the ChecksumAlgStr function except that the checksum is
run on data stored in an array instead of in a file. The data for the
checksum should be in the buffer array and the size parameter
lists the number of bytes in the array. The result from the checksum
will be stored in the result string and the number of characters
in the string will be returned, or -1 if an error occurred. See the
ChecksumAlgStr function for a list of available algorithms.
"""
raise NotImplementedError()
# int ChecksumAlgArrayBytes(
# int algorithm,
# uchar result[],
# uchar *buffer,
# int64 size,
# char ignore[]="",
# int64 crcPolynomial=-1,
# int64 crcInitValue=-1 )
@native(name="ChecksumAlgArrayBytes", ret=pfp.fields.Int)
def ChecksumAlgArrayBytes(params, ctxt, scope, stream, coord):
"""
Similar to the ChecksumAlgStr function except that the checksum is run
on data in an array instead of in a file and the results are stored
in an array of bytes instead of a string. The data for the checksum
should be in the buffer array and the size parameter lists the
number of bytes in the array. The result of the checksum operation
will be stored as a set of hex bytes in the parameter result. The
function will return the number of bytes placed in the result array
or -1 if an error occurred. See the ChecksumAlgStr function for a
list of available algorithms.
"""
raise NotImplementedError()
# int ChecksumAlgStr(
# int algorithm,
# char result[],
# int64 start=0,
# int64 size=0,
# char ignore[]="",
# int64 crcPolynomial=-1,
# int64 crcInitValue=-1 )
@native(name="ChecksumAlgStr", ret=pfp.fields.Int)
def ChecksumAlgStr(params, ctxt, scope, stream, coord):
"""
Similar to the Checksum algorithm except the following algorithm
constants are supported:
CHECKSUM_BYTE
CHECKSUM_SHORT_LE
CHECKSUM_SHORT_BE
CHECKSUM_INT_LE
CHECKSUM_INT_BE
CHECKSUM_INT64_LE
CHECKSUM_INT64_BE
CHECKSUM_SUM8
CHECKSUM_SUM16
CHECKSUM_SUM32
CHECKSUM_SUM64
CHECKSUM_CRC16
CHECKSUM_CRCCCITT
CHECKSUM_CRC32
CHECKSUM_ADLER32
CHECKSUM_MD2
CHECKSUM_MD4
CHECKSUM_MD5
CHECKSUM_RIPEMD160
CHECKSUM_SHA1
CHECKSUM_SHA256
CHECKSUM_SHA512
CHECKSUM_TIGER
The result argument specifies a string which will hold the result of
the checksum. The return value indicates the number of characters
in the string, or is negative if an error occurred. Any ranges to
ignore can be specified in string format with the ignore argument
(see Check Sum/Hash Algorithms). The crcPolynomial and crcInitValue
parameters are used to set a custom polynomial and initial value
for the CRC algorithms. Specifying -1 for these parameters uses the
default values as indicated in the Check Sum/Hash Algorithms help
topic. See the Checksum function above for an explanation of the
different checksum constants.
"""
raise NotImplementedError()
# int ChecksumAlgBytes(
# int algorithm,
# uchar result[],
# int64 start=0,
# int64 size=0,
# char ignore[]="",
# int64 crcPolynomial=-1,
# int64 crcInitValue=-1 )
@native(name="ChecksumAlgBytes", ret=pfp.fields.Int)
def ChecksumAlgBytes(params, ctxt, scope, stream, coord):
"""
This function is identical to the ChecksumAlgStr function except that
the checksum is returned as a byte array in the result argument. The
return value is the number of bytes returned in the array.
"""
raise NotImplementedError()
# TCompareResults Compare(
# int type,
# int fileNumA,
# int fileNumB,
# int64 startA=0,
# int64 sizeA=0,
# int64 startB=0,
# int64 sizeB=0,
# int matchcase=true,
# int64 maxlookahead=10000,
# int64 minmatchlength=8,
# int64 quickmatch=512 )
@native(name="Compare", ret=pfp.fields.Void)
def Compare(params, ctxt, scope, stream, coord):
"""
Runs a comparison between two files or between two blocks of data. The
type argument indicates the type of comparison that should be run
and can be either:
COMPARE_SYNCHRONIZE (a binary comparison)
COMPARE_SIMPLE (a byte-by-byte comparison)
fileNumA and fileNumB indicate the numbers of the file to compare (see
GetFileNum). The file numbers may be the same to compare two blocks
in the same file. The startA, sizeA, startB, and sizeB arguments
indicate the size of the blocks to compare in the two files. If the
start and size are both zero, the whole file is used. If matchcase is
false, then letters of mixed upper and lower cases will match. See
Comparing Files for details on the maxlookahead, minmatchlength and
quickmatch arguments. The return value is TCompareResults structure
with contains a count variable indicating the number of resulting
ranges, and an array of record. Each record contains the variables
type, startA, sizeA, startB, and sizeB to indicate the range. The
type variable will be one of:
COMPARE_MATCH=0
COMPARE_DIFFERENCE=1
COMPARE_ONLY_IN_A=2
COMPARE_ONLY_IN_B=3
"""
raise NotImplementedError()
# char ConvertASCIIToEBCDIC( char ascii )
@native(name="ConvertASCIIToEBCDIC", ret=pfp.fields.Char)
def ConvertASCIIToEBCDIC(params, ctxt, scope, stream, coord):
"""
Converts the given ASCII character into an EBCDIC character and returns the result.
"""
raise NotImplementedError()
# void ConvertASCIIToUNICODE(
# int len,
# const char ascii[],
# ubyte unicode[],
# int bigendian=false )
@native(name="ConvertASCIIToUNICODE", ret=pfp.fields.Void)
def ConvertASCIIToUNICODE(params, ctxt, scope, stream, coord):
"""
Converts an ASCII string into an array of bytes and stores them in the
unicode argument. len indicates the number of characters to convert
and the unicode array must be of size at least 2*len. If bigendian
is true, the bytes are stored in big-endian mode, otherwise the
bytes are stored in little-endian mode.
"""
raise NotImplementedError()
# void ConvertASCIIToUNICODEW(
# int len,
# const char ascii[],
# ushort unicode[] )
@native(name="ConvertASCIIToUNICODEW", ret=pfp.fields.Void)
def ConvertASCIIToUNICODEW(params, ctxt, scope, stream, coord):
"""
Converts an ASCII string into an array of words and stores the array in
the unicode argument. The number of characters to convert is given by
the len argument and the unicode argument must have size at least len.
"""
raise NotImplementedError()
# char ConvertEBCDICToASCII( char ebcdic )
@native(name="ConvertEBCDICToASCII", ret=pfp.fields.Char)
def ConvertEBCDICToASCII(params, ctxt, scope, stream, coord):
"""
Converts the given EBCDIC character into an ASCII character and returns the result.
"""
raise NotImplementedError()
# void ConvertUNICODEToASCII(
# int len,
# const ubyte unicode[],
# char ascii[],
# int bigendian=false )
@native(name="ConvertUNICODEToASCII", ret=pfp.fields.Void)
def ConvertUNICODEToASCII(params, ctxt, scope, stream, coord):
"""
Converts an array of UNICODE characters in the unicode argument into
ASCII bytes and stores them in the ascii array. len indicates the
number of characters to convert. unicode must be of size at least
size 2*len and ascii must be of size at least len. If bigendian is
true, the bytes are stored in big-endian mode, otherwise the bytes
are stored in little-endian mode.
"""
raise NotImplementedError()
# void ConvertUNICODEToASCIIW(
# int len,
# const ushort unicode[],
# char ascii[] )
@native(name="ConvertUNICODEToASCIIW", ret=pfp.fields.Void)
def ConvertUNICODEToASCIIW(params, ctxt, scope, stream, coord):
"""
Converts the array of words in the unicode argument to ASCII bytes and
saves them to the ascii argument. The number of characters to convert
is given by len. unicode and ascii must be of size at least size len.
"""
raise NotImplementedError()
# int ExportFile(
# int type,
# char filename[],
# int64 start=0,
# int64 size=0,
# int64 startaddress=0,
# int bytesperrow=16,
# int wordaddresses=0 )
@native(name="ExportFile", ret=pfp.fields.Int)
def ExportFile(params, ctxt, scope, stream, coord):
"""
Exports the currently open file to a file on disk given by filename
using one of the following type formats:
EXPORT_HEXTEXT
EXPORT_DECTEXT
EXPORT_BINARYTEXT
EXPORT_CCODE
EXPORT_JAVACODE
EXPORT_INTEL8
EXPORT_INTEL16
EXPORT_INTEL32
EXPORT_S19
EXPORT_S28
EXPORT_S37
EXPORT_TEXT_AREA
EXPORT_HTML
EXPORT_RTF
EXPORT_BASE64
EXPORT_UUENCODE
The start and size arguments indicate what portion of the
file to export. If they are both zero then the whole file is
exported. startaddress indicates the starting address that is written
to the file for Intel Hex or Motorola formats. bytesperrow indicates
the number of bytes written on each line of the output file. If
wordaddresses is true and the export format is Intel Hex, the file
will be written using word-based addresses. See Importing/Exporting
Files for more information on exporting.
"""
raise NotImplementedError()
FIND_MATCHES_ITER = None
FIND_MATCHES_START_OFFSET = 0
FINDMETHOD_NORMAL = 0
FINDMETHOD_WILDCARDS = 1
FINDMETHOD_REGEX = 2
predefine(
"""
const int FINDMETHOD_NORMAL = 0; // a normal search
const int FINDMETHOD_WILDCARDS = 1; // when searching for strings use wildcards '*' or '?'
const int FINDMETHOD_REGEX = 2; // when searching for strings use Regular Expressions
/*
This structure contains a count variable indicating the number of matches,
and a start array holding an array of starting positions, plus a size array
which holds an array of target lengths
*/
typedef struct {
unsigned int count;
unsigned int start[];
unsigned int size[];
} TFindResults;
"""
)
def _find_helper(params, ctxt, scope, stream, coord, interp):
global FIND_MATCHES_START_OFFSET
if len(params) == 0:
raise errors.InvalidArguments(
coord, "at least 1 argument", "{} args".format(len(params))
)
if (
isinstance(params[0], pfp.fields.Array) and params[0].is_stringable()
) or isinstance(params[0], pfp.fields.String):
data = PYSTR(params[0]) # should correctly do null termination
else:
data = params[0]._pfp__build()
if len(params) > 1:
match_case = not not PYVAL(params[1])
else:
match_case = True
if len(params) > 2:
wholeword = not not PYVAL(params[2])
else:
wholeword = False
if len(params) > 3:
method = PYVAL(params[3])
else:
method = FINDMETHOD_NORMAL
if len(params) > 4:
tolerance = PYVAL(params[4])
if tolerance != 0.0:
raise NotImplementedError(
"tolerance in FindAll is not fully implemented"
)
else:
tolerance = 0.0
if len(params) > 5:
direction = PYVAL(params[5])
else:
direction = 1
if len(params) > 6:
start = PYVAL(params[6])
else:
start = 0
FIND_MATCHES_START_OFFSET = start
if len(params) > 7:
size = PYVAL(params[7])
else:
size = 0
if len(params) > 8:
wildcard_match_length = PYVAL(params[8])
else:
wildcard_match_length = 24
regex = re.escape(data)
if method == FINDMETHOD_WILDCARDS:
# * wildcard
# make it a non-greedy match as well (add the question mark at the end)
regex = regex.replace(r"\*", ".{," + str(wildcard_match_length) + "}?")
# ? wildcard
regex = regex.replace(r"\?", ".")
if method == FINDMETHOD_REGEX:
regex = data
if wholeword:
regex = "\\b" + regex + "\\b"
regex = utils.binary(regex)
stream_bits = stream._bits
stream_pos = stream.tell()
stream.seek(start)
if size == 0:
search_data = stream.read(stream.size())
else:
search_data = stream.read(size)
stream.seek(stream_pos)
stream._bits = stream_bits
flags = 0
if not match_case:
flags |= re.IGNORECASE
return re.finditer(regex, search_data, flags)
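# Illustration (hypothetical query values, not part of the original file):
# with FINDMETHOD_WILDCARDS the helper above first escapes the query and then
# rewrites the escaped wildcards, so "GIF8?a" becomes the pattern b"GIF8.a":
#
#   pattern = re.escape("GIF8?a").replace(r"\?", ".")
#   re.search(utils.binary(pattern), b"GIF89a")  # -> matches
#
# '*' is rewritten to ".{,N}?" where N is wildcardMatchLength, i.e. a
# non-greedy match of at most N characters.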
# TFindResults FindAll(
# <datatype> data,
# int matchcase=true,
# int wholeword=false,
# int method=0,
# double tolerance=0.0,
# int dir=1,
# int64 start=0,
# int64 size=0,
# int wildcardMatchLength=24 )
@native(name="FindAll", ret="TFindResults", send_interp=True)
def FindAll(params, ctxt, scope, stream, coord, interp):
"""
This function converts the argument data into a set of hex bytes
and then searches the current file for all occurrences of those
bytes. data may be any of the basic types or an array of one of
the types. If data is an array of signed bytes, it is assumed to
be a null-terminated string. To search for an array of hex bytes,
create an unsigned char array and fill it with the target value. If
the type being search for is a string, the matchcase and wholeworld
arguments can be used to control the search (see Using Find for more
information). method controls which search method is used from the
following options:
FINDMETHOD_NORMAL=0 - a normal search
FINDMETHOD_WILDCARDS=1 - when searching for strings use wildcards '*' or '?'
FINDMETHOD_REGEX=2 - when searching for strings use Regular Expressions
wildcardMatchLength indicates the maximum number of characters a '*'
can match when searching using wildcards. If the target is a float or
double, the tolerance argument indicates that values that are only off
by the tolerance value still match. If dir is 1 the find direction is
down and if dir is 0 the find direction is up. start and size can be
used to limit the area of the file that is searched. start is the
starting byte address in the file where the search will begin and size
is the number of bytes after start that will be searched. If size is
zero, the file will be searched from start to the end of the file.
The return value is a TFindResults structure. This structure contains
a count variable indicating the number of matches, and a start array
holding an array of starting positions, plus a size array which holds
an array of target lengths. For example, use the following code to
find all occurrences of the ASCII string "Test" in a file:
"""
matches_iter = _find_helper(params, ctxt, scope, stream, coord, interp)
matches = list(matches_iter)
types = interp.get_types()
res = types.TFindResults()
res.count = len(matches)
# python3 map doesn't return a list
starts = list(
map(lambda m: m.start() + FIND_MATCHES_START_OFFSET, matches)
)
res.start = starts
# python3 map doesn't return a list
sizes = list(map(lambda m: m.end() - m.start(), matches))
res.size = sizes
return res
"""Used to keep track of the current matches"""
# int64 FindFirst(
# <datatype> data,
# int matchcase=true,
# int wholeword=false,
# int method=0,
# double tolerance=0.0,
# int dir=1,
# int64 start=0,
# int64 size=0,
# int wildcardMatchLength=24 )
@native(name="FindFirst", ret=pfp.fields.Int64, send_interp=True)
def FindFirst(params, ctxt, scope, stream, coord, interp):
"""
This function is identical to the FindAll function except that the
return value is the position of the first occurrence of the target
found. A negative number is returned if the value could not be found.
"""
global FIND_MATCHES_ITER
FIND_MATCHES_ITER = _find_helper(
params, ctxt, scope, stream, coord, interp
)
try:
first = six.next(FIND_MATCHES_ITER)
return first.start() + FIND_MATCHES_START_OFFSET
except StopIteration as e:
return -1
# int64 FindNext( int dir=1 )
@native(name="FindNext", ret=pfp.fields.Int64)
def FindNext(params, ctxt, scope, stream, coord):
"""
This function returns the position of the next occurrence of the
target value specified with the FindFirst function. If dir is 1, the
find direction is down. If dir is 0, the find direction is up. The
return value is the address of the found data, or -1 if the target
is not found.
"""
if FIND_MATCHES_ITER is None:
raise errors.InvalidState()
direction = 1
if len(params) > 0:
direction = PYVAL(params[0])
if direction != 1:
# TODO maybe instead of storing the iterator in FIND_MATCHES_ITER,
# we should go ahead and find _all the matches in the file and store them
# in a list, keeping track of the idx of the current match.
#
# This would be highly inefficient on large files though.
raise NotImplementedError("Reverse searching is not yet implemented")
try:
next_match = six.next(FIND_MATCHES_ITER)
return next_match.start() + FIND_MATCHES_START_OFFSET
except StopIteration as e:
return -1
# TFindInFilesResults FindInFiles(
# <datatype> data,
# char dir[],
# char mask[],
# int subdirs=true,
# int openfiles=false,
# int matchcase=true,
# int wholeword=false,
# int method=0,
# double tolerance=0.0,
# int wildcardMatchLength=24 )
@native(name="FindInFiles", ret=pfp.fields.Void)
def FindInFiles(params, ctxt, scope, stream, coord):
"""
Searches for a given set of data across multiple files. See the FindAll
function for information on the data, matchcase, wholeword, method,
wildcardMatchLength and tolerance arguments. The dir argument indicates
the starting directory where the search will take place. mask indicates
which file types to search and may contain the characters '*' and
'?'. If subdirs is true, all subdirectories are recursively searched
for the value as well. If openfiles is true, only the currently
open files are searched. The return value is the TFindInFilesResults
structure which contains a count variable indicate the number of files
found plus an array of file variables. Each file variable contains
a count variable indicating the number of matches, plus an array of
start and size variables indicating the match position. For example:
"""
raise NotImplementedError()
# TFindStringsResults FindStrings(
# int minStringLength,
# int type,
# int matchingCharTypes,
# wstring customChars="",
# int64 start=0,
# int64 size=0,
# int requireNull=false )
@native(name="FindStrings", ret=pfp.fields.Void)
def FindStrings(params, ctxt, scope, stream, coord):
"""
Attempts to locate any strings within a binary file similar to the Find
Strings dialog which is accessed by clicking 'Search > Find Strings'
on the main menu. Specify the minimum length of each string in number
of characters with the minStringLength parameter. The type option
tells the algorithm to look for ASCII strings, UNICODE strings or
both by using one of the following constants:
FINDSTRING_ASCII
FINDSTRING_UNICODE
FINDSTRING_BOTH
To specify which characters are considered as part of a string,
use an OR bitmask ('|') of one or more of the following constants:
FINDSTRING_LETTERS - the letters A..Z and a..z
FINDSTRING_LETTERS_ALL - all international numbers including FINDSTRING_LETTERS
FINDSTRING_NUMBERS - the numbers 0..9
FINDSTRING_NUMBERS_ALL - all international numbers including FINDSTRING_NUMBERS
FINDSTRING_SYMBOLS - symbols such as '#', '@', '!', etc. except for '_'
FINDSTRING_UNDERSCORE - the character '_'
FINDSTRING_SPACES - spaces or whitespace
FINDSTRING_LINEFEEDS - line feed characters 0x0a, 0x0d
FINDSTRING_CUSTOM - include any custom characters in the customChars string
Note if the FINDSTRING_CUSTOM constant is included, any characters
from customChars are considered as part of the string otherwise the
customChars string is ignored. The start and size parameters indicate
the range of the file to search and if size is zero, the file is
searched starting from start to the end of the file. If requireNull
is true, the strings must have a null (0) character after each string.
The return value is a TFindStringsResults structure which contains a
count variable with the number of strings found, a start array holding
the starting position of each string, a size array holding the size in
bytes of each string, and a type array which indicates FINDSTRING_ASCII
if the string is an ASCII string or FINDSTRING_UNICODE if the string
is a Unicode string. For example, the following code finds all ASCII
strings of length at least 5 containing the characters "A..Za..z$&":
"""
raise NotImplementedError()
# int GetSectorSize()
@native(name="GetSectorSize", ret=pfp.fields.Int)
def GetSectorSize(params, ctxt, scope, stream, coord):
"""
Returns the size in bytes of the sectors for this drive. If this
file is not a drive, the current sector size is defined using the
'View > Division Lines > Set Sector Size' menu option.
"""
raise NotImplementedError()
# int HexOperation(
# int operation,
# int64 start,
# int64 size,
# operand,
# step=0,
# int64 skip=0 )
@native(name="HexOperation", ret=pfp.fields.Int)
def HexOperation(params, ctxt, scope, stream, coord):
"""
Perform any of the operations on hex data as available in the Hex
Operations dialog. The operation parameter chooses which operation to
perform and these operations are described in the Hex Operations dialog
documentation. start and size indicate which range of bytes to operate
on and if size is 0, the whole file is used. The operand indicates what
value to use during the operation and the result is different depending
upon which operation is used (see the Hex Operations dialog). operand
can be any of the basic numeric or floating point types and the type
of this parameter tells the function how to interpret the data. For
example, if a 'ushort' is passed as an operand, the block of data is
considered as an array of 'ushort' using the current endian. If step
is non-zero, the operand is incremented by step after each operation
and if skip is non-zero, skip number of bytes are skipped after each
operation. This function returns the number of bytes modified if
successful, or a negative number on error. The following constants
can be used for the operation parameter:
HEXOP_ASSIGN
HEXOP_ADD
HEXOP_SUBTRACT
HEXOP_MULTIPLY
HEXOP_DIVIDE
HEXOP_NEGATE
HEXOP_MODULUS
HEXOP_SET_MINIMUM
HEXOP_SET_MAXIMUM
HEXOP_SWAP_BYTES
HEXOP_BINARY_AND
HEXOP_BINARY_OR
HEXOP_BINARY_XOR
HEXOP_BINARY_INVERT
HEXOP_SHIFT_LEFT
HEXOP_SHIFT_RIGHT
HEXOP_SHIFT_BLOCK_LEFT
HEXOP_SHIFT_BLOCK_RIGHT
HEXOP_ROTATE_LEFT
HEXOP_ROTATE_RIGHT
For example, the following code would treat the bytes from address
16 to 48 as an array of floats and add the value 3.0 to each float
in the array:
"""
raise NotImplementedError()
# int64 Histogram( int64 start, int64 size, int64 result[256] )
@native(name="Histogram", ret=pfp.fields.Int64)
def Histogram(params, ctxt, scope, stream, coord):
"""
Counts the number of bytes of each value in the file from 0 up to
255. The bytes are counting starting from address start and continuing
for size bytes. The resulting counts are stored in the int64 array
results. For example, result[0] would indicate the number of 0 bytes
values found in the given range of data. The return value is the
total number of bytes read.
"""
raise NotImplementedError()
# int ImportFile( int type, char filename[], int wordaddresses=false, int defaultByteValue=-1 , coord)
@native(name="ImportFile", ret=pfp.fields.Int)
def ImportFile(params, ctxt, scope, stream, coord):
"""
Attempts to import the file specified by filename in one of the
supported import formats. The format is given by the type argument
and may be:
IMPORT_HEXTEXT
IMPORT_DECTEXT
IMPORT_BINARYTEXT
IMPORT_SOURCECODE
IMPORT_INTEL
IMPORT_MOTOROLA
IMPORT_BASE64
IMPORT_UUENCODE
If successful, the file is opened as a new file in the editor. If
the function fails, a negative number is returned. If wordaddresses
is true and the file is an Intel Hex file, the file is imported
using word-based addressing. When importing some data formats (such
as Intel Hex or S-Records) these formats may skip over certain
bytes. The value to assign these bytes can be controlled with the
defaultByteValue parameter and if the parameter is -1, the value
from the Importing Options dialog is used. See Importing/Exporting
Files for more information on importing.
"""
raise NotImplementedError()
# int IsDrive()
@native(name="IsDrive", ret=pfp.fields.Int)
def IsDrive(params, ctxt, scope, stream, coord):
"""
Returns true if the current file is a physical or logical drive,
or false otherwise (see Editing Drives).
"""
raise NotImplementedError()
# int IsLogicalDrive()
@native(name="IsLogicalDrive", ret=pfp.fields.Int)
def IsLogicalDrive(params, ctxt, scope, stream, coord):
"""
Returns true if the current file is a logical drive, or false otherwise
(see Editing Drives).
"""
raise NotImplementedError()
# int IsPhysicalDrive()
@native(name="IsPhysicalDrive", ret=pfp.fields.Int)
def IsPhysicalDrive(params, ctxt, scope, stream, coord):
"""
Returns true if the current file is a physical drive, or false
otherwise (see Editing Drives).
"""
raise NotImplementedError()
# int IsProcess()
@native(name="IsProcess", ret=pfp.fields.Int)
def IsProcess(params, ctxt, scope, stream, coord):
"""
Returns true if the current file is a process, or false otherwise
(see Editing Processes).
"""
raise NotImplementedError()
# int OpenLogicalDrive( char driveletter )
@native(name="OpenLogicalDrive", ret=pfp.fields.Int)
def OpenLogicalDrive(params, ctxt, scope, stream, coord):
"""
Opens the drive with the given driveLetter as a new file in the
editor. For example, 'OpenLogicalDrive('c');'. This function returns
a negative number on failure. See Editing Drives for more information
on drive editing.
"""
raise NotImplementedError()
# int OpenPhysicalDrive( int physicalID )
@native(name="OpenPhysicalDrive", ret=pfp.fields.Int)
def OpenPhysicalDrive(params, ctxt, scope, stream, coord):
"""
Opens the physical drive physicalID as a new file in the editor
(see Editing Drives). For example, 'OpenPhysicalDrive(0);'. This
function returns a negative number on failure.
"""
raise NotImplementedError()
# int OpenProcessById( int processID, int openwriteable=true )
@native(name="OpenProcessById", ret=pfp.fields.Int)
def OpenProcessById(params, ctxt, scope, stream, coord):
"""
Opens a process identified by the processID number (see Editing
Processes). If openwriteable is true, only bytes that can be modified
are opened, otherwise all readable bytes are opened. A negative
number if returned if this function fails.
"""
raise NotImplementedError()
# int OpenProcessByName( char processname[], int openwriteable=true )
@native(name="OpenProcessByName", ret=pfp.fields.Int)
def OpenProcessByName(params, ctxt, scope, stream, coord):
"""
Attempts to open a process given by the name processname as a new
file in the editor. For example: 'OpenProcessByName( "cmd.exe" );'
If openwriteable is true, only bytes that can be modified are opened,
    otherwise all readable bytes are opened. A negative number is returned
if this function fails. See Editing Processes for more information.
"""
raise NotImplementedError()
# int ReplaceAll(
# <datatype> finddata,
# <datatype> replacedata,
# int matchcase=true,
# int wholeword=false,
# int method=0,
# double tolerance=0.0,
# int dir=1,
# int64 start=0,
# int64 size=0,
# int padwithzeros=false,
# int wildcardMatchLength=24 )
@native(name="ReplaceAll", ret=pfp.fields.Int)
def ReplaceAll(params, ctxt, scope, stream, coord):
"""
This function converts the arguments finddata and replacedata into
a set of bytes, and then finds all occurrences of the find bytes
in the file and replaces them with the replace bytes. The arguments
matchcase, wholeword, method, wildcardMatchLength, tolerance, dir,
start, and size are all used when finding a value and are discussed
in the FindAll function above. If padwithzeros is true, a set of
zero bytes are added to the end of the replace data until it is
the same length as the find data. The return value is the number of
replacements made.
"""
raise NotImplementedError()
```
#### File: pfp/tests/test_cast.py
```python
import os
import struct
import sys
import unittest
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import pfp
import pfp.errors
from pfp.fields import *
import pfp.utils
import utils
class TestArrays(utils.PfpTestCase):
def setUp(self):
pfp.fields.NumberBase.endian = pfp.fields.BIG_ENDIAN
def tearDown(self):
pass
def test_cast_basic(self):
dom = self._test_parse_build(
"",
"""
local int a = 0x61;
local uchar b = (char)a;
Printf(b);
""",
stdout="a",
)
def test_cast_from_dex(self):
dom = self._test_parse_build(
"",
"""
local ubyte cur = 7;
local uint test1 = (uint)(10);
local uint test2 = (uint)((cur & 0x7f) << 7);
Printf("%u,%u", test1, test2);
""",
stdout="10,896",
)
if __name__ == "__main__":
unittest.main()
```
#### File: pfp/tests/test_fields.py
```python
import os
try:
from StringIO import StringIO
# StringIO does not exist in python3
except ImportError as e:
from io import StringIO
import struct
import sys
import unittest
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import pfp
import pfp.errors
from pfp.fields import *
import pfp.utils
import utils
class TestNumericFields(utils.PfpTestCase):
def setUp(self):
pass
def tearDown(self):
pass
def _do_parse(self, field, data):
field._pfp__parse(StringIO(data.decode("ISO-8859-1")))
def _do_endian_tests(self, field, format):
field.endian = pfp.fields.BIG_ENDIAN
self._do_parse(field, struct.pack(">" + format, 1))
self.assertEqual(field, 1)
field.endian = pfp.fields.LITTLE_ENDIAN
self._do_parse(field, struct.pack("<" + format, 1))
self.assertEqual(field, 1)
def test_char(self):
field = Char()
self._do_endian_tests(field, "b")
def test_uchar(self):
field = UChar()
self._do_endian_tests(field, "b")
def test_short(self):
field = Short()
self._do_endian_tests(field, "h")
def test_ushort(self):
field = UShort()
self._do_endian_tests(field, "H")
def test_int(self):
field = Int()
self._do_endian_tests(field, "i")
def test_uint(self):
field = UInt()
self._do_endian_tests(field, "I")
def test_int64(self):
field = Int64()
self._do_endian_tests(field, "q")
    def test_uint64(self):
field = UInt64()
self._do_endian_tests(field, "Q")
def test_const_int64(self):
dom = self._test_parse_build(
"",
"""
const uint64 PNGMAGIC = 0x89504E470D0A1A0AL;
Printf("%d", PNGMAGIC);
""",
stdout="9894494448401390090",
)
if __name__ == "__main__":
unittest.main()
```
#### File: pfp/tests/test_type_creation.py
```python
import os
import sys
import unittest
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
import pfp
import pfp.fields
from pfp.fields import PYVAL, PYSTR
import pfp.interp
import pfp.utils
import utils
class TestTypeCreation(utils.PfpTestCase):
def setUp(self):
pfp.fields.NumberBase.endian = pfp.fields.LITTLE_ENDIAN
def tearDown(self):
pass
def test_atomic(self):
dom = self._test_parse_build(
"",
"""
typedef unsigned int BLAH;
""",
)
res = dom.BLAH()
self.assertTrue(isinstance(res, pfp.fields.UInt))
self.assertEqual(res, 0)
def test_struct(self):
dom = self._test_parse_build(
"",
"""
LittleEndian();
typedef struct {
char a;
char b;
uint c;
} TEST_STRUCT;
""",
)
res = dom.TEST_STRUCT()
self.assertTrue(isinstance(res, pfp.fields.Struct))
self.assertEqual(res.a, 0)
self.assertEqual(res.b, 0)
self.assertEqual(res.c, 0)
res.a = 0x30
res.b = 0x40
res.c = 0x1000
self.assertEqual(res.a, 0x30)
self.assertEqual(res.b, 0x40)
self.assertEqual(res.c, 0x1000)
output = res._pfp__build()
self.assertEqual(output, pfp.utils.binary("\x30\x40\x00\x10\x00\x00"))
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "666Chao666/covid19-severity-prediction",
"score": 3
} |
#### File: processed/usafacts_infections/clean.py
```python
import pandas as pd
import numpy as np
import os
from os.path import join as oj
from os.path import dirname
if __name__ == '__main__':
import sys
sys.path.append(oj(os.path.dirname(__file__), '../../raw/usafacts_infections/'))
from load import load_usafacts_infections
else:
from ...raw.usafacts_infections.load import load_usafacts_infections
def clean_usafacts_infections(data_dir='../../raw/usafacts_infections/',
out_dir='.'):
''' Clean usafacts data
Parameters
----------
data_dir : str; path to the data directory to find raw csv
out_dir : str; path to the data directory to write cleaned csv
Returns
-------
writes out cleaned csv file and returns clean data frame
'''
# load in data
df = load_usafacts_infections(data_dir = data_dir)
# two counties changed their county FIPS
if "02158" in df["countyFIPS"].unique():
df.loc[df["countyFIPS"] == "02158", "countyFIPS"] = "02270"
if "46102" in df["countyFIPS"].unique():
df.loc[df["countyFIPS"] == "46102", "countyFIPS"] = "46113"
    # merge counties with the same countyFIPS
df = df.groupby("countyFIPS").sum().reset_index()
# remove princess cruise ship
df = df[df['countyFIPS'] != "06000"]
# write out to csv
df.to_csv(oj(out_dir, "usafacts_infections.csv"), index=False)
return df
if __name__ == '__main__':
df = clean_usafacts_infections()
print("cleaned usafacts infections successfully.")
```
#### File: processed/DH_hospital/clean.py
```python
import pandas as pd
import sys
sys.path.append("../../raw/DH_hospital")
from load import load_DH_hospital
def clean_DH_hospital(input="../../raw/DH_hospital/DH_hospital.csv"):
raw = load_DH_hospital(input)
raw = raw.rename(columns={
"Facility ID": "CMS Certification Number",
"Facility Name": "Hospital Name",
})
raw.to_csv("DH_hospital.csv", index=False)
return raw
if __name__ == '__main__':
clean_DH_hospital()
print("clean DH_hospital successfully.")
``` |
{
"source": "666Chao666/snowflake-connector-python",
"score": 2
} |
#### File: test/unit/test_connection.py
```python
import pytest
from mock import patch
import snowflake.connector
try: # pragma: no cover
from snowflake.connector.constants import QueryStatus
except ImportError:
QueryStatus = None
@patch('snowflake.connector.network.SnowflakeRestful._post_request')
def test_connect_with_service_name(mockSnowflakeRestfulPostRequest):
def mock_post_request(url, headers, json_body, **kwargs):
global mock_cnt
ret = None
if mock_cnt == 0:
# return from /v1/login-request
ret = {
'success': True,
'message': None,
'data': {
'token': 'TOKEN',
'masterToken': 'MASTER_TOKEN',
'idToken': None,
'parameters': [
{'name': 'SERVICE_NAME', 'value': "FAKE_SERVICE_NAME"}
],
}}
return ret
# POST requests mock
mockSnowflakeRestfulPostRequest.side_effect = mock_post_request
global mock_cnt
mock_cnt = 0
account = 'testaccount'
user = 'testuser'
# connection
con = snowflake.connector.connect(
account=account,
user=user,
password='<PASSWORD>',
database='TESTDB',
warehouse='TESTWH',
)
assert con.service_name == 'FAKE_SERVICE_NAME'
@pytest.mark.skip(reason="Mock doesn't work as expected.")
@patch(
'snowflake.connector.network.SnowflakeRestful._post_request'
)
def test_connection_ignore_exception(mockSnowflakeRestfulPostRequest):
def mock_post_request(url, headers, json_body, **kwargs):
global mock_cnt
ret = None
if mock_cnt == 0:
# return from /v1/login-request
ret = {
'success': True,
'message': None,
'data': {
'token': 'TOKEN',
'masterToken': 'MASTER_TOKEN',
'idToken': None,
'parameters': [
{'name': 'SERVICE_NAME', 'value': "FAKE_SERVICE_NAME"}
],
}}
elif mock_cnt == 1:
ret = {
'success': False,
'message': "Session gone",
'data': None,
'code': 390111
}
mock_cnt += 1
return ret
# POST requests mock
mockSnowflakeRestfulPostRequest.side_effect = mock_post_request
global mock_cnt
mock_cnt = 0
account = 'testaccount'
user = 'testuser'
# connection
con = snowflake.connector.connect(
account=account,
user=user,
password='<PASSWORD>',
database='TESTDB',
warehouse='TESTWH',
)
# Test to see if closing connection works or raises an exception. If an exception is raised, test will fail.
con.close()
@pytest.mark.skipolddriver
def test_is_still_running():
"""Checks that is_still_running returns expected results."""
statuses = [
(QueryStatus.RUNNING, True),
(QueryStatus.ABORTING, False),
(QueryStatus.SUCCESS, False),
(QueryStatus.FAILED_WITH_ERROR, False),
(QueryStatus.ABORTED, False),
(QueryStatus.QUEUED, True),
(QueryStatus.FAILED_WITH_INCIDENT, False),
(QueryStatus.DISCONNECTED, False),
(QueryStatus.RESUMING_WAREHOUSE, True),
(QueryStatus.QUEUED_REPAIRING_WAREHOUSE, True),
(QueryStatus.RESTARTED, False),
(QueryStatus.BLOCKED, False),
(QueryStatus.NO_DATA, True),
]
for status, expected_result in statuses:
assert snowflake.connector.SnowflakeConnection.is_still_running(status) == expected_result
``` |
{
"source": "666DZY666/oneflow",
"score": 2
} |
#### File: python/ops/one_hot.py
```python
from __future__ import absolute_import
import os
import oneflow as flow
import oneflow.core.operator.op_conf_pb2 as op_conf_util
import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util
import oneflow.python.framework.dtype as dtype_util
import oneflow.python.framework.distribute as distribute_util
import oneflow.python.framework.id_util as id_util
import oneflow.python.framework.remote_blob as remote_blob_util
from oneflow.python.oneflow_export import oneflow_export
import oneflow_api
from typing import Optional, Union
@oneflow_export("one_hot")
def one_hot(
indices: oneflow_api.BlobDesc,
depth: int,
on_value: Union[int, float] = 1,
off_value: Union[int, float] = 0,
axis: int = -1,
dtype: Optional[dtype_util.dtype] = None,
name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
"""This operator generates a onehot Blob from input Blob.
If input Blob's rank is `N`, the corresponding onehot Blob's rank is `N+1`. The new axis is generated on the specified dimension according to the parameter `axis`.
The locations represented by `indices` take value `on_value`, while other locations take `off_value`
Args:
indices (oneflow_api.BlobDesc): The input Blob.
depth (int): The length of onehot Blob.
on_value (Union[int, float], optional): The fill value when `indices[i] == i`. Defaults to 1.
        off_value (Union[int, float], optional): The fill value when `indices[i] != i`. Defaults to 0.
axis (int, optional): The specified dimension that the new axis is generated on. Defaults to -1.
dtype (Optional[dtype_util.dtype], optional): The output data type, it can be "oneflow.int32", "oneflow.int64", "oneflow.float", "oneflow.double". Defaults to None.
name (Optional[str], optional): The name for the operation. Defaults to None.
Note:
The data type of input blob should be `int32` or `int64`
For example:
Example 1:
.. code-block:: python
import oneflow as flow
import oneflow.typing as tp
import numpy as np
@flow.global_function()
def onehot_Job(x: tp.Numpy.Placeholder((4, ), dtype=flow.int32)
) -> tp.Numpy:
return flow.one_hot(indices=x,
depth=5,
axis=-1,
dtype=flow.int32)
x = np.array([0, 3, 1, 2]).astype(np.int32)
out = onehot_Job(x)
# out [[1 0 0 0 0]
# [0 0 0 1 0]
# [0 1 0 0 0]
# [0 0 1 0 0]]
Example 2:
.. code-block:: python
import oneflow as flow
import oneflow.typing as tp
import numpy as np
@flow.global_function()
def onehot_Job(x: tp.Numpy.Placeholder((4, ), dtype=flow.int32)
) -> tp.Numpy:
return flow.one_hot(indices=x,
depth=5,
axis=0,
dtype=flow.int32)
x = np.array([0, 3, 1, 2]).astype(np.int32)
out = onehot_Job(x)
# out [[1 0 0 0]
# [0 0 1 0]
# [0 0 0 1]
# [0 1 0 0]
# [0 0 0 0]]
Returns:
oneflow_api.BlobDesc: [description]
"""
out_ndims = len(indices.shape) + 1
if axis < 0:
axis += out_ndims
assert axis >= 0 and axis < out_ndims, ValueError(
"Expected axis to between [%d, %d). But received: %d "
% (-out_ndims, out_ndims, axis)
)
out = (
flow.user_op_builder(name if name is not None else id_util.UniqueStr("OneHot_"))
.Op("one_hot")
.Input("indices", [indices])
.Attr("depth", int(depth))
.Attr("floating_on_value", float(on_value))
.Attr("integer_on_value", int(on_value))
.Attr("floating_off_value", float(off_value))
.Attr("integer_off_value", int(off_value))
.Attr("dtype", dtype)
.Output("out")
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
if axis != (out_ndims - 1):
dim_list = list(range(0, out_ndims))
dim_list.insert(axis, out_ndims - 1)
dim_list.pop()
return flow.transpose(out, dim_list)
else:
return out
``` |
{
"source": "6-6-6/histnd",
"score": 2
} |
#### File: histnd/histnd/wrapper.py
```python
import numpy as np
from .histnd import histnd_serial_f64, histnd_parallel_f64
from .histnd import histnd_serial_i64, histnd_parallel_i64
from .histnd import histnd_serial_u64, histnd_parallel_u64
def check_input_shape(samples, bins):
if samples.shape[1] != len(bins):
return False
else:
return True
def histnd_parallel(samples, bins, chunksize):
if not check_input_shape(samples, bins):
return None
if samples.dtype in [np.float16, np.float32, np.float64]:
call = histnd_parallel_f64
samples = samples.astype(np.double)
bins = [ each_bin.astype(np.double) for each_bin in bins]
elif samples.dtype in [np.uint8, np.uint16, np.uint32, np.uint64]:
call = histnd_parallel_u64
samples = samples.astype(np.uint64)
bins = [ each_bin.astype(np.uint64) for each_bin in bins]
elif samples.dtype in [np.int8, np.int16, np.int32, np.int64]:
        call = histnd_parallel_i64
samples = samples.astype(np.int64)
bins = [ each_bin.astype(np.int64) for each_bin in bins]
else:
raise NotImplementedError(f"Datatype {samples.dtype} is not supported.")
return call(samples, bins, chunksize)
def histnd_serial(samples, bins):
if not check_input_shape(samples, bins):
return None
if samples.dtype in [np.float16, np.float32, np.float64]:
call = histnd_serial_f64
samples = samples.astype(np.double)
bins = [ each_bin.astype(np.double) for each_bin in bins]
elif samples.dtype in [np.uint8, np.uint16, np.uint32, np.uint64]:
call = histnd_serial_u64
samples = samples.astype(np.uint64)
bins = [ each_bin.astype(np.uint64) for each_bin in bins]
elif samples.dtype in [np.int8, np.int16, np.int32, np.int64]:
        call = histnd_serial_i64
samples = samples.astype(np.int64)
bins = [ each_bin.astype(np.int64) for each_bin in bins]
else:
raise NotImplementedError(f"Datatype {samples.dtype} is not supported.")
return call(samples, bins)
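# Hedged usage sketch (not part of the original module): build a 2-D histogram
# of uniform samples with 10 bins per axis; the bin edges and chunksize below
# are illustrative values only.
if __name__ == "__main__":
    demo_samples = np.random.rand(1000, 2)
    demo_bins = [np.linspace(0.0, 1.0, 11), np.linspace(0.0, 1.0, 11)]
    hist_serial = histnd_serial(demo_samples, demo_bins)
    hist_parallel = histnd_parallel(demo_samples, demo_bins, 256)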
``` |
{
"source": "666vulcan/tvm",
"score": 2
} |
#### File: script/tir/special_stmt.py
```python
from typing import Callable, List, Optional, Tuple, Any, Mapping, Union
import synr
from synr import ast
from tvm.ir.expr import PrimExpr, Range
import tvm.tir
from tvm.runtime import Object, String
from tvm import te
from tvm.target import Target
from tvm.ir import Span
from tvm.tir import IntImm, IterVar
from .node import BufferSlice
from .utils import buffer_slice_to_region
from ..context_maintainer import BlockInfo, ContextMaintainer
from ..registry import register
from ..utils import (
get_param_list,
tvm_span_from_synr,
call_with_error_reporting,
)
def convert_to_int(
value: Union[IntImm, int],
arg_name: str,
report_error: Callable,
span: Union[Span, synr.ast.Span],
) -> int:
"""convert a const int or TVM IntImm to Python int.
Reports an error when input cannot be converted to int.
Parameters
----------
value : Union[tvm.tir.IntImm, int]
The input value to be converted.
arg_name : str
Function argument name for error reporting.
report_error: Callable
The report error function handle
span : Union[synr.ast.Span, tvm.ir.Span]
Location of the error
"""
if isinstance(value, IntImm):
return value.value
if isinstance(value, int):
return value
report_error(
f"Expected int or IntImm for {arg_name}, but got {str(type(value))}",
span,
)
class SpecialStmt:
"""Base class for all Special Stmts"""
def __init__(self, func: Callable, def_symbol: bool):
self.func: Callable = func
self.def_symbol: bool = def_symbol
self.node: Optional[synr.ast.Node] = None
self.context: Optional[ContextMaintainer] = None
def signature(self) -> Tuple[str, Tuple[list, list, Any]]:
return "tir." + self.func.__name__, get_param_list(self.func)
def handle(
self,
node: ast.Node,
context: ContextMaintainer,
arg_list: List[Any],
span: synr.ast.Span,
):
self.node = node
self.context = context
return call_with_error_reporting(
context.report_error, span, self.func, *arg_list, span=tvm_span_from_synr(span)
)
@register
class MatchBuffer(SpecialStmt):
"""Special Stmt match_buffer(param, shape, dtype, data, strides, elem_offset, scope, align,
offset_factor, buffer_type)
Note
----
This Special Stmt will perform different behavior depends on the type of param.
If the param is a var in function parameter, it will create a buffer from DLTensor.
Else if the param is a subregion of other buffers, then create a subregion match inside a block.
Example
-------
Match buffer from function parameter
.. code-block:: python
A = T.match_buffer(a, (128, 128), dtype="float32")
Match buffer from Buffer subregion
.. code-block:: python
A = T.match_buffer(B[0:128, i * 128 : i * 128 + 128], (128, 128), dtype="float32")
"""
def __init__(self):
def match_buffer(
param,
shape,
dtype="float32",
data=None,
strides=None,
elem_offset=None,
scope="global",
align=-1,
offset_factor=0,
buffer_type="default",
span=None,
):
if not isinstance(self.node, ast.Assign) or not len(self.node.lhs) == 1:
self.context.report_error(
"`match_buffer` must be assigned to a single buffer, "
"e.g. A = match_buffer(...)",
self.node.span,
)
if strides is None:
strides = []
align = convert_to_int(align, "align", self.context.report_error, self.node.span)
offset_factor = convert_to_int(
offset_factor, "offset_factor", self.context.report_error, self.node.span
)
buffer_name: str = self.node.lhs[0].id.name
buffer = tvm.tir.decl_buffer(
shape,
dtype,
buffer_name,
data,
strides,
elem_offset,
scope,
align,
offset_factor,
buffer_type,
span=span,
)
if isinstance(param, tvm.tir.Var):
if param not in self.context.func_params:
self.context.report_error(
"Can not bind non-input param to buffer", self.node.rhs.params[0].span
)
self.context.func_buffer_map[param] = buffer
elif isinstance(param, BufferSlice):
buffer_region = buffer_slice_to_region(param)
self.context.current_block_scope().match_buffers.append(
tvm.tir.MatchBufferRegion(buffer, buffer_region)
)
else:
self.context.report_error(
"The source of match_buffer expected Var or BufferSlice, but got "
+ str(type(param)),
self.node.rhs.params[0].span,
)
self.context.update_symbol(buffer_name, buffer, self.node)
super().__init__(match_buffer, def_symbol=True)
@register
class BufferDeclare(SpecialStmt):
"""Special Stmt buffer_decl(shape, dtype, data, strides, elem_offset, scope, align,
offset_factor, buffer_type)
Example
-------
.. code-block:: python
A = T.buffer_decl((128, 128), dtype="float32")
"""
def __init__(self):
def buffer_decl(
shape,
dtype="float32",
data=None,
strides=None,
elem_offset=None,
scope="global",
align=-1,
offset_factor=0,
buffer_type="default",
span=None,
):
if not isinstance(self.node, ast.Assign) or not len(self.node.lhs) == 1:
self.context.report_error(
"`buffer_decl` must be assigned to a single buffer, e.g. A = buffer_decl(...)",
self.node.span,
)
if strides is None:
strides = []
align = convert_to_int(align, "align", self.context.report_error, self.node.span)
offset_factor = convert_to_int(
offset_factor, "offset_factor", self.context.report_error, self.node.span
)
buffer_name: str = self.node.lhs[0].id.name
buffer = tvm.tir.decl_buffer(
shape,
dtype,
buffer_name,
data,
strides,
elem_offset,
scope,
align,
offset_factor,
buffer_type,
span=span,
)
self.context.update_symbol(buffer_name, buffer, self.node)
return buffer
super().__init__(buffer_decl, def_symbol=True)
@register
class AllocBuffer(SpecialStmt):
"""Special function alloc_buffer(shape, dtype, data, strides, elem_offset, scope, align,
offset_factor, buffer_type)
Example
-------
.. code-block:: python
A = T.alloc_buffer((128, 128), dtype="float32")
"""
def __init__(self):
def alloc_buffer(
shape,
dtype="float32",
data=None,
strides=None,
elem_offset=None,
scope="global",
align=-1,
offset_factor=0,
buffer_type="default",
span=None,
):
if not isinstance(self.node, ast.Assign) or not len(self.node.lhs) == 1:
self.context.report_error(
"`alloc_buffer` must be assigned to a single buffer, "
"e.g. A = alloc_buffer(...)",
self.node.span,
)
if strides is None:
strides = []
align = convert_to_int(align, "align", self.context.report_error, self.node.span)
offset_factor = convert_to_int(
offset_factor, "offset_factor", self.context.report_error, self.node.span
)
buffer_name: str = self.node.lhs[0].id.name
buffer = tvm.tir.decl_buffer(
shape,
dtype,
buffer_name,
data,
strides,
elem_offset,
scope,
align,
offset_factor,
buffer_type,
span=span,
)
if self.context.current_block_scope():
self.context.current_block_scope().alloc_buffers.append(buffer)
else:
# If it is allocated outside all blocks, allocate it under root block.
self.context.root_alloc_buffers.append(buffer)
self.context.update_symbol(buffer_name, buffer, self.node)
super().__init__(alloc_buffer, def_symbol=True)
@register
class BlockReads(SpecialStmt):
"""Special function reads([read_regions], *other_regions)
Note
----
*other_region is an unpackable list of BufferSlice to support
reads syntax sugar like reads(BufferRegion1, BufferRegion2, ...)
Example
-------
.. code-block:: python
T.reads([A[vi: vi + 4, vk: vk + 4], B[vk: vk + 4, vj]])
"""
def __init__(self):
def reads(
*read_regions: Union[BufferSlice, List[BufferSlice]],
span: Span = None,
):
assert self.context, "call 'exit_scope' before 'enter_scope'"
block_scope = self.context.current_block_scope()
if block_scope is None:
self.context.report_error(
"Expected to declare read regions inside a block.",
span,
)
if block_scope.reads is not None:
self.context.report_error(
"Duplicate write region declaration, "
+ "previous one is "
+ str(", ".join(str(x) for x in block_scope.reads)),
span,
)
if len(read_regions) > 1:
for read_region in read_regions:
if not isinstance(read_region, BufferSlice):
self.context.report_error(
"Incorrect input type. Expected *BufferSlice or List[BufferSlice],"
+ f" but got {type(read_regions)}",
span,
)
elif len(read_regions) == 1:
if isinstance(read_regions[0], list):
read_regions = read_regions[0]
block_scope.reads = read_regions
super().__init__(reads, def_symbol=False)
@register
class BlockWrites(SpecialStmt):
"""Special function writes([write_regions], *other_regions)
Note
----
*other_region is an unpackable list of BufferSlice to support
writes syntax sugar like writes(BufferRegion1, BufferRegion2, ...)
Example
-------
.. code-block:: python
T.writes([C[vi: vi + 4, vj])
"""
def __init__(self):
def writes(
*write_regions: Union[BufferSlice, List[BufferSlice]],
span: Span = None,
):
assert self.context, "call 'exit_scope' before 'enter_scope'"
block_scope = self.context.current_block_scope()
if block_scope is None:
self.context.report_error(
"Expected to declare write regions inside a block.",
span,
)
if block_scope.writes is not None:
self.context.report_error(
"Duplicate write region declaration, "
+ "previous one is "
+ str(", ".join(str(x) for x in block_scope.writes)),
span,
)
if len(write_regions) > 1:
for write_region in write_regions:
if not isinstance(write_region, BufferSlice):
self.context.report_error(
"Incorrect input type. Expected *BufferSlice or List[BufferSlice],"
+ f" but got {type(write_regions)}",
span,
)
elif len(write_regions) == 1:
if isinstance(write_regions[0], list):
write_regions = write_regions[0]
block_scope.writes = write_regions
super().__init__(writes, def_symbol=False)
@register
class BlockAttr(SpecialStmt):
"""Special function block_attr({attr_key: attr_value})
Example
-------
.. code-block:: python
T.block_attr({"double_buffer_scope": 1})
"""
def __init__(self):
def block_attr(attrs: Mapping[str, Object], span: Span = None):
assert self.context, "call 'exit_scope' before 'enter_scope'"
block_scope = self.context.current_block_scope()
if block_scope is None:
self.context.report_error(
"Expected to declare block annotations inside a block.",
span,
)
if block_scope.annotations is not None:
self.context.report_error(
"Duplicate block annotations declaration, "
+ "previous one is "
+ str(block_scope.annotations),
span,
)
attrs = {
key: String(val) if isinstance(val, str) else val for key, val in attrs.items()
}
block_scope.annotations = attrs
super().__init__(block_attr, def_symbol=False)
class BlockAxis(SpecialStmt):
"""Special stmt for defining a spatial block axis
axis.S(dom, iter_value)
Example
-------
.. code-block:: python
vi = T.axis.S(128, i * 4 + j)
"""
def axis(
self,
var_name: str,
dom: Union[PrimExpr, Range],
value: PrimExpr,
iter_type: int,
span: Optional[Span] = None,
) -> None:
"""
Helper function for creating block axis
Parameters
----------
var_name : str
The name_hint of var
dom : Union[PrimExpr, Range]
The iter domain.
value : PrimExpr
The binding value
iter_type : int
The iteration type.
span : Optional[Span]
The location of this for in the source code.
"""
assert self.context, "call 'exit_scope' before 'enter_scope'"
block_scope: BlockInfo = self.context.current_block_scope()
if block_scope is None:
self.context.report_error(
"Expected to declare block axes inside a block.",
self.node.span,
)
if var_name in [iter_var.var.name for iter_var in block_scope.iter_vars]:
self.context.report_error("Duplicate block axis " + var_name, self.node.span)
block_var = tvm.tir.Var(var_name, dtype="int32")
dom = tvm.runtime.convert(dom)
if isinstance(dom, PrimExpr):
dom = tvm.ir.Range(dom)
elif isinstance(dom, tvm.ir.container.Array) and len(dom) == 2:
dom = tvm.ir.Range(dom[0], dom[1])
elif not isinstance(dom, tvm.ir.Range):
self.context.report_error(
f"Block axis domain expected PrimExpr or Range, but got {type(dom)}",
self.node.span,
)
value = tvm.runtime.convert(value)
if not isinstance(value, PrimExpr):
self.context.report_error(
f"Block axis value expected PrimExpr, but got {type(value)}",
self.node.span,
)
iter_var = tvm.tir.IterVar(dom, block_var, iter_type)
block_scope.iter_vars.append(iter_var)
block_scope.iter_values.append(value)
self.context.update_symbol(var_name, block_var, self.node)
@register
class BlockAxisSpatial(BlockAxis):
"""Special stmt for defining a spatial block axis
axis.spatial(dom, iter_value)
Example
-------
.. code-block:: python
vi = T.axis.spatial(128, k)
"""
def __init__(self):
def axis_spatial(
dom: Union[PrimExpr, Tuple[PrimExpr, PrimExpr]], value: PrimExpr, span: Span = None
):
if not isinstance(self.node, ast.Assign) or not len(self.node.lhs) == 1:
self.context.report_error(
"`axis.spatial` must be assigned to a var, e.g. vi = axis.spatial(...)",
self.node.span,
)
self.axis(self.node.lhs[0].id.name, dom, value, IterVar.DataPar)
super().__init__(axis_spatial, def_symbol=True)
def signature(self) -> Tuple[str, Tuple[list, list, Any]]:
return "tir.axis.spatial", get_param_list(self.func)
@register
class BlockAxisS(BlockAxis):
"""The sugar special stmt for defining a spatial block axis
axis.S(dom, iter_value)
Example
-------
.. code-block:: python
vi = T.axis.S(128, k)
"""
def __init__(self):
def axis_spatial(
dom: Union[PrimExpr, Tuple[PrimExpr, PrimExpr]], value: PrimExpr, span: Span = None
):
if not isinstance(self.node, ast.Assign) or not len(self.node.lhs) == 1:
self.context.report_error(
"`axis.S` must be assigned to a var, e.g. vi = axis.S(...)",
self.node.span,
)
self.axis(self.node.lhs[0].id.name, dom, value, IterVar.DataPar)
super().__init__(axis_spatial, def_symbol=True)
def signature(self) -> Tuple[str, Tuple[list, list, Any]]:
return "tir.axis.S", get_param_list(self.func)
@register
class BlockAxisReduce(BlockAxis):
"""Special stmt for defining a reduce block axis
axis.reduce(dom, iter_value)
Example
-------
.. code-block:: python
vi = T.axis.reduce(128, k)
"""
def __init__(self):
def axis_reduce(
dom: Union[PrimExpr, Tuple[PrimExpr, PrimExpr]], value: PrimExpr, span: Span = None
):
if not isinstance(self.node, ast.Assign) or not len(self.node.lhs) == 1:
self.context.report_error(
"`axis.reduce` must be assigned` to a var, e.g. vi = axis.reduce(...)",
self.node.span,
)
self.axis(self.node.lhs[0].id.name, dom, value, IterVar.CommReduce)
super().__init__(axis_reduce, def_symbol=True)
def signature(self) -> Tuple[str, Tuple[list, list, Any]]:
return "tir.axis.reduce", get_param_list(self.func)
@register
class BlockAxisR(BlockAxis):
"""The sugar special stmt for defining a reduce block axis
axis.R(dom, iter_value)
Example
-------
.. code-block:: python
vi = T.axis.R(128, k)
"""
def __init__(self):
def axis_reduce(
dom: Union[PrimExpr, Tuple[PrimExpr, PrimExpr]], value: PrimExpr, span: Span = None
):
if not isinstance(self.node, ast.Assign) or not len(self.node.lhs) == 1:
self.context.report_error(
"`axis.R` must be assigned to a var, e.g. vi = axis.R(...)",
self.node.span,
)
self.axis(self.node.lhs[0].id.name, dom, value, IterVar.CommReduce)
super().__init__(axis_reduce, def_symbol=True)
def signature(self) -> Tuple[str, Tuple[list, list, Any]]:
return "tir.axis.R", get_param_list(self.func)
@register
class BlockAxisScan(BlockAxis):
"""Special stmt for defining a ordered block axis
axis.scan(dom, iter_value)
Example
-------
.. code-block:: python
vi = T.axis.scan(128, k)
"""
def __init__(self):
def axis_scan(
dom: Union[PrimExpr, Tuple[PrimExpr, PrimExpr]], value: PrimExpr, span: Span = None
):
if not isinstance(self.node, ast.Assign) or not len(self.node.lhs) == 1:
self.context.report_error(
"`axis.scan` must be assigned to a var, e.g. vi = axis.scan(...)",
self.node.span,
)
self.axis(self.node.lhs[0].id.name, dom, value, IterVar.Ordered)
super().__init__(axis_scan, def_symbol=True)
def signature(self) -> Tuple[str, Tuple[list, list, Any]]:
return "tir.axis.scan", get_param_list(self.func)
@register
class BlockAxisOpaque(BlockAxis):
"""Special stmt for defining a opaque block axis
axis.opaque(dom, iter_value)
Example
-------
.. code-block:: python
vi = T.axis.opaque(128, k)
"""
def __init__(self):
def axis_opaque(
dom: Union[PrimExpr, Tuple[PrimExpr, PrimExpr]], value: PrimExpr, span: Span = None
):
if not isinstance(self.node, ast.Assign) or not len(self.node.lhs) == 1:
self.context.report_error(
"`axis.opaque` must be assigned to a var, e.g. vi = axis.opaque(...)",
self.node.span,
)
self.axis(self.node.lhs[0].id.name, dom, value, IterVar.DimInfo)
super().__init__(axis_opaque, def_symbol=True)
def signature(self) -> Tuple[str, Tuple[list, list, Any]]:
return "tir.axis.opaque", get_param_list(self.func)
@register
class BlockAxisRemap(BlockAxis):
"""Special stmt for remapping loops vars to block axes.
axis.remap(iter_type, iter_value)
Note
----
Iter_type is a string consisting of 'S' and 'R', where 'S' means
for spatial and 'R' means for reduce.
Example
-------
.. code-block:: python
vi, vj = T.axis.remap("SS", [i, j])
"""
def __init__(self):
def axis_remap(iter_types: str, loop_vars: List[tvm.tir.expr.Var], span: Span = None):
if not isinstance(self.node, ast.Assign) or not len(self.node.lhs) >= 1:
self.context.report_error(
"`axis.remap` must be assigned to one or more vars, "
"e.g. vi, vj = axis.remap(...)",
self.node.span,
)
var_num: int = len(self.node.lhs)
if var_num != len(iter_types):
self.context.report_error(
f"`iter_type` expected {var_num} charactor(s), "
f"but got {len(iter_types)}: {iter_types}",
span,
)
if var_num != len(loop_vars):
self.context.report_error(
f"`iter_type` expected {var_num} loop var(s), "
f"but got {len(loop_vars)}: {loop_vars}",
span,
)
for var, iter_ty, loop_var in zip(self.node.lhs, iter_types, loop_vars):
iter_type: int
if iter_ty == "S":
iter_type = IterVar.DataPar
elif iter_ty == "R":
iter_type = IterVar.CommReduce
else:
self.context.report_error(
f'`iter_type` only expected "S" (for spatial) or "R" (for reduce), '
f'but got "{iter_ty}"',
span,
)
if not isinstance(loop_var, tvm.tir.expr.Var):
self.context.report_error(
f"Values of `axis.remap` expected single loop var, but got {loop_var}",
loop_var.span,
)
loops = self.context.loop_stack
if loop_var not in loops:
self.context.report_error(
f"Cannot find loop var {loop_var} in loop nesting.",
span,
)
self.axis(var.id.name, loops[loop_var], loop_var, iter_type)
super().__init__(axis_remap, def_symbol=True)
def signature(self) -> Tuple[str, Tuple[list, list, Any]]:
return "tir.axis.remap", get_param_list(self.func)
@register
class BlockPredicate(SpecialStmt):
"""Special function where(predicate)
Example
-------
.. code-block:: python
T.where(i < 4)
"""
def __init__(self):
def where(predicate, span=None):
assert self.context, "call 'exit_scope' before 'enter_scope'"
block_scope = self.context.current_block_scope()
if block_scope is None:
self.context.report_error(
"Expected to declare the predicate inside a block.",
span,
)
if block_scope.predicate is not None:
self.context.report_error(
"Duplicate block predicate declaration, "
+ "previous one is "
+ str(block_scope.predicate),
span,
)
block_scope.predicate = predicate
super().__init__(where, def_symbol=False)
@register
class VarDef(SpecialStmt):
"""Special function for defining a Var"""
def __init__(self):
def var(dtype, span):
assert isinstance(
self.node, ast.Assign
), f"VarDef expected ast.Assign but got {type(self.node)}"
names = [x.id.name for x in self.node.lhs]
if len(names) != 1:
self.context.report_error(
f"VarDef expected assign to only one var, but got {names}", span
)
v = te.var(names[0], dtype, span=span)
self.context.update_symbol(v.name, v, self.node)
super().__init__(var, def_symbol=True)
@register
class BufferVarDef(SpecialStmt):
"""Special function for defining a variable of pointer type"""
def __init__(self):
def buffer_var(dtype, storage_scope, span):
assert isinstance(
self.node, ast.Assign
), f"BufferVarDef expected ast.Assign but got {type(self.node)}"
names = [x.id.name for x in self.node.lhs]
if len(names) != 1:
self.context.report_error(
f"VarDef expected assign to only one var, but got {names}", span
)
ptr_type = tvm.ir.PointerType(tvm.ir.PrimType(dtype), storage_scope)
v = te.var(names[0], ptr_type, span=span)
self.context.update_symbol(v.name, v, self.node)
super().__init__(buffer_var, def_symbol=True)
@register
class EnvThread(SpecialStmt):
"""Bind a var to thread env"""
def __init__(self):
def env_thread(env_name, span):
assert isinstance(
self.node, ast.Assign
), f"EnvThread expected ast.Assign but got {type(self.node)}"
names = [x.id.name for x in self.node.lhs]
if len(names) != 1:
self.context.report_error(
f"VarDef expected assign to only one var, but got {names}", span
)
v = te.var(names[0], span=span)
self.context.func_var_env_dict[v] = env_name
self.context.update_symbol(v.name, v, self.node)
super().__init__(env_thread, def_symbol=True)
@register
class FuncAttr(SpecialStmt):
"""Special Stmt for declaring the DictAttr of PrimFunc
Example
-------
.. code-block:: python
T.func_attr({"tir.noalias": True, "global_symbol"})
"""
def __init__(self):
def func_attr(dict_attr, span):
self.context.func_dict_attr = dict_attr
super().__init__(func_attr, def_symbol=False)
@register
class PreflattenedBufferMap(SpecialStmt):
"""Special Stmt for declaring the PrimFunc::preflattened_buffer_map
Example
-------
.. code-block:: python
T.preflattened_buffer_map({})
"""
def __init__(self):
def preflattened_buffer(
postflattened,
shape,
dtype="float32",
data=None,
strides=None,
elem_offset=None,
scope="global",
align=-1,
offset_factor=0,
buffer_type="default",
span=None,
):
param = None
for key, value in self.context.func_buffer_map.items():
if value.same_as(postflattened):
param = key
assert (
param is not None
), f"Post-flatten buffer {postflattened.name} does not appear in the buffer map."
buffer_name: str = f"{postflattened.name}_preflatten"
preflattened = tvm.tir.decl_buffer(
shape,
dtype,
buffer_name,
data,
strides,
elem_offset,
scope,
align,
offset_factor,
buffer_type,
span=span,
)
self.context.func_preflattened_buffer_map[param] = preflattened
super().__init__(preflattened_buffer, def_symbol=False)
@register
class TargetAttrValue(SpecialStmt):
"""Special Stmt for target attr value.
Example
-------
.. code-block:: python
T.target("llvm")
"""
def __init__(self):
def target(*args, span):
self.context.report_error(f"T.target should not appear as a stmt", span)
super().__init__(target, def_symbol=False)
def __call__(self, target_config):
if not isinstance(target_config, (str, dict)):
raise ValueError(
f"T.target expected a config dict or string, but got {type(target_config)}"
)
return Target(target_config)
```
#### File: contrib/test_cmsisnn/test_scalar_to_tensor_constant.py
```python
import sys
import numpy as np
import pytest
import tvm
from tvm import relay
tvm._ffi._init_api("relay.ext.cmsisnn.transform", __name__)
class CheckFunctionsForConstants(tvm.relay.ExprVisitor):
def __init__(self):
super().__init__()
self.num_constants_ = 0
def visit_call(self, call):
super().visit_call(call)
for arg in call.args:
if isinstance(arg, relay.Constant) and arg.data.numpy().ndim > 0:
self.num_constants_ += 1
def check_num_constants(self, func):
assert self.num_constants_ == 0, "Functions should not have constant arguments in Calls"
def set_external_func_attr(func, compiler, ext_symbol):
func = func.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
func = func.with_attr("Compiler", compiler)
func = func.with_attr("global_symbol", ext_symbol)
return func
def set_composite_func_attr(func, name):
func = func.with_attr("Composite", name)
return func
@tvm.testing.requires_cmsisnn
def test_single_scalar_position_0():
x0 = relay.var("x0", shape=None)
x1 = relay.var("x1", shape=(8, 8))
z1 = x0 + x1
lf = relay.Function([x0, x1], z1, relay.TensorType((8, 8), "float32"))
lf = set_composite_func_attr(lf, "cmsis-nn.qnn_add")
y0 = relay.expr.const(3, "float32")
y1 = relay.var("y1", shape=(8, 8))
c0 = relay.Call(lf, [y0, y1])
ef = relay.Function([y1], c0, relay.TensorType((8, 8), "float32"))
x = relay.var("x", shape=(8, 8))
ev = relay.GlobalVar("external_function")
ef = set_external_func_attr(ef, "cmsis-nn", ev.name_hint)
c = relay.Call(ev, [x])
mf = relay.Function([x], c, relay.TensorType((8, 8), "float32"))
mv = relay.GlobalVar("main")
mod = tvm.IRModule()
mod[ev] = ef
mod[mv] = mf
mod = relay.transform.InferType()(mod)
mod = ScalarToTensorConstants()(mod)
check_for_constants = CheckFunctionsForConstants()
check_for_constants.visit_call(mod[ev].body)
assert (
check_for_constants.num_constants_ == 1
), "Scalar constant wasn't converted into tensor constant"
@tvm.testing.requires_cmsisnn
def test_single_scalar_position_1():
x0 = relay.var("x0", shape=(8, 8))
x1 = relay.var("x1", shape=None)
z1 = x0 + x1
lf = relay.Function([x0, x1], z1, relay.TensorType((8, 8), "float32"))
lf = set_composite_func_attr(lf, "cmsis-nn.qnn_add")
y0 = relay.var("y0", shape=(8, 8))
y1 = relay.expr.const(3, "float32")
c0 = relay.Call(lf, [y0, y1])
ef = relay.Function([y0], c0, relay.TensorType((8, 8), "float32"))
x = relay.var("x", shape=(8, 8))
ev = relay.GlobalVar("external_function")
ef = set_external_func_attr(ef, "cmsis-nn", ev.name_hint)
c = relay.Call(ev, [x])
mf = relay.Function([x], c, relay.TensorType((8, 8), "float32"))
mv = relay.GlobalVar("main")
mod = tvm.IRModule()
mod[ev] = ef
mod[mv] = mf
mod = relay.transform.InferType()(mod)
mod = ScalarToTensorConstants()(mod)
check_for_constants = CheckFunctionsForConstants()
check_for_constants.visit_call(mod[ev].body)
assert (
check_for_constants.num_constants_ == 1
), "Scalar constant wasn't converted into tensor constant"
@tvm.testing.requires_cmsisnn
def test_two_scalars():
x1 = relay.var("x1", shape=None)
x2 = relay.var("x2", shape=None)
z1 = x1 + x2
lf = relay.Function([x1, x2], z1, relay.TensorType((), "float32"))
lf = set_composite_func_attr(lf, "cmsis-nn.qnn_add")
y0 = relay.expr.const(5, "float32")
y1 = relay.expr.const(3, "float32")
c0 = relay.Call(lf, [y0, y1])
ef = relay.Function([], c0, relay.TensorType((), "float32"))
ev = relay.GlobalVar("external_function")
ef = set_external_func_attr(ef, "cmsis-nn", ev.name_hint)
c = relay.Call(ev, [])
mf = relay.Function([], c, relay.TensorType((), "float32"))
mv = relay.GlobalVar("main")
mod = tvm.IRModule()
mod[ev] = ef
mod[mv] = mf
mod = relay.transform.InferType()(mod)
mod = ScalarToTensorConstants()(mod)
check_for_constants = CheckFunctionsForConstants()
check_for_constants.visit_call(mod[ev].body)
assert (
check_for_constants.num_constants_ == 0
), "Scalar constant wasn't converted into tensor constant"
@tvm.testing.requires_cmsisnn
def test_two_tensor_constants():
x0 = relay.var("x0", shape=(8, 8))
x1 = relay.var("x1", shape=(8, 8))
z1 = x0 + x1
lf = relay.Function([x0, x1], z1, relay.TensorType((8, 8), "float32"))
lf = set_composite_func_attr(lf, "cmsis-nn.qnn_add")
y0 = relay.const(np.random.uniform(0, 1, (8, 8)).astype("float32"), "float32")
y1 = relay.const(np.random.uniform(0, 1, (8, 8)).astype("float32"), "float32")
c0 = relay.Call(lf, [y0, y1])
ef = relay.Function([], c0, relay.TensorType((8, 8), "float32"))
ev = relay.GlobalVar("external_function")
ef = set_external_func_attr(ef, "cmsis-nn", ev.name_hint)
c = relay.Call(ev, [])
mf = relay.Function([], c, relay.TensorType((8, 8), "float32"))
mv = relay.GlobalVar("main")
mod = tvm.IRModule()
mod[ev] = ef
mod[mv] = mf
mod = relay.transform.InferType()(mod)
mod = ScalarToTensorConstants()(mod)
check_for_constants = CheckFunctionsForConstants()
check_for_constants.visit_call(mod[ev].body)
assert (
check_for_constants.num_constants_ == 2
), "Scalar constant wasn't converted into tensor constant"
@tvm.testing.requires_cmsisnn
def test_non_cmsisnn_ext_func():
"""Non CMSISNN functions should not be altered."""
def get_mod():
x1 = relay.var("x1", shape=None)
x2 = relay.var("x2", shape=None)
z1 = x1 + x2
lf = relay.Function([x1, x2], z1, relay.TensorType((), "float32"))
lf = set_composite_func_attr(lf, "cmsis-nn.qnn_add")
y0 = relay.expr.const(5, "float32")
y1 = relay.expr.const(3, "float32")
c0 = relay.Call(lf, [y0, y1])
ef = relay.Function([], c0, relay.TensorType((), "float32"))
ev = relay.GlobalVar("external_function")
ef = set_external_func_attr(ef, "foo", ev.name_hint)
c = relay.Call(ev, [])
mf = relay.Function([], c, relay.TensorType((), "float32"))
mv = relay.GlobalVar("main")
mod = tvm.IRModule()
mod[ev] = ef
mod[mv] = mf
mod = relay.transform.InferType()(mod)
return mod
expected = get_mod()["external_function"].body
actual = ScalarToTensorConstants()(get_mod())["external_function"].body
assert tvm.ir.structural_equal(expected, actual)
if __name__ == "__main__":
sys.exit(pytest.main([__file__] + sys.argv[1:]))
```
#### File: python/unittest/test_meta_schedule_measure_callback.py
```python
import re
from typing import List
import random
import pytest
import tvm
from tvm.ir import IRModule, assert_structural_equal
from tvm.meta_schedule.builder import BuilderResult
from tvm.meta_schedule.measure_callback import PyMeasureCallback
from tvm.meta_schedule.builder import PyBuilder, BuilderInput, BuilderResult
from tvm.meta_schedule.runner import (
RunnerInput,
RunnerResult,
RunnerFuture,
PyRunnerFuture,
PyRunner,
)
from tvm.meta_schedule.database import PyDatabase, Workload, TuningRecord
from tvm.meta_schedule.search_strategy import MeasureCandidate
from tvm.meta_schedule.task_scheduler import RoundRobin, TaskScheduler
from tvm.meta_schedule.utils import derived_object
from tvm.script import tir as T
from tvm.tir.schedule import Schedule
# pylint: disable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument,
# fmt: off
@tvm.script.ir_module
class Matmul:
@T.prim_func
def main(a: T.handle, b: T.handle, c: T.handle) -> None:
T.func_attr({"global_symbol": "main"})
A = T.match_buffer(a, (1024, 1024), "float32")
B = T.match_buffer(b, (1024, 1024), "float32")
C = T.match_buffer(c, (1024, 1024), "float32")
for i, j, k in T.grid(1024, 1024, 1024):
with T.block("matmul"):
vi, vj, vk = T.axis.remap("SSR", [i, j, k])
with T.init():
C[vi, vj] = 0.0
C[vi, vj] = C[vi, vj] + A[vi, vk] * B[vk, vj]
# fmt: on
# pylint: enable=invalid-name,no-member,line-too-long,too-many-nested-blocks,no-self-argument
@derived_object
class DummyRunnerFuture(PyRunnerFuture):
def done(self) -> bool:
return True
def result(self) -> RunnerResult:
return RunnerResult([random.uniform(5, 30) for _ in range(random.randint(1, 10))], None)
@derived_object
class DummyBuilder(PyBuilder):
def build(self, build_inputs: List[BuilderInput]) -> List[BuilderResult]:
return [BuilderResult("test_path", None) for _ in build_inputs]
@derived_object
class DummyRunner(PyRunner):
def run(self, runner_inputs: List[RunnerInput]) -> List[RunnerFuture]:
return [DummyRunnerFuture() for _ in runner_inputs]
@derived_object
class DummyDatabase(PyDatabase):
def __init__(self):
super().__init__()
self.records = []
self.workload_reg = []
def has_workload(self, mod: IRModule) -> Workload:
for workload in self.workload_reg:
if tvm.ir.structural_equal(workload.mod, mod):
return True
return False
def commit_tuning_record(self, record: TuningRecord) -> None:
self.records.append(record)
def commit_workload(self, mod: IRModule) -> Workload:
for workload in self.workload_reg:
if tvm.ir.structural_equal(workload.mod, mod):
return workload
workload = Workload(mod)
self.workload_reg.append(workload)
return workload
def get_top_k(self, workload: Workload, top_k: int) -> List[TuningRecord]:
return list(
filter(
lambda x: x.workload == workload,
sorted(self.records, key=lambda x: sum(x.run_secs) / len(x.run_secs)),
)
)[: int(top_k)]
def __len__(self) -> int:
return len(self.records)
def print_results(self) -> None:
print("\n".join([str(r) for r in self.records]))
def test_meta_schedule_measure_callback():
@derived_object
class FancyMeasureCallback(PyMeasureCallback):
def apply(
self,
task_scheduler: TaskScheduler,
task_id: int,
measure_candidates: List[MeasureCandidate],
builds: List[BuilderResult],
results: List[RunnerResult],
) -> None:
assert len(measure_candidates) == 1
assert_structural_equal(measure_candidates[0].sch.mod, Matmul)
assert (
len(builds) == 1
and builds[0].error_msg is None
and builds[0].artifact_path == "test_build"
)
assert (
len(results) == 1 and results[0].error_msg is None and len(results[0].run_secs) == 2
)
measure_callback = FancyMeasureCallback()
measure_callback.apply(
RoundRobin([], DummyBuilder(), DummyRunner(), DummyDatabase()),
0,
[MeasureCandidate(Schedule(Matmul), None)],
[BuilderResult("test_build", None)],
[RunnerResult([1.0, 2.1], None)],
)
def test_meta_schedule_measure_callback_fail():
@derived_object
class FailingMeasureCallback(PyMeasureCallback):
def apply(
self,
task_scheduler: TaskScheduler,
task_id: int,
measure_candidates: List[MeasureCandidate],
builds: List[BuilderResult],
results: List[RunnerResult],
) -> None:
raise ValueError("test")
measure_callback = FailingMeasureCallback()
with pytest.raises(ValueError, match="test"):
measure_callback.apply(
RoundRobin([], DummyBuilder(), DummyRunner(), DummyDatabase()),
0,
[MeasureCandidate(Schedule(Matmul), None)],
[BuilderResult("test_build", None)],
[RunnerResult([1.0, 2.1], None)],
)
def test_meta_schedule_measure_callback_as_string():
@derived_object
class NotSoFancyMeasureCallback(PyMeasureCallback):
def apply(
self,
task_scheduler: "TaskScheduler",
task_id: int,
measure_candidates: List[MeasureCandidate],
builds: List[BuilderResult],
results: List[RunnerResult],
) -> None:
pass
measure_callback = NotSoFancyMeasureCallback()
pattern = re.compile(r"meta_schedule.NotSoFancyMeasureCallback\(0x[a-f|0-9]*\)")
assert pattern.match(str(measure_callback))
if __name__ == "__main__":
test_meta_schedule_measure_callback()
test_meta_schedule_measure_callback_fail()
test_meta_schedule_measure_callback_as_string()
```
#### File: python/unittest/test_tir_transform_common_subexpr_elim.py
```python
import tvm
from tvm import te
# A test program which gives the opportunity for the CSE pass to introduce two new variables, at two different levels
def test_cse():
z1 = te.var("z1")
z2 = te.var("z2")
z3 = te.var("z3")
i1 = te.var("i1")
i2 = te.var("i2")
x = te.var("x")
y = te.var("y")
a = te.var("a")
b = te.var("b")
dtype = "int32"
buffer = tvm.tir.decl_buffer((50,), dtype)
# Test prog :
# let z1=1 in let z2=2 in
# Mem[i1] = z1+z2;
# let x = 1 in let y = 1 in
# let a = (x+y) + (z1+z2) in
# let b = (x+y) + z3 in
# Mem[i2] = a+b;
body = tvm.tir.LetStmt(
z1,
1,
tvm.tir.LetStmt(
z2,
2,
tvm.tir.SeqStmt(
[
tvm.tir.BufferStore(buffer, z1 + z2, [i1]),
tvm.tir.LetStmt(
x,
1,
tvm.tir.LetStmt(
y,
1,
tvm.tir.LetStmt(
a,
(x + y) + (z1 + z2),
tvm.tir.LetStmt(
b, (x + y) + z3, tvm.tir.BufferStore(buffer, a + b, [i2])
),
),
),
),
]
),
),
)
# This test program gives the opportunity to introduce two new variables, at two different levels
# and to perform replacements in the value of "a" and "b", using these new variables
# We will check all of that underneath and more, making also sure that nothing else has been changed
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([i1, i2, z3], body))
body = tvm.tir.transform.CommonSubexprElimTIR()(mod)
tvm.transform.PrintIR()(body)
body = body["main"].body # Gets the body of the main, i.e. the full statement
assert body.var.name == "z1"
assert body.value == 1
body = body.body
assert body.var.name == "z2"
assert body.value == 2
# This is the let-in for the first variable generated cse_var_1
assert isinstance(body.body, tvm.tir.LetStmt)
body = body.body
# And this is the name and value of this variable
cse_var_1 = body.var # Keep the variable accessible for later checking the replacements
assert body.var.name == "cse_var_1"
assert tvm.ir.structural_equal(body.value, z1 + z2)
assert isinstance(body.body, tvm.tir.SeqStmt)
body = body.body
assert isinstance(body[0], tvm.tir.BufferStore)
assert isinstance(body[1], tvm.tir.LetStmt)
body = body[1]
assert body.var.name == "x"
assert body.value == 1
body = body.body
assert body.var.name == "y"
assert body.value == 1
# This is the let-in for the second variable generated cse_var_2
assert isinstance(body.body, tvm.tir.LetStmt)
body = body.body
# And this is the name and value of this variable
cse_var_2 = body.var # Keep the variable accessible for later checking the replacements
assert body.var.name == "cse_var_2"
assert tvm.ir.structural_equal(body.value, x + y)
body = body.body
body.var.name == "a"
# Check that the replacement has been done correctly!
assert tvm.ir.structural_equal(body.value, cse_var_2 + cse_var_1)
body = body.body
body.var.name == "b"
# Check that the replacement has been done correctly!
assert tvm.ir.structural_equal(body.value, cse_var_2 + z3)
assert isinstance(body.body, tvm.tir.BufferStore)
# First specific test for if nodes : Some duplicated computations appear only in one branch (here the Then branch), not in both branches.
# In this case, the CSE pass should introduce the redundant computation at the top if the Then branch, not before the whole If
# (otherwise that would lead to some computations being computed for nothing when it is the Else branch that is executed).
def test_cse_ifNode_1():
b = te.var("b")
i1 = te.var("i1")
i2 = te.var("i2")
i3 = te.var("i3")
y = te.var("y")
z = te.var("z")
dtype = "int32"
buffer = tvm.tir.decl_buffer((50,), dtype)
# Test prog :
# let b=1 in
# if(b) {
# Mem[i1] = y+z
# Mem[i2] = y+z
# }
# else {
# Mem[i3] = y
# }
body = tvm.tir.LetStmt(
b,
1,
tvm.tir.IfThenElse(
b,
tvm.tir.SeqStmt(
[tvm.tir.BufferStore(buffer, y + z, [i1]), tvm.tir.BufferStore(buffer, y + z, [i2])]
),
tvm.tir.BufferStore(buffer, y, [i3]),
),
)
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([i1, i2, i3, y, z], body))
body = tvm.tir.transform.CommonSubexprElimTIR()(mod)
tvm.transform.PrintIR()(body)
body = body["main"].body # Gets the body of the main, i.e. the full statement
assert body.var.name == "b"
assert body.value == 1
assert isinstance(body.body, tvm.tir.IfThenElse)
body = body.body
assert isinstance(body.then_case, tvm.tir.LetStmt)
body = body.then_case
# The let-in introduced by the CSE should appear now, inside the Then branch of the If node
assert body.var.name == "cse_var_1"
# and it should contain the expression (y+z) that was redundant
assert tvm.ir.structural_equal(body.value, y + z)
# Second test for if nodes : Some duplicated computations appear in both the Then and the Else branch.
# In this case, the CSE pass should introduce the redundant computation before the whole If node, because
# regardless of the execution path, it is going to be computed.
def test_cse_ifNode_2():
b = te.var("b")
i1 = te.var("i1")
i2 = te.var("i2")
i3 = te.var("i3")
y = te.var("y")
z = te.var("z")
dtype = "int32"
buffer = tvm.tir.decl_buffer((50,), dtype)
# Test prog :
# let b=1 in
# if(b) {
# Mem[i1] = y+z
# Mem[i2] = y
# }
# else {
# Mem[i3] = y+z
# }
body = tvm.tir.LetStmt(
b,
1,
tvm.tir.IfThenElse(
b,
tvm.tir.SeqStmt(
[
tvm.tir.BufferStore(buffer, y + z, [i1]), # (y+z) is present in the Then branch
tvm.tir.BufferStore(buffer, y, [i2]),
]
),
tvm.tir.BufferStore(buffer, y + z, [i3]), # and also present in the Else branch
),
)
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([i1, i2, i3, y, z], body))
body = tvm.tir.transform.CommonSubexprElimTIR()(mod)
tvm.transform.PrintIR()(body)
body = body["main"].body # Gets the body of the main, i.e. the full statement
assert isinstance(body, tvm.tir.LetStmt)
# The let-in introduced by the CSE should appear now, at the toplevel (i.e. before the If)
assert body.var.name == "cse_var_1"
# and it should contain the expression (y+z) that was redundant
assert tvm.ir.structural_equal(body.value, y + z)
# Test commoning in cascade : after having introduced a big exp ((x+y)+z) into a new variable,
# it will become possible to do another commoning for (x+y) which appears both in the new variable
# and in the rest of the program.
def test_cse_cascade():
i1 = te.var("i1")
i2 = te.var("i2")
i3 = te.var("i3")
x = te.var("x")
y = te.var("y")
z = te.var("z")
dtype = "int32"
buffer = tvm.tir.decl_buffer((50,), dtype)
# Test prog :
# Mem[i1] = (x+y)+z;
# Mem[i2] = (x+y)+z;
# Mem[i3] = x+y
body = tvm.tir.SeqStmt(
[
tvm.tir.BufferStore(buffer, (x + y) + z, [i1]),
tvm.tir.BufferStore(buffer, (x + y) + z, [i2]),
tvm.tir.BufferStore(buffer, (x + y), [i3]),
]
)
mod = tvm.IRModule.from_expr(tvm.tir.PrimFunc([i1, i2, i3, x, y, z], body))
body = tvm.tir.transform.CommonSubexprElimTIR()(mod)
tvm.transform.PrintIR()(body)
body = body["main"].body # Gets the body of the main, i.e. the full statement
assert isinstance(body, tvm.tir.LetStmt)
# The second let-in (by order introduced) introduced by the CSE should appear first
cse_var_2 = body.var # Keep the variable accessible for later checking the replacements
assert body.var.name == "cse_var_2"
# and it should contain the expression (x+y)
assert tvm.ir.structural_equal(body.value, (x + y))
body = body.body
assert isinstance(body, tvm.tir.LetStmt)
# The first let-in (by order introduced) introduced by the CSE should appear now, after the 2nd
cse_var_1 = body.var # Keep the variable accessible for later checking the replacements
assert body.var.name == "cse_var_1"
# and it should contain the expression cse_var_2+z
assert tvm.ir.structural_equal(body.value, cse_var_2 + z)
body = body.body
assert isinstance(body, tvm.tir.SeqStmt)
assert isinstance(body[0], tvm.tir.BufferStore)
assert isinstance(body[1], tvm.tir.BufferStore)
assert isinstance(body[2], tvm.tir.BufferStore)
store1 = body[0]
store2 = body[1]
store3 = body[2]
assert tvm.ir.structural_equal(store1.value, cse_var_1)
assert tvm.ir.structural_equal(store2.value, cse_var_1)
assert tvm.ir.structural_equal(store3.value, cse_var_2)
if __name__ == "__main__":
test_cse()
test_cse_ifNode_1()
test_cse_ifNode_2()
test_cse_cascade()
``` |
{
"source": "666wcy/heroku-1",
"score": 3
} |
#### File: web/src/credentials.py
```python
import googleapiclient.discovery
import httplib2
import oauth2client
def refreshCredentials(config):
credentials = oauth2client.client.GoogleCredentials(
config.get("access_token"),
config.get("client_id"),
config.get("client_secret"),
config.get("refresh_token"),
None,
"https://accounts.google.com/o/oauth2/token",
None,
)
http = credentials.authorize(httplib2.Http())
credentials.refresh(http)
config["access_token"] = credentials.access_token
config["token_expiry"] = str(credentials.token_expiry)
drive = googleapiclient.discovery.build("drive", "v3", credentials=credentials)
return config, drive
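# Usage sketch (added for illustration; the config values are placeholders, not real credentials):
# config = {"access_token": "...", "client_id": "...", "client_secret": "...", "refresh_token": "..."}
# config, drive = refreshCredentials(config)
# drive.files().list(pageSize=1).execute()  # assumes the standard Drive v3 files.list call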
``` |
{
"source": "666Yeet/BAC",
"score": 3
} |
#### File: 666Yeet/BAC/BAC.py
```python
import pyautogui
import time
import keyboard
################################################################
#vars
IsPressed = False
sleep = time.sleep
################################################################
#configs
print("Made By Cody666#5618, v1.0.3")
print("Enter Amount Of Clicks:")
kys = int(input())
sleep(0.1)
print("Enter Keybind")
keyhold = (input())
sleep(0.1)
print("If You Want To Change Any Setting You Have To Restart This Program")
################################################################
#loop function
def here():
print(r""" _______
/ |0|0| \
|___|___|
| |
| |
| |
| |
\_______/""")
print("Mouse Hehe")
################################################################
#main "loop?"
while True:
if not keyboard.is_pressed(keyhold):
IsPressed = False
while not IsPressed:
sleep(0.05)
if keyboard.is_pressed(keyhold):
sleep(0.05)
here()
IsPressed = True
pyautogui.click(None,None,kys)
################################################################
``` |
{
"source": "666zcli/models",
"score": 3
} |
#### File: image/cifar10/cifar10_train.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import time
import re
import math
import tensorflow as tf
import cifar10
import os
FLAGS = tf.app.flags.FLAGS
# tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
# """Directory where to write event logs """
# """and checkpoint.""")
tf.app.flags.DEFINE_string('train_dir', './Adam_finetune_bias_tuning_lr_0.0001_ti_150000_ellipse_weight_decay_0.015/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 150000,
"""Number of batches to run.""")
tf.app.flags.DEFINE_float('weight_decay', 0.015,
"""Decay to learn quantized weights.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
tf.app.flags.DEFINE_integer('log_frequency', 1,
"""How often to log results to the console.""")
def train():
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default():
global_step = tf.train.get_or_create_global_step()
# Get images and labels for CIFAR-10.
# Force input pipeline to CPU:0 to avoid operations sometimes ending up on
# GPU and resulting in a slow down.
with tf.device('/cpu:0'):
images, labels = cifar10.distorted_inputs()
# Build a Graph that computes the logits predictions from the
# inference model.
logits = cifar10.inference(images)
# Calculate loss.
# loss = cifar10.loss(logits, labels)
cross_entropy, l2_loss = cifar10.loss(logits, labels)
# n = tf.constant(2.5)
# n = tf.constant(1.0)
# conv1_std_co = tf.constant(0.326984)
# conv2_std_co = tf.constant(0.099911)
# local3_std_co = tf.constant(0.010653)
# local4_std_co = tf.constant(0.015261)
# softmax_linear_std_co = tf.constant(0.222937)
# conv1_quan = tf.multiply(n, conv1_std_co)
# conv2_quan = tf.multiply(n, conv2_std_co)
# local3_quan = tf.multiply(n, local3_std_co)
# local4_quan = tf.multiply(n, local4_std_co)
# softmax_linear_quan = tf.multiply(n, softmax_linear_std_co)
# conv1_quan = tf.constant(0.15)
# conv2_quan = tf.constant(0.08)
# local3_quan = tf.constant(0.04)
# local4_quan = tf.constant(0.06)
# softmax_linear_quan = tf.constant(0.29)
s_conv1 = 0.8
s_conv2 = 1
s_local3 = 1
s_local4 = 1
s_softmax_linear = 1
conv1_quan2 = s_conv1 * tf.constant(0.125)
conv2_quan2 = s_conv2 * tf.constant(0.0625)
local3_quan2 = s_local3 * tf.constant(0.03125)
local4_quan2 = s_local4 * tf.constant(0.0625)
softmax_linear_quan2 = s_softmax_linear * tf.constant(0.125)
conv1_quan = s_conv1 * tf.constant(0.0625)
conv2_quan = s_conv2 * tf.constant(0.015625)
local3_quan = s_local3 * tf.constant(0.0078125)
local4_quan = s_local4 * tf.constant(0.03125)
softmax_linear_quan = s_softmax_linear * tf.constant(0.0625)
#mytrainable_list = []
for var in tf.trainable_variables():
# # mytrainable_list.append(var)
weights_pattern_conv1 = ".*conv1/weights$"
weights_pattern_conv2 = ".*conv2/weights$"
weights_pattern_local3 = ".*local3/weights$"
weights_pattern_local4 = ".*local4/weights$"
weights_pattern_softmax_linear = ".*local4/softmax_linear/weights$"
# bias_pattern = re.compile("(.*conv1/biases$)|(.*conv2/biases$)|(.*local3/biases$)|(.*local4/biases$)|(.*local4/softmax_linear/biases$)")
if re.compile(weights_pattern_conv1).match(var.op.name):
conv1_weights = var
# # mytrainable_list.append(var)
elif re.compile(weights_pattern_conv2).match(var.op.name):
conv2_weights = var
# # mytrainable_list.append(var)
elif re.compile(weights_pattern_local3).match(var.op.name):
local3_weights = var
# # mytrainable_list.append(var)
elif re.compile(weights_pattern_local4).match(var.op.name):
local4_weights = var
# # mytrainable_list.append(var)
elif re.compile(weights_pattern_softmax_linear).match(var.op.name):
softmax_linear_weights = var
# # mytrainable_list.append(var)
# elif bias_pattern.match(var.op.name):
# mytrainable_list.append(var)
# else:
# raise RuntimeError('Some variables are not matched!!!')
#tf.add_to_collection('mytrainable_list', mytrainable_list)
f1_conv1 = tf.sign(conv1_weights + conv1_quan2) * (conv1_weights + conv1_quan2)
f2_conv1 = tf.sign(conv1_weights + conv1_quan) * (conv1_weights + conv1_quan)
f3_conv1 = tf.sign(conv1_weights) * conv1_weights
f4_conv1 = tf.sign(conv1_weights - conv1_quan) * (conv1_weights - conv1_quan)
f5_conv1 = tf.sign(conv1_weights - conv1_quan2) * (conv1_weights - conv1_quan2)
'''
f1_conv2 = tf.sign(conv2_weights + conv2_quan) * (conv2_weights + conv2_quan)
f2_conv2 = tf.sign(conv2_weights) * conv2_weights
f3_conv2 = tf.sign(conv2_weights - conv2_quan) * (conv2_weights - conv2_quan)
f1_local3 = tf.sign(local3_weights + local3_quan) * (local3_weights + local3_quan)
f2_local3 = tf.sign(local3_weights) * local3_weights
f3_local3 = tf.sign(local3_weights - local3_quan) * (local3_weights - local3_quan)
f1_local4 = tf.sign(local4_weights + local4_quan) * (local4_weights + local4_quan)
f2_local4 = tf.sign(local4_weights) * local4_weights
f3_local4 = tf.sign(local4_weights - local4_quan) * (local4_weights - local4_quan)
f1_softmax_linear = tf.sign(softmax_linear_weights + softmax_linear_quan) * (softmax_linear_weights + softmax_linear_quan)
f2_softmax_linear = tf.sign(softmax_linear_weights) * softmax_linear_weights
f3_softmax_linear = tf.sign(softmax_linear_weights - softmax_linear_quan) * (softmax_linear_weights - softmax_linear_quan)
'''
f1_conv2 = tf.sign(conv2_weights + conv2_quan2) * (conv2_weights + conv2_quan2)
f2_conv2 = tf.sign(conv2_weights + conv2_quan) * (conv2_weights + conv2_quan)
f3_conv2 = tf.sign(conv2_weights) * conv2_weights
f4_conv2 = tf.sign(conv2_weights - conv2_quan) * (conv2_weights - conv2_quan)
f5_conv2 = tf.sign(conv2_weights - conv2_quan2) * (conv2_weights - conv2_quan2)
f1_local3 = tf.sign(local3_weights + local3_quan2) * (local3_weights + local3_quan2)
f2_local3 = tf.sign(local3_weights + local3_quan) * (local3_weights + local3_quan)
f3_local3 = tf.sign(local3_weights) * local3_weights
f4_local3 = tf.sign(local3_weights - local3_quan) * (local3_weights - local3_quan)
f5_local3 = tf.sign(local3_weights - local3_quan2) * (local3_weights - local3_quan2)
f1_local4 = tf.sign(local4_weights + local4_quan2) * (local4_weights + local4_quan2)
f2_local4 = tf.sign(local4_weights + local4_quan) * (local4_weights + local4_quan)
f3_local4 = tf.sign(local4_weights) * local4_weights
f4_local4 = tf.sign(local4_weights - local4_quan) * (local4_weights - local4_quan)
f5_local4 = tf.sign(local4_weights - local4_quan2) * (local4_weights - local4_quan2)
f1_softmax_linear = tf.sign(softmax_linear_weights + softmax_linear_quan2) * (softmax_linear_weights + softmax_linear_quan2)
f2_softmax_linear = tf.sign(softmax_linear_weights + softmax_linear_quan) * (softmax_linear_weights + softmax_linear_quan)
f3_softmax_linear = tf.sign(softmax_linear_weights) * softmax_linear_weights
f4_softmax_linear = tf.sign(softmax_linear_weights - softmax_linear_quan) * (softmax_linear_weights - softmax_linear_quan)
f5_softmax_linear = tf.sign(softmax_linear_weights - softmax_linear_quan2) * (softmax_linear_weights - softmax_linear_quan2)
conv1_regularizers = tf.where(tf.less(conv1_weights, -tf.add(tf.multiply(0.5, conv1_quan), tf.multiply(0.5, conv1_quan2))), f1_conv1,
tf.where(tf.less(conv1_weights, -tf.divide(conv1_quan, 2.0)), f2_conv1, tf.where(tf.less(conv1_weights, tf.divide(conv1_quan, 2.0)), f3_conv1,
tf.where(tf.less(conv1_weights, tf.add(tf.multiply(0.5, conv1_quan), tf.multiply(0.5, conv1_quan2))), f4_conv1, f5_conv1))))
'''
conv2_regularizers = tf.where(tf.less(conv2_weights, -tf.divide(conv2_quan, 2.0)), f1_conv2,
tf.where(tf.less(conv2_weights, tf.divide(conv2_quan, 2.0)), f2_conv2, f3_conv2))
local3_regularizers = tf.where(tf.less(local3_weights, -tf.divide(local3_quan, 2.0)), f1_local3,
tf.where(tf.less(local3_weights, tf.divide(local3_quan, 2.0)), f2_local3, f3_local3))
local4_regularizers = tf.where(tf.less(local4_weights, -tf.divide(local4_quan, 2.0)), f1_local4,
tf.where(tf.less(local4_weights, tf.divide(local4_quan, 2.0)), f2_local4, f3_local4))
softmax_linear_regularizers = tf.where(tf.less(softmax_linear_weights, -tf.divide(softmax_linear_quan, 2.0)), f1_softmax_linear,
tf.where(tf.less(softmax_linear_weights, tf.divide(softmax_linear_quan, 2.0)), f2_softmax_linear, f3_softmax_linear))
'''
conv2_regularizers = tf.where(tf.less(conv2_weights, -tf.add(tf.multiply(0.5, conv2_quan), tf.multiply(0.5, conv2_quan2))), f1_conv2,
tf.where(tf.less(conv2_weights, -tf.divide(conv2_quan, 2.0)), f2_conv2, tf.where(tf.less(conv2_weights, tf.divide(conv2_quan, 2.0)), f3_conv2,
tf.where(tf.less(conv2_weights, tf.add(tf.multiply(0.5, conv2_quan), tf.multiply(0.5, conv2_quan2))), f4_conv2, f5_conv2))))
local3_regularizers = tf.where(tf.less(local3_weights, -tf.add(tf.multiply(0.5, local3_quan), tf.multiply(0.5, local3_quan2))), f1_local3,
tf.where(tf.less(local3_weights, -tf.divide(local3_quan, 2.0)), f2_local3, tf.where(tf.less(local3_weights, tf.divide(local3_quan, 2.0)), f3_local3,
tf.where(tf.less(local3_weights, tf.add(tf.multiply(0.5, local3_quan), tf.multiply(0.5, local3_quan2))), f4_local3, f5_local3))))
local4_regularizers = tf.where(tf.less(local4_weights, -tf.add(tf.multiply(0.5, local4_quan), tf.multiply(0.5, local4_quan2))), f1_local4,
tf.where(tf.less(local4_weights, -tf.divide(local4_quan, 2.0)), f2_local4, tf.where(tf.less(local4_weights, tf.divide(local4_quan, 2.0)), f3_local4,
tf.where(tf.less(local4_weights, tf.add(tf.multiply(0.5, local4_quan), tf.multiply(0.5, local4_quan2))), f4_local4, f5_local4))))
softmax_linear_regularizers = tf.where(tf.less(softmax_linear_weights, -tf.add(tf.multiply(0.5, softmax_linear_quan), tf.multiply(0.5, softmax_linear_quan2))), f1_softmax_linear,
tf.where(tf.less(softmax_linear_weights, -tf.divide(softmax_linear_quan, 2.0)), f2_softmax_linear, tf.where(tf.less(softmax_linear_weights, tf.divide(softmax_linear_quan, 2.0)), f3_softmax_linear,
tf.where(tf.less(softmax_linear_weights, tf.add(tf.multiply(0.5, softmax_linear_quan), tf.multiply(0.5, softmax_linear_quan2))), f4_softmax_linear, f5_softmax_linear))))
quantify_regularizers = (tf.reduce_sum(conv1_regularizers)+
tf.reduce_sum(conv2_regularizers)+
tf.reduce_sum(local3_regularizers)+
tf.reduce_sum(local4_regularizers)+
tf.reduce_sum(softmax_linear_regularizers)
)
# # a changes with a square root of cosine function
# a = tf.Variable(1., trainable=False, name='a')
# tf.summary.scalar(a.op.name, a)
# PI = tf.constant(math.pi)
# a = tf.assign(a, tf.sqrt(0.5*(1.0+tf.cos(tf.divide(PI,FLAGS.max_steps)*tf.cast(global_step,tf.float32)))+1e-8))
# a changes with a straight line
# a = tf.assign(a, tf.add(tf.multiply(tf.divide(-1.0, (int(num_epochs * train_size) // BATCH_SIZE)),batch), 1))
# a changes with a ellipse and sets to 0 at the final 5000 steps (N is the final steps to be set to 0)
# N = tf.constant(5000)
# a = tf.cond(tf.less(global_step, tf.cast(FLAGS.max_steps - N, tf.int64)), lambda:tf.assign(a,tf.cast(tf.sqrt(1.0-tf.divide(tf.cast(tf.square(global_step),tf.int32), tf.square(FLAGS.max_steps))), tf.float32)),lambda:tf.assign(a, 0.))
# a changes with a cosine function
a = tf.Variable(1., trainable=False, name='a')
tf.summary.scalar(a.op.name, a)
PI = tf.constant(math.pi)
a = tf.assign(a, 0.5 * (1.0 + tf.cos(tf.divide(PI, FLAGS.max_steps) * tf.cast(global_step, tf.float32))) + 1e-8)
# a changes with a cosine function sets to 0 at the final 5000 steps (N is the final steps to be set to 0)
# a = tf.Variable(1., trainable=False, name='a')
# tf.summary.scalar(a.op.name, a)
# N = tf.constant(5000)
# PI = tf.constant(math.pi)
# a = tf.cond(tf.less(global_step, tf.cast(FLAGS.max_steps - N, tf.int64)), lambda:tf.assign(a, 0.5 * (1.0 + tf.cos(tf.divide(PI, FLAGS.max_steps) * tf.cast(global_step, tf.float32))) + 1e-8) ,
# lambda: tf.assign(a, 0.))
# b = tf.Variable(0.5, trainable=False, name='b')
# tf.summary.scalar(b.op.name, b)
# b = tf.assign(b, tf.random_uniform([], 0., 1.))
# deformable_regularizers = tf.where(tf.less(b, a), l2_loss, quantify_regularizers)
# DECAY = tf.constant(0.012)
deformable_regularizers = a * l2_loss + (1 - a) * quantify_regularizers
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
# train_op = cifar10.train(loss, global_step)
# total_loss = cross_entropy + 0.001 * l2_loss
total_loss = cross_entropy+FLAGS.weight_decay*deformable_regularizers
# total_loss = cross_entropy + 0.001 * quantify_regularizers
train_op = cifar10.train(total_loss, global_step)
tf.summary.scalar('total_loss', total_loss)
tf.summary.scalar('cross_entropy', cross_entropy)
tf.summary.scalar('DECAY*deformable_regularizers', tf.multiply(FLAGS.weight_decay, deformable_regularizers))
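# Note (added for clarity): with the cosine schedule above, a starts near 1 and decays to ~0,
# so the regularizer anneals from plain L2 weight decay toward the piecewise quantization
# penalty as training approaches max_steps.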
# for var in tf.trainable_variables():
# pattern = ".*weights.*"
# if re.compile(pattern).match(var.op.name):
# tf.summary.histogram(var.op.name, var)
class _LoggerHook(tf.train.SessionRunHook):
"""Logs loss and runtime."""
def begin(self):
self._step = -1
self._start_time = time.time()
def before_run(self, run_context):
self._step += 1
return tf.train.SessionRunArgs(total_loss) # Asks for loss value.
def after_run(self, run_context, run_values):
if self._step % FLAGS.log_frequency == 0:
current_time = time.time()
duration = current_time - self._start_time
self._start_time = current_time
loss_value = run_values.results
examples_per_sec = FLAGS.log_frequency * FLAGS.batch_size / duration
sec_per_batch = float(duration / FLAGS.log_frequency)
format_str = ('%s: step %d, total_loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print (format_str % (datetime.now(), self._step, loss_value,
examples_per_sec, sec_per_batch))
# for var in tf.trainable_variables():
# tf.summary.histogram(var.op.name, var)
config = tf.ConfigProto(log_device_placement=FLAGS.log_device_placement)
config.gpu_options.allow_growth = True
print_op = tf.no_op()
# for var in tf.trainable_variables():
# weights_pattern = ".*weights.*"
# if re.compile(weights_pattern).match(var.op.name):
# ini_mean, ini_variance = tf.nn.moments(tf.reshape(var, [-1]), [0])
# ini_std = tf.sqrt(ini_variance)
# print_var = tf.Print(var.op.name, [var.op.name, ini_std], var.op.name)
# # print_ini_op0 = tf.Print(ini_std, [ini_std], 'ini_std')
# print_op = tf.group(print_op, print_var)
# with tf.control_dependencies([print_op]):
# train_op = tf.group(train_op, tf.no_op())
#
# if global_step % 1 == 0:
# # summary_str = sess.run(summary_op)
# with tf.control_dependencies([summary_op]):
# train_op = tf.group(train_op, tf.no_op())
# summary_writer.add_summary(summary_str, global_step)
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES)
# Build the summary operation from the last tower summaries.
summary_op = tf.summary.merge(summaries)
summary_writer = tf.summary.FileWriter(
FLAGS.train_dir,
graph=tf.get_default_graph())
with tf.Session(config=config) as sess:
#saver = tf.train.import_meta_graph('./tb_no_quantization_baseline_300000/cifar10_train/model.ckpt-300000.meta')
sess.run(tf.global_variables_initializer())
var_dic = {}
_vars = tf.global_variables()
for _var in _vars:
pattern = re.compile("(.*conv1/weights$)|(.*conv2/weights$)|(.*local3/weights$)|(.*local4/weights$)|(.*local4/softmax_linear/weights$)|(.*conv1/biases$)|(.*conv2/biases$)|(.*local3/biases$)|(.*local4/biases$)|(.*local4/softmax_linear/biases$)|(.*MovingAverage$)")
if pattern.match(_var.op.name) :
_var_name = _var.op.name
var_dic[_var_name] = _var
saver = tf.train.Saver(var_dic)
#saver.restore(sess, "./pretrain_baseline_0.872_lr_0.0002_wd_0.001_ti_500000/cifar10_train/model.ckpt-500000")
# saver.restore(sess, "./Adam_finetune_conv1_lr_0.00005_wd_0.01_ti_150000_aL1/cifar10_train/model.ckpt-150000")
# saver.restore(sess, "./Adam_finetune_conv1_lr_0.00005_wd_0.01_ti_150000_ellipse/cifar10_train/model.ckpt-150000")
# saver.restore(sess, "./Adam_finetune_conv1_lr_0.00005_wd_0.01_ti_150000_Bernoulli/cifar10_train/model.ckpt-150000")
# saver.restore(sess,"./Adam_finetune_freeze_conv1_conv2_0.006_lr_0.0001_ti_121000_Bernoulli_v2/cifar10_train/model.ckpt-121000")
# saver.restore(sess,"./Adam_finetune_freeze_conv1_conv2_0.006_lr_0.0001_ti_150000_ellipse/cifar10_train/model.ckpt-150000")
# saver.restore(sess,"./Adam_finetune_freeze_conv1_conv2_0.006_lr_0.0001_ti_121000_Bernoulli_v3/cifar10_train/model.ckpt-121000")
# saver.restore(sess,"./Adam_finetune_freeze_conv12_local3_0.003_lr_0.0001_ti_121000_Bernoulli/cifar10_train/model.ckpt-121000")
# saver.restore(sess,"./Adam_finetune_freeze_conv12_local3_0.002_lr_0.0001_ti_150000_ellipse/cifar10_train/model.ckpt-150000")
# saver.restore(sess,"./Adam_finetune_freeze_conv12_local3_0.002_lr_0.0001_ti_121000_Bernoulli_v3/cifar10_train/model.ckpt-121000")
# saver.restore(sess,"./Adam_finetune_freeze_conv12local3_local4_0.01_lr_0.00005_ti_121000_Bernoulli/cifar10_train/model.ckpt-121000")
# saver.restore(sess,"./Adam_finetune_freeze_conv12local3_local4_0.004_lr_0.0001_ti_121000_Bernoulli_v3/cifar10_train/model.ckpt-121000")
# saver.restore(sess,"./Adam_finetune_freeze_conv12local3_local4_0.004_lr_0.0001_ti_150000_ellipse/cifar10_train/model.ckpt-150000")
# saver.restore(sess,"./Adam_finetune_freeze_conv12local3_local4_0.008_lr_0.00005_ti_121000_Bernoulli_v3/cifar10_train/model.ckpt-121000")
# saver.restore(sess,"./Adam_finetune_freeze_conv12local34_softmax_0.002_lr_0.00005_ti_121000_Bernoulli_v3/cifar10_train/model.ckpt-121000")
#saver.restore(sess,"./Adam_finetune_bias_tuning_lr_0.00005_ti_150000_ellipse_v1/cifar10_train/model.ckpt-150000")
#saver.restore(sess,"./Adam_finetune_bias_tuning_lr_0.00005_ti_150000_ellipse/cifar10_train/model.ckpt-300000.meta")
#saver.restore(sess, "./tb_no_quantization_baseline_300000/cifar10_train/model.ckpt-300000")
saver.restore(sess, "./origian/cifar10_train/model.ckpt-150000")
# Start the queue runners.
coord = tf.train.Coordinator()
# threads = tf.train.start_queue_runners(sess=sess, coord=coord)
tf.train.start_queue_runners(sess=sess, coord=coord)
saver = tf.train.Saver()
checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
# coord.request_stop()
# coord.join(threads)
for step in range(FLAGS.max_steps+1):
# for step in range(1):
if step % 1000 == 0:
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, step)
_, mloss = sess.run([train_op, total_loss])
print('step {}: total loss {}'.format(step, mloss))
if step%1000==0:
saver.save(sess, checkpoint_path, global_step=step)
# with tf.train.MonitoredTrainingSession(
# save_summaries_steps=10,
# checkpoint_dir=FLAGS.train_dir,
# hooks=[tf.train.StopAtStepHook(last_step=FLAGS.max_steps),
# tf.train.NanTensorHook(total_loss),
# _LoggerHook()],
# config=config) as mon_sess:
# while not mon_sess.should_stop():
# mon_sess.run(train_op)
def main(argv=None): # pylint: disable=unused-argument
cifar10.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()
``` |
{
"source": "66chenbiao/BI_learn",
"score": 3
} |
#### File: week3/homework/delicious_tagbased_tfidf.py
```python
"""
Recommend items from the Delicious2K data using the TagBased-TFIDF algorithm
Original dataset: https://grouplens.org/datasets/hetrec-2011/
Data format: userID bookmarkID tagID timestamp
"""
import random
import math
import operator
import pandas as pd
file_path = "user_taggedbookmarks-timestamps.dat"
# Dict that stores each user's tags per item, i.e. {userid: {item1: [tag1, tag2], ...}}
records = {}
# Training set and test set
train_data = dict()
test_data = dict()
# User/tag and tag/item count dictionaries
user_tags = dict()
tag_users = dict()
tag_items = dict()
user_items = dict()
# Load the data into records
def load_data():
print("开始数据加载...")
df = pd.read_csv(file_path, sep='\t')  # the data file is tab-separated
print(df.head(5))
for i in range(len(df)):
uid = df['userID'][i]
iid = df['bookmarkID'][i]
tag = df['tagID'][i]
# Set the default value {} when the key does not exist yet
records.setdefault(uid,{})
records[uid].setdefault(iid,[])
records[uid][iid].append(tag)
print("数据集大小为 %d." % (len(df)))
print("设置tag的人数 %d." % (len(records)))
print("数据加载完成\n")
# Split the dataset into a training set and a test set
def train_test_split(ratio, seed=100):
random.seed(seed) # seed the random number generator
for u in records.keys():
for i in records[u].keys():
# a fraction `ratio` of the records is assigned to the test set
if random.random()<ratio:
test_data.setdefault(u,{})
test_data[u].setdefault(i,[])
for t in records[u][i]:
test_data[u][i].append(t)
else:
train_data.setdefault(u,{})
train_data[u].setdefault(i,[])
for t in records[u][i]:
train_data[u][i].append(t)
print("训练集样本数 %d, 测试集样本数 %d" % (len(train_data),len(test_data)))
# Update the count matrix: mat[index][item] += value
def addValueToMat(mat, index, item, value=1):
if index not in mat:
mat.setdefault(index,{})
mat[index].setdefault(item,value)
else:
if item not in mat[index]:
mat[index][item] = value
else:
mat[index][item] += value
# Initialize user_tags, tag_users, tag_items and user_items from the training set
def initStat():
records=train_data
for u,items in records.items():
for i,tags in items.items():
for tag in tags:
#print tag
# user -> tag counts
addValueToMat(user_tags, u, tag, 1)
# tag -> user counts
addValueToMat(tag_users, tag, u, 1)
# tag -> item counts
addValueToMat(tag_items, tag, i, 1)
# user -> item counts
addValueToMat(user_items, u, i, 1)
print("user_tags, tag_items, user_items,tag_users初始化完成.")
print("user_tags大小 %d, tag_items大小 %d, user_items大小 %d, tag_users大小 %d" % (len(user_tags), len(tag_items), len(user_items),len(tag_users)))
# Recommend the Top-N items for the given user
def recommend(user, N):
recommend_item=dict()
# Score each candidate item: for every tag the user has used, add (times the user used the tag, utn) * (times the item was tagged with that tag, wti) / ln(1 + total number of times the tag was used by all users)
tagged_items = user_items[user]
for tag, utn in user_tags[user].items():
for item, wti in tag_items[tag].items():
if item in tagged_items:
continue
if item not in recommend_item:
recommend_item[item] = utn * wti / math.log(1 + sum(tag_users[tag].values()))
else:
recommend_item[item] += utn * wti / math.log(1 + sum(tag_users[tag].values()))
return sorted(recommend_item.items(), key=operator.itemgetter(1), reverse=True)[0:N]
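# Worked example of the score above (hypothetical counts): if the user used tag t 3 times,
# a candidate item was tagged with t 2 times, and t was used 9 times in total by all users,
# the item gains 3 * 2 / ln(1 + 9) ≈ 2.61 from tag t; the N highest-scoring items are returned.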
# Compute precision and recall on the test set
def precisionAndRecall(N):
hit = 0
h_recall = 0
h_precision = 0
for user,items in test_data.items():
if user not in train_data: # skip users that are not in train_data
continue
# get the Top-N recommendation list
rank = recommend(user, N)
for item,rui in rank:
if item in items:
hit = hit + 1
h_recall = h_recall + len(items)
h_precision = h_precision + N
#print('hits: %d, recommendations: %d, tagged items in the test set: %d' %(hit, h_precision, h_recall))
# return precision and recall
return (hit/(h_precision*1.0)), (hit/(h_recall*1.0))
# Evaluate the recommendation results on the test set
def testRecommend():
print("推荐结果评估")
print("%3s %10s %10s" % ('N',"精确率",'召回率'))
for n in [5,10,20,40,60,80,100]:
precision,recall = precisionAndRecall(n)
print("%3d %10.3f%% %10.3f%%" % (n, precision * 100, recall * 100))
# Load the data
load_data()
# Split into training and test sets, with 20% held out for testing
train_test_split(0.2)
initStat()
testRecommend()
```
#### File: BI_learn/week4/use_als.py
```python
from collections import defaultdict
from random import random
from itertools import product, chain
from time import time
def load_movie_ratings():
f = open("../data/ratings.csv")
lines = iter(f)
col_names = ", ".join(next(lines)[:-1].split(",")[:-1])
print("The column names are: %s." % col_names)
data = [[float(x) if i == 2 else int(x)
for i, x in enumerate(line[:-1].split(",")[:-1])]
for line in lines]
f.close()
return data
class Matrix(object):
def __init__(self, data):
self.data = data
self.shape = (len(data), len(data[0]))
def row(self, row_no):
return Matrix([self.data[row_no]])
def col(self, col_no):
m = self.shape[0]
return Matrix([[self.data[i][col_no]] for i in range(m)])
@property
def is_square(self):
return self.shape[0] == self.shape[1]
@property
def transpose(self):
data = list(map(list, zip(*self.data)))
return Matrix(data)
# Build an n x n identity matrix
def _eye(self, n):
return [[0 if i != j else 1 for j in range(n)] for i in range(n)]
@property
def eye(self):
assert self.is_square, "The matrix has to be square"
data = self._eye(self.shape[0])
return Matrix(data)
# Gaussian elimination
def gaussian_elimination(self, aug_matrix):
n = len(aug_matrix)
m = len(aug_matrix[0])
# From top to bottom.
for col_idx in range(n):
# Check if element on the diagonal is zero.
if aug_matrix[col_idx][col_idx] == 0:
row_idx = col_idx
# Find a row whose element has same column index with
# the element on the diagonal is not zero.
while row_idx < n and aug_matrix[row_idx][col_idx] == 0:
row_idx += 1
# Add this row to the row of the element on the diagonal.
for i in range(col_idx, m):
aug_matrix[col_idx][i] += aug_matrix[row_idx][i]
# Elimiate the non-zero element.
for i in range(col_idx + 1, n):
# Skip the zero element.
if aug_matrix[i][col_idx] == 0:
continue
# Elimiate the non-zero element.
k = aug_matrix[i][col_idx] / aug_matrix[col_idx][col_idx]
for j in range(col_idx, m):
aug_matrix[i][j] -= k * aug_matrix[col_idx][j]
# From bottom to top.
for col_idx in range(n - 1, -1, -1):
# Elimiate the non-zero element.
for i in range(col_idx):
# Skip the zero element.
if aug_matrix[i][col_idx] == 0:
continue
# Elimiate the non-zero element.
k = aug_matrix[i][col_idx] / aug_matrix[col_idx][col_idx]
for j in chain(range(i, col_idx + 1), range(n, m)):
aug_matrix[i][j] -= k * aug_matrix[col_idx][j]
# Iterate the element on the diagonal.
for i in range(n):
k = 1 / aug_matrix[i][i]
aug_matrix[i][i] *= k
for j in range(n, m):
aug_matrix[i][j] *= k
return aug_matrix
# Matrix inversion
def _inverse(self, data):
n = len(data)
unit_matrix = self._eye(n)
aug_matrix = [a + b for a, b in zip(self.data, unit_matrix)]
ret = self.gaussian_elimination(aug_matrix)
return list(map(lambda x: x[n:], ret))
# Matrix inversion via Gauss-Jordan elimination; reference: https://baike.baidu.com/item/%E9%AB%98%E6%96%AF%E6%B6%88%E5%85%83%E6%B3%95/619561?fr=aladdin
@property
def inverse(self):
assert self.is_square, "The matrix has to be square!"
data = self._inverse(self.data)
return Matrix(data)
def row_mul(self, row_A, row_B):
return sum(x[0] * x[1] for x in zip(row_A, row_B))
def _mat_mul(self, row_A, B):
row_pairs = product([row_A], B.transpose.data)
return [self.row_mul(*row_pair) for row_pair in row_pairs]
def mat_mul(self, B):
assert self.shape[1] == B.shape[0], "A's column count does not match B's row count!"
return Matrix([self._mat_mul(row_A, B) for row_A in self.data])
def _mean(self, data):
m = len(data)
n = len(data[0])
ret = [0 for _ in range(n)]
for row in data:
for j in range(n):
ret[j] += row[j] / m
return ret
def mean(self, data):
return Matrix([self._mean(self.data)])
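# Quick illustrative check of the Matrix helpers (a made-up 2x2 example, not from the dataset):
# m = Matrix([[4.0, 7.0], [2.0, 6.0]])
# inv = m.inverse          # roughly [[0.6, -0.7], [-0.2, 0.4]]
# m.mat_mul(inv).data      # close to the 2x2 identity matrix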
# Decorator that measures and prints a function's total run time
# fn is the function to be run
def run_time(fn):
def fun():
start = time()
fn()
ret = time() - start
if ret < 1e-6:
unit = "ns"
ret *= 1e9
elif ret < 1e-3:
unit = "us"
ret *= 1e6
elif ret < 1:
unit = "ms"
ret *= 1e3
else:
unit = "s"
print("Total run time is %.1f %s\n" % (ret, unit))
return fun()
class ALS(object):
# Initialization: store the user IDs, item IDs, the mapping from user ID to user-matrix column,
# the mapping from item ID to item-matrix column, which items each user has rated, the shape of the rating matrix, and the RMSE
def __init__(self):
self.user_ids = None
self.item_ids = None
self.user_ids_dict = None
self.item_ids_dict = None
self.user_matrix = None
self.item_matrix = None
self.user_items = None
self.shape = None
self.rmse = None
# Preprocess the training data to get the user IDs, item IDs, the user-ID-to-column and
# item-ID-to-column mappings, the shape of the rating matrix, and the rating matrix together with its transpose.
def process_data(self, X):
self.user_ids = tuple((set(map(lambda x: x[0], X))))
self.user_ids_dict = dict(map(lambda x: x[::-1], enumerate(self.user_ids)))
self.item_ids = tuple((set(map(lambda x: x[1], X))))
self.item_ids_dict = dict(map(lambda x: x[::-1], enumerate(self.item_ids)))
self.shape = (len(self.user_ids), len(self.item_ids))
ratings = defaultdict(lambda : defaultdict(int))
ratings_T = defaultdict(lambda : defaultdict(int))
for row in X:
user_id, item_id, rating = row
ratings[user_id][item_id] = rating
ratings_T[item_id][user_id] = rating
err_msg = "Length of user_ids %d and ratings %d not match!" % (
len(self.user_ids), len(ratings))
assert len(self.user_ids) == len(ratings), err_msg
err_msg = "Length of item_ids %d and ratings_T %d not match!" % (
len(self.item_ids), len(ratings_T))
assert len(self.item_ids) == len(ratings_T), err_msg
return ratings, ratings_T
# Multiply the user matrix by the rating matrix (a dense-by-sparse matrix multiplication).
def users_mul_ratings(self, users, ratings_T):
def f(users_row, item_id):
user_ids = iter(ratings_T[item_id].keys())
scores = iter(ratings_T[item_id].values())
col_nos = map(lambda x: self.user_ids_dict[x], user_ids)
_users_row = map(lambda x: users_row[x], col_nos)
return sum(a * b for a, b in zip(_users_row, scores))
ret = [[f(users_row, item_id) for item_id in self.item_ids]
for users_row in users.data]
return Matrix(ret)
# Multiply the item matrix by the rating matrix (a dense-by-sparse matrix multiplication).
def items_mul_ratings(self, items, ratings):
def f(items_row, user_id):
item_ids = iter(ratings[user_id].keys())
scores = iter(ratings[user_id].values())
col_nos = map(lambda x: self.item_ids_dict[x], item_ids)
_items_row = map(lambda x: items_row[x], col_nos)
return sum(a * b for a, b in zip(_items_row, scores))
ret = [[f(items_row, user_id) for user_id in self.user_ids]
for items_row in items.data]
return Matrix(ret)
# Generate a random matrix
def gen_random_matrix(self, n_rows, n_colums):
data = [[random() for _ in range(n_colums)] for _ in range(n_rows)]
return Matrix(data)
# Compute the RMSE
def get_rmse(self, ratings):
m, n = self.shape
mse = 0.0
n_elements = sum(map(len, ratings.values()))
for i in range(m):
for j in range(n):
user_id = self.user_ids[i]
item_id = self.item_ids[j]
rating = ratings[user_id][item_id]
if rating > 0:
user_row = self.user_matrix.col(i).transpose
item_col = self.item_matrix.col(j)
rating_hat = user_row.mat_mul(item_col).data[0][0]
square_error = (rating - rating_hat) ** 2
mse += square_error / n_elements
return mse ** 0.5
# Train the model
# 1. Preprocess the data
# 2. Check that the parameter k is valid
# 3. Generate the random matrix U
# 4. Alternately update matrices U and I, printing the RMSE, until max_iter iterations are reached
# 5. Save the final RMSE
def fit(self, X, k, max_iter=10):
ratings, ratings_T = self.process_data(X)
self.user_items = {k: set(v.keys()) for k,v in ratings.items()}
m, n = self.shape
error_msg = "Parameter k must be less than the rank of original matrix"
assert k < min(m, n), error_msg
self.user_matrix = self.gen_random_matrix(k, m)
for i in range(max_iter):
if i % 2:
items = self.item_matrix
self.user_matrix = self.items_mul_ratings(
items.mat_mul(items.transpose).inverse.mat_mul(items),
ratings
)
else:
users = self.user_matrix
self.item_matrix = self.users_mul_ratings(
users.mat_mul(users.transpose).inverse.mat_mul(users),
ratings_T
)
rmse = self.get_rmse(ratings)
print("Iterations: %d, RMSE: %.6f" % (i + 1, rmse))
self.rmse = rmse
# Predict recommendations for a single user
def _predict(self, user_id, n_items):
users_col = self.user_matrix.col(self.user_ids_dict[user_id])
users_col = users_col.transpose
items_col = enumerate(users_col.mat_mul(self.item_matrix).data[0])
items_scores = map(lambda x: (self.item_ids[x[0]], x[1]), items_col)
viewed_items = self.user_items[user_id]
items_scores = filter(lambda x: x[0] not in viewed_items, items_scores)
return sorted(items_scores, key=lambda x: x[1], reverse=True)[:n_items]
# Predict recommendations for multiple users
def predict(self, user_ids, n_items=10):
return [self._predict(user_id, n_items) for user_id in user_ids]
def format_prediction(item_id, score):
return "item_id:%d score:%.2f" % (item_id, score)
@run_time
def main():
print("Tesing the accuracy of ALS...")
X = load_movie_ratings()
model = ALS()
model.fit(X, k=3, max_iter=5)
print("Showing the predictions of users...")
user_ids = range(1, 5)
predictions = model.predict(user_ids, n_items=2)
for user_id, prediction in zip(user_ids, predictions):
_prediction = [format_prediction(item_id, score)
for item_id, score in prediction]
print("User id:%d recommedation: %s" % (user_id, _prediction))
``` |
{
"source": "66chenbiao/sleepace_verification_tool",
"score": 4
} |
#### File: sleepace_verification_tool/data-structure/queue.py
```python
import unittest
class Queue:
"""Simple Queue implementation - First in First Out"""
def __init__(self):
self.__data = []
def enqueue(self, text):
"""Add new element to queue
Arguments:
text {string} -- An element which needs to be added to an end of a queue
"""
self.__data.append(text)
def dequeue(self):
"""Gets a first element in a front of a queue
Returns:
string -- A first element in a front of a queue
"""
if len(self.__data) == 0:
return None
taken = self.__data[0]
new_queue = []
for index in range(1, len(self.__data)):
new_queue.append(self.__data[index])
self.__data = new_queue
return taken
def front(self):
"""Checks a first element in a front of a queue
Returns:
string -- A first element in a front of a queue
"""
if len(self.__data) == 0:
return None
return self.__data[0]
def rear(self):
"""Checks a last element in a queue
Returns:
string -- A last element in a queue
"""
if len(self.__data) == 0:
return None
return self.__data[-1]
class QueueTest(unittest.TestCase):
def test_empty_queue(self):
queue = Queue()
self.assertIsNone(queue.front())
self.assertIsNone(queue.rear())
self.assertIsNone(queue.dequeue())
def test_add_one(self):
queue = Queue()
queue.enqueue("one")
self.assertEqual(queue.front(), "one", "Should be 'one'")
self.assertEqual(queue.rear(), "one", "Should be 'one'")
def test_add_three(self):
queue = Queue()
queue.enqueue("one")
queue.enqueue("two")
queue.enqueue("three")
self.assertEqual(queue.front(), "one", "Should be 'one'")
self.assertEqual(queue.rear(), "three", "Should be 'three'")
def test_add_three_get_one(self):
queue = Queue()
queue.enqueue("one")
queue.enqueue("two")
queue.enqueue("three")
taken = queue.dequeue()
self.assertEqual(queue.front(), "two", "Should be 'two'")
self.assertEqual(queue.rear(), "three", "Should be 'three'")
self.assertEqual(taken, "one", "Should be 'one'")
if __name__ == "__main__":
unittest.main()
```
#### File: science/chemistry/organic.py
```python
import re
import unittest
def carbon_content(formula):
"""Get carbon mass concentration in the compound
Arguments:
formula {String} -- Formula of the organic compound
(should include only C, H or O atoms)
Returns:
float -- Carbon mass concentration ratio
"""
weight = {"H": 1, "C": 12, "O": 16}
carbons = __get_carbons(formula)
hydrogens = __get_hydrogens(formula)
oxygens = 0
if not (re.search("O", formula) is None):
oxygens = __get_oxygens(formula)
return float(
format(
100.0
* carbons
* weight.get("C")
/ (
carbons * weight.get("C")
+ hydrogens * weight.get("H")
+ oxygens * weight.get("O")
),
".1f",
)
)
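# Worked example: for C2H6O (ethanol) the carbon mass fraction is
# 100 * 2*12 / (2*12 + 6*1 + 1*16) = 2400 / 46 ≈ 52.2, matching the unit test below.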
def hydrocarbon_class(formula):
"""Indicates if provided organic compound is a hydrocarbon
Arguments:
formula {String} -- Formula of the organic compound
Returns:
String -- Indicates what type of hydrocarbon is the compound
(alkane/alkene/alkyne)
"""
if __is_hydrocarbon(formula):
hydrogens = __get_hydrogens(formula)
carbons = __get_carbons(formula)
if __is_alkane(carbons, hydrogens):
return "%s is an alkane" % (formula)
elif __is_alkene(carbons, hydrogens):
return "%s is an alkene" % (formula)
elif __is_alkyne(carbons, hydrogens):
return "%s is an alkyne" % (formula)
return "%s is not a hydrocarbon" % (formula)
def __is_hydrocarbon(formula):
pattern = "C\\d*H\\d+"
if re.match(pattern, formula):
carbons_match = re.match("C\\d*", formula)
hydrogens_match = re.search("(H\\d+)$", formula)
if not (carbons_match is None) and not (hydrogens_match is None):
return True
return False
return False
def __is_alkane(carbons, hydrogens):
if 2 * carbons + 2 == hydrogens:
return True
return False
def __is_alkene(carbons, hydrogens):
if 2 * carbons == hydrogens:
return True
return False
def __is_alkyne(carbons, hydrogens):
if 2 * carbons - 2 == hydrogens:
return True
return False
def __get_carbons(formula):
return (
int(re.match("C\\d*", formula).group()[1:]) if re.match("C\\d+", formula) else 1
)
def __get_hydrogens(formula):
return int(re.search("(H\\d+)", formula).group()[1:])
def __get_oxygens(formula):
return (
int(re.search("(O\\d+)", formula).group()[1:])
if re.match("O\\d+", formula)
else 1
)
class OrganicTest(unittest.TestCase):
def test_carbon_content(self):
self.assertEqual(carbon_content("C2H6"), 80.0)
self.assertEqual(carbon_content("C2H6O"), 52.2)
def test_hydrocarbon_class(self):
self.assertEqual(hydrocarbon_class("DCax"), "DCax is not a hydrocarbon")
self.assertEqual(hydrocarbon_class("CH"), "CH is not a hydrocarbon")
self.assertEqual(hydrocarbon_class("C2H6"), "C2H6 is an alkane")
self.assertEqual(hydrocarbon_class("C6H14"), "C6H14 is an alkane")
self.assertEqual(hydrocarbon_class("C2H4"), "C2H4 is an alkene")
self.assertEqual(hydrocarbon_class("C5H10"), "C5H10 is an alkene")
self.assertEqual(hydrocarbon_class("C2H2"), "C2H2 is an alkyne")
self.assertEqual(hydrocarbon_class("C8H14"), "C8H14 is an alkyne")
if __name__ == "__main__":
unittest.main()
``` |
{
"source": "66eli77/kolibri-ui-tracking-plugin",
"score": 2
} |
#### File: kolibri-ui-tracking-plugin/ui_tracker/views.py
```python
from __future__ import absolute_import, print_function, unicode_literals
import urllib
import os
from django.http import HttpResponse
def UITrackerView(request):
# import pdb; pdb.set_trace();
path = os.path.dirname(os.path.realpath(__file__))+'/ui_tracking.txt'
with open(path, "a") as myfile:
myfile.write(urllib.unquote('{'+request.META['QUERY_STRING'])+'}')
return HttpResponse(status=204)
``` |
{
"source": "66ru/yandex-maps",
"score": 3
} |
#### File: yandex-maps/yandex_maps/api.py
```python
import xml.dom.minidom
import urllib
from yandex_maps import http
STATIC_MAPS_URL = 'https://static-maps.yandex.ru/1.x/?'
HOSTED_MAPS_URL = 'https://maps.yandex.ru/?'
GEOCODE_URL = 'https://geocode-maps.yandex.ru/1.x/?'
def _format_point(longitude, latitude):
return '%0.7f,%0.7f' % (float(longitude), float(latitude),)
def get_map_url(api_key, longitude, latitude, zoom, width, height):
''' returns URL of static yandex map '''
point = _format_point(longitude, latitude)
params = [
'll=%s' % point,
'size=%d,%d' % (width, height,),
'z=%d' % zoom,
'l=map',
'pt=%s' % point,
'key=%s' % api_key
]
return STATIC_MAPS_URL + '&'.join(params)
def get_external_map_url(longitude, latitude, zoom=14):
''' returns URL of hosted yandex map '''
point = _format_point(longitude, latitude)
params = dict(
ll = point,
pt = point,
l = 'map',
)
if zoom is not None:
params['z'] = zoom
return HOSTED_MAPS_URL + urllib.urlencode(params)
def geocode(api_key, address, timeout=2):
''' returns a (longitude, latitude) tuple for the given address '''
try:
xml = _get_geocode_xml(api_key, address, timeout)
return _get_coords(xml)
except IOError:
return None, None
def _get_geocode_xml(api_key, address, timeout=2):
url = _get_geocode_url(api_key, address)
status_code, response = http.request('GET', url, timeout=timeout)
return response
def _get_geocode_url(api_key, address):
if isinstance(address, unicode):
address = address.encode('utf8')
params = urllib.urlencode({'geocode': address, 'key': api_key})
return GEOCODE_URL + params
def _get_coords(response):
try:
dom = xml.dom.minidom.parseString(response)
pos_elem = dom.getElementsByTagName('pos')[0]
pos_data = pos_elem.childNodes[0].data
return tuple(pos_data.split())
except IndexError:
return None, None
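# Illustrative calls (the API key and address below are placeholders, not working values):
# url = get_external_map_url(60.5975, 56.8380, zoom=15)
# lon, lat = geocode("YOUR_API_KEY", u"Moscow, Red Square 1")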
```
#### File: yandex-maps/yandex_maps/models.py
```python
from django.db import models
from django.conf import settings
from yandex_maps import api
YANDEX_KEY = getattr(settings, 'YANDEX_MAPS_API_KEY', None)
def get_static_map_url(longitude, latitude, width=None, height=None, detail_level=14):
"""
Returns the URL of a static map, taking the settings in settings.py into account
"""
w = int(width) if width else settings.YANDEX_MAPS_W
h = int(height) if height else settings.YANDEX_MAPS_H
detail_level = int(detail_level)
return api.get_map_url(YANDEX_KEY, longitude, latitude, detail_level, w, h)
class MapAndAddress(models.Model):
address = models.CharField(u'Адрес', max_length=255, blank=True, db_index=True)
longitude = models.FloatField(u'Долгота', null=True, blank=True)
latitude = models.FloatField(u'Широта', null=True, blank=True)
def get_detail_level(self):
return 5
def get_map_url(self, width=None, height=None, detail_level = 5):
if YANDEX_KEY is None:
return ""
return get_static_map_url(self.longitude, self.latitude, width, height, detail_level)
def get_external_map_url(self, detail_level=14):
return api.get_external_map_url(self.longitude, self.latitude, detail_level)
def fill_geocode_data(self):
if YANDEX_KEY is not None:
self.longitude, self.latitude = api.geocode(settings.YANDEX_MAPS_API_KEY, self.address)
def save(self, *args, **kwargs):
# fill geocode data if it is unknown
if self.pk or (self.longitude is None) or (self.latitude is None):
self.fill_geocode_data()
super(MapAndAddress, self).save(*args, **kwargs)
def __unicode__(self):
return self.address
``` |
{
"source": "66Volts/SublimeTextXdebug",
"score": 3
} |
#### File: xdebug/helper/helper_27.py
```python
import base64
from urllib import unquote, quote
from collections import OrderedDict
def modulename():
return "Helper module for Python version 2.7"
def url_decode(uri):
return unquote(uri)
def url_encode(uri):
return quote(uri)
def new_dictionary():
return OrderedDict()
def dictionary_keys(dictionary):
return list(dictionary.keys())
def dictionary_values(dictionary):
return list(dictionary.values())
def data_read(data):
# Data for reading/receiving already a string in version 2.*
return data
def data_write(data):
# Using string in version 2.* for sending/writing data
return data
def base64_decode(data):
return base64.b64decode(data)
def base64_encode(data):
return base64.b64encode(data)
def unicode_chr(code):
return unichr(code)
def unicode_string(string):
if isinstance(string, unicode):
return string
return string.decode('utf8', 'replace')
def is_digit(string):
# Check if basestring (str, unicode) is digit
return isinstance(string, basestring) and string.isdigit()
``` |
{
"source": "674106399/nanopose",
"score": 2
} |
#### File: model/loss/bone_loss.py
```python
import torch
import torch.nn as nn
class JointBoneLoss(nn.Module):
def __init__(self, joint_num):
super(JointBoneLoss, self).__init__()
id_i, id_j = [], []
for i in range(joint_num):
for j in range(i+1, joint_num):
id_i.append(i)
id_j.append(j)
self.id_i = id_i
self.id_j = id_j
def forward(self, joint_out, joint_gt, weights=None):
if weights is not None:
joint_out = joint_out * weights
joint_gt = joint_gt * weights
J = torch.norm(joint_out[:,self.id_i,:] - joint_out[:,self.id_j,:], p='fro', dim=-1, keepdim=False)
Y = torch.norm(joint_gt[:,self.id_i,:] - joint_gt[:,self.id_j,:], p='fro', dim=-1, keepdim=False)
loss = torch.abs(J-Y)
return loss.sum() / joint_out.shape[0]
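# Minimal usage sketch (shapes are assumptions: a batch of 2 poses, 17 joints, 3D coordinates):
# criterion = JointBoneLoss(joint_num=17)
# pred, gt = torch.randn(2, 17, 3), torch.randn(2, 17, 3)
# loss = criterion(pred, gt)  # sum over joint pairs of |predicted - gt bone length|, averaged over the batch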
```
#### File: model/module/conv.py
```python
import warnings
import numpy as np
import torch
import torch.nn as nn
from .activation import act_layers
from .init_weights import constant_init, kaiming_init
from .norm import build_norm_layer
class ConvModule(nn.Module):
"""A conv block that contains conv/norm/activation layers.
Args:
in_channels (int): Same as nn.Conv2d.
out_channels (int): Same as nn.Conv2d.
kernel_size (int or tuple[int]): Same as nn.Conv2d.
stride (int or tuple[int]): Same as nn.Conv2d.
padding (int or tuple[int]): Same as nn.Conv2d.
dilation (int or tuple[int]): Same as nn.Conv2d.
groups (int): Same as nn.Conv2d.
bias (bool or str): If specified as `auto`, it will be decided by the
norm_cfg. Bias will be set as True if norm_cfg is None, otherwise
False.
conv_cfg (dict): Config dict for convolution layer.
norm_cfg (dict): Config dict for normalization layer.
activation (str): activation layer, "ReLU" by default.
inplace (bool): Whether to use inplace mode for activation.
order (tuple[str]): The order of conv/norm/activation layers. It is a
sequence of "conv", "norm" and "act". Examples are
("conv", "norm", "act") and ("act", "conv", "norm").
"""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias="auto",
conv_cfg=None,
norm_cfg=None,
activation="ReLU",
inplace=True,
order=("conv", "norm", "act"),
):
super(ConvModule, self).__init__()
assert conv_cfg is None or isinstance(conv_cfg, dict)
assert norm_cfg is None or isinstance(norm_cfg, dict)
assert activation is None or isinstance(activation, str)
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.activation = activation
self.inplace = inplace
self.order = order
assert isinstance(self.order, tuple) and len(self.order) == 3
assert set(order) == {"conv", "norm", "act"}
self.with_norm = norm_cfg is not None
# if the conv layer is before a norm layer, bias is unnecessary.
if bias == "auto":
bias = False if self.with_norm else True
self.with_bias = bias
if self.with_norm and self.with_bias:
warnings.warn("ConvModule has norm and bias at the same time")
# build convolution layer
self.conv = nn.Conv2d( #
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
)
# export the attributes of self.conv to a higher level for convenience
self.in_channels = self.conv.in_channels
self.out_channels = self.conv.out_channels
self.kernel_size = self.conv.kernel_size
self.stride = self.conv.stride
self.padding = self.conv.padding
self.dilation = self.conv.dilation
self.transposed = self.conv.transposed
self.output_padding = self.conv.output_padding
self.groups = self.conv.groups
# build normalization layers
if self.with_norm:
# norm layer is after conv layer
if order.index("norm") > order.index("conv"):
norm_channels = out_channels
else:
norm_channels = in_channels
self.norm_name, norm = build_norm_layer(norm_cfg, norm_channels)
self.add_module(self.norm_name, norm)
else:
self.norm_name = None
# build activation layer
if self.activation:
self.act = act_layers(self.activation)
# Use msra init by default
self.init_weights()
@property
def norm(self):
if self.norm_name:
return getattr(self, self.norm_name)
else:
return None
def init_weights(self):
if self.activation == "LeakyReLU":
nonlinearity = "leaky_relu"
else:
nonlinearity = "relu"
kaiming_init(self.conv, nonlinearity=nonlinearity)
if self.with_norm:
constant_init(self.norm, 1, bias=0)
def forward(self, x, norm=True):
for layer in self.order:
if layer == "conv":
x = self.conv(x)
elif layer == "norm" and norm and self.with_norm:
x = self.norm(x)
elif layer == "act" and self.activation:
x = self.act(x)
return x
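# Example instantiation (argument values are illustrative, not taken from any config in this repo):
# conv = ConvModule(16, 32, 3, padding=1, norm_cfg=dict(type="BN"), activation="ReLU")
# out = conv(torch.randn(1, 16, 64, 64))  # -> shape (1, 32, 64, 64)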
class DepthwiseConvModule(nn.Module):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
bias="auto",
norm_cfg=dict(type="BN"),
activation="ReLU",
inplace=True,
order=("depthwise", "dwnorm", "act", "pointwise", "pwnorm", "act"),
):
super(DepthwiseConvModule, self).__init__()
assert activation is None or isinstance(activation, str)
self.activation = activation
self.inplace = inplace
self.order = order
assert isinstance(self.order, tuple) and len(self.order) == 6
assert set(order) == {
"depthwise",
"dwnorm",
"act",
"pointwise",
"pwnorm",
"act",
}
self.with_norm = norm_cfg is not None
# if the conv layer is before a norm layer, bias is unnecessary.
if bias == "auto":
bias = False if self.with_norm else True
self.with_bias = bias
if self.with_norm and self.with_bias:
warnings.warn("ConvModule has norm and bias at the same time")
# build convolution layer
self.depthwise = nn.Conv2d(
in_channels,
in_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=in_channels,
bias=bias,
)
self.pointwise = nn.Conv2d(
in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=bias
)
# export the attributes of self.conv to a higher level for convenience
self.in_channels = self.depthwise.in_channels
self.out_channels = self.pointwise.out_channels
self.kernel_size = self.depthwise.kernel_size
self.stride = self.depthwise.stride
self.padding = self.depthwise.padding
self.dilation = self.depthwise.dilation
self.transposed = self.depthwise.transposed
self.output_padding = self.depthwise.output_padding
# build normalization layers
if self.with_norm:
# norm layer is after conv layer
_, self.dwnorm = build_norm_layer(norm_cfg, in_channels)
_, self.pwnorm = build_norm_layer(norm_cfg, out_channels)
# build activation layer
if self.activation:
self.act = act_layers(self.activation)
# Use msra init by default
self.init_weights()
def init_weights(self):
if self.activation == "LeakyReLU":
nonlinearity = "leaky_relu"
else:
nonlinearity = "relu"
kaiming_init(self.depthwise, nonlinearity=nonlinearity)
kaiming_init(self.pointwise, nonlinearity=nonlinearity)
if self.with_norm:
constant_init(self.dwnorm, 1, bias=0)
constant_init(self.pwnorm, 1, bias=0)
def forward(self, x, norm=True):
for layer_name in self.order:
if layer_name != "act":
layer = self.__getattr__(layer_name)
x = layer(x)
elif layer_name == "act" and self.activation:
x = self.act(x)
return x
class RepVGGConvModule(nn.Module):
"""
RepVGG Conv Block from paper RepVGG: Making VGG-style ConvNets Great Again
https://arxiv.org/abs/2101.03697
https://github.com/DingXiaoH/RepVGG
"""
def __init__(
self,
in_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1,
dilation=1,
groups=1,
activation="ReLU",
padding_mode="zeros",
deploy=False,
**kwargs
):
super(RepVGGConvModule, self).__init__()
assert activation is None or isinstance(activation, str)
self.activation = activation
self.deploy = deploy
self.groups = groups
self.in_channels = in_channels
assert kernel_size == 3
assert padding == 1
padding_11 = padding - kernel_size // 2
# build activation layer
if self.activation:
self.act = act_layers(self.activation)
if deploy:
self.rbr_reparam = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=True,
padding_mode=padding_mode,
)
else:
self.rbr_identity = (
nn.BatchNorm2d(num_features=in_channels)
if out_channels == in_channels and stride == 1
else None
)
self.rbr_dense = nn.Sequential(
nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
groups=groups,
bias=False,
),
nn.BatchNorm2d(num_features=out_channels),
)
self.rbr_1x1 = nn.Sequential(
nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
stride=stride,
padding=padding_11,
groups=groups,
bias=False,
),
nn.BatchNorm2d(num_features=out_channels),
)
print("RepVGG Block, identity = ", self.rbr_identity)
def forward(self, inputs):
if hasattr(self, "rbr_reparam"):
return self.act(self.rbr_reparam(inputs))
if self.rbr_identity is None:
id_out = 0
else:
id_out = self.rbr_identity(inputs)
return self.act(self.rbr_dense(inputs) + self.rbr_1x1(inputs) + id_out)
# This func derives the equivalent kernel and bias in a DIFFERENTIABLE way.
# You can get the equivalent kernel and bias at any time and do whatever you want,
# for example, apply some penalties or constraints during training, just like you
# do to the other models. May be useful for quantization or pruning.
def get_equivalent_kernel_bias(self):
kernel3x3, bias3x3 = self._fuse_bn_tensor(self.rbr_dense)
kernel1x1, bias1x1 = self._fuse_bn_tensor(self.rbr_1x1)
kernelid, biasid = self._fuse_bn_tensor(self.rbr_identity)
return (
kernel3x3 + self._pad_1x1_to_3x3_tensor(kernel1x1) + kernelid,
bias3x3 + bias1x1 + biasid,
)
def _pad_1x1_to_3x3_tensor(self, kernel1x1):
if kernel1x1 is None:
return 0
else:
return nn.functional.pad(kernel1x1, [1, 1, 1, 1])
def _fuse_bn_tensor(self, branch):
if branch is None:
return 0, 0
if isinstance(branch, nn.Sequential):
kernel = branch[0].weight
running_mean = branch[1].running_mean
running_var = branch[1].running_var
gamma = branch[1].weight
beta = branch[1].bias
eps = branch[1].eps
else:
assert isinstance(branch, nn.BatchNorm2d)
if not hasattr(self, "id_tensor"):
input_dim = self.in_channels // self.groups
kernel_value = np.zeros(
(self.in_channels, input_dim, 3, 3), dtype=np.float32
)
for i in range(self.in_channels):
kernel_value[i, i % input_dim, 1, 1] = 1
self.id_tensor = torch.from_numpy(kernel_value).to(branch.weight.device)
kernel = self.id_tensor
running_mean = branch.running_mean
running_var = branch.running_var
gamma = branch.weight
beta = branch.bias
eps = branch.eps
std = (running_var + eps).sqrt()
t = (gamma / std).reshape(-1, 1, 1, 1)
return kernel * t, beta - running_mean * gamma / std
def repvgg_convert(self):
kernel, bias = self.get_equivalent_kernel_bias()
return (
kernel.detach().cpu().numpy(),
bias.detach().cpu().numpy(),
)
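# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# The comment above get_equivalent_kernel_bias() explains that the 3x3+BN,
# 1x1+BN and identity-BN branches can be fused into one 3x3 convolution.
# The helper below sketches how that fusion might be applied at deploy time.
# The name `convert_to_deploy` and the assumption that `block` is a trained,
# non-deploy RepVGGConvModule are mine, not the original author's.
def convert_to_deploy(block):
    """Return a plain nn.Conv2d whose output matches the multi-branch block."""
    kernel, bias = block.get_equivalent_kernel_bias()
    fused = nn.Conv2d(
        in_channels=block.in_channels,
        out_channels=kernel.shape[0],
        kernel_size=3,
        stride=block.rbr_dense[0].stride,
        padding=1,
        groups=block.groups,
        bias=True,
    )
    fused.weight.data = kernel
    fused.bias.data = bias
    return fused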
```
#### File: model/neck/gap_neck.py
```python
import torch
import torch.nn as nn
class GlobalAveragePooling(nn.Module):
"""Global Average Pooling neck.
Note that we use `view` to remove extra channel after pooling. We do not
use `squeeze` as it will also remove the batch dimension when the tensor
has a batch dimension of size 1, which can lead to unexpected errors.
"""
def __init__(self, output_size=(1, 1)):
super().__init__()
self.gap = nn.AdaptiveAvgPool2d(output_size)
def init_weights(self):
pass
def forward(self, inputs):
if isinstance(inputs, tuple):
outs = tuple([self.gap(x) for x in inputs])
outs = tuple(
[out.view(x.size(0), -1) for out, x in zip(outs, inputs)])
elif isinstance(inputs, list):
outs = [self.gap(x) for x in inputs]
outs = [out.view(x.size(0), -1) for out, x in zip(outs, inputs)]
elif isinstance(inputs, torch.Tensor):
outs = self.gap(inputs)
outs = outs.view(inputs.size(0), -1)
else:
            raise TypeError('neck inputs should be a tuple, list or torch.Tensor')
return outs
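# --- Hedged illustration (added; not part of the original module) ---
# The docstring's point about `view` vs `squeeze`: with a batch of size 1,
# squeeze() would also drop the batch dimension, while the view-based reshape
# keeps it. The tensor shape below is arbitrary.
def _demo_gap():
    gap = GlobalAveragePooling()
    x = torch.randn(1, 64, 7, 7)
    print(gap(x).shape)                # torch.Size([1, 64])
    print(gap.gap(x).squeeze().shape)  # torch.Size([64]) -- batch dimension lost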
```
#### File: nanopose/utils/path.py
```python
import os
from .rank_filter import rank_filter
@rank_filter
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def collect_files(path, exts):
file_paths = []
for maindir, subdir, filename_list in os.walk(path):
for filename in filename_list:
file_path = os.path.join(maindir, filename)
ext = os.path.splitext(file_path)[1]
if ext in exts:
file_paths.append(file_path)
return file_paths
``` |
{
"source": "674106399/Perceptron-python",
"score": 3
} |
#### File: 674106399/Perceptron-python/gaussian_kernel.py
```python
import math
class GaussianPerceptron():
def __init__(self, inputs, targets, n, d, sigma):
        super().__init__()
assert n == len(inputs), 'number of inputs is not equal to n'
assert d == len(inputs[0]), 'number of attributes is not equal to d'
self.w = [0 for i in range(d)]
self.inputs = inputs
self.targets = targets
self.sigma = sigma
self.final_w = []
self.final_label = []
def kernel_gaussian(self, x1, x2, sigma=5.0):
if self.sigma:
sigma = self.sigma
L2_norm = 0
for d in range(len(x1)):
L2_norm += (x1[d] - x2[d]) ** 2
return math.exp(- L2_norm / (2 * (sigma ** 2)))
def get_label(self, idx): # map 1/0 to 1/-1
if self.targets[idx] != int(1):
label = int(-1)
else:
label = self.targets[idx]
return label
def train(self):
global iteration
iteration = True
all_w = []
labels = []
all_w.append(self.inputs[0]) # the first point is bound to be preserved
labels.append(self.get_label(0))
iteration_num = 0
while iteration:
for idx, each in enumerate(self.inputs[1:]):
label = self.get_label(idx+1)
total_m = 0
for k in range(len(all_w)):
m = self.kernel_gaussian(all_w[k], each)
total_m += m * labels[k] # for violation points, if its label=1, its mapped result will be added
if total_m * label < 0:
all_w.append(self.inputs[idx+1]) # violation, preserve this point
labels.append(label)
break
if idx == len(self.inputs)-2: # so far so good
iteration = False
if iteration_num > 70: # if iteration over 70, stop it and get result
iteration = False
iteration_num += 1
            print('iteration: ', iteration_num)
print('Finish')
self.final_w = all_w
self.final_label = labels
def predict(self, input_data):
# input_data: test data
# return accuracy of prediction
total_m = 0
for k in range(len(self.final_w)):
m = self.kernel_gaussian(self.final_w[k], input_data)
total_m += m * self.final_label[k]
return int(total_m > 0)
def acc(self, inputs, targets):
# inputs: test data
# targets: test label
# return accuracy of prediction
correct = 0
for idx, each in enumerate(inputs):
correct += self.predict(each) == targets[idx]
return correct / len(inputs)
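# --- Hedged usage sketch (added for illustration; not part of the original repository) ---
# Minimal example of driving the kernel perceptron end to end. The XOR-style
# toy data and the sigma value below are invented purely for illustration.
if __name__ == '__main__':
    toy_inputs = [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]
    toy_targets = [0, 1, 1, 0]
    model = GaussianPerceptron(toy_inputs, toy_targets, n=4, d=2, sigma=1.0)
    model.train()
    print('training accuracy:', model.acc(toy_inputs, toy_targets))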
``` |
{
"source": "674197141/auto-worker",
"score": 3
} |
#### File: auto-worker/compose/watch_eth.py
```python
import requests
from module.timer.task_time import scheduler
from loguru import logger
import json
header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'
}
last_price = None
def watch_eth_info():
url = 'https://api.yitaifang.com/currentPrices/?markets%5B%5D=eth%7Cusdt%7Cbinance'
res = requests.get(url, headers=header)
data = res.json()
logger.info("watch_eth_info:%s" % json.dumps(data))
price = data["data"]['binance']['eth-usdt']['price']
global last_price
if last_price:
f = (price - last_price) / last_price
if abs(f) >= 0.05:
pass
print(f)
last_price = price
scheduler.add_job(
watch_eth_info,
trigger='interval',
minutes=10
)
```
#### File: auto-worker/compose/watch_okx.py
```python
import requests
from module.timer.task_time import scheduler
from loguru import logger
import json
from config import get_config
import datetime
import base64
import hmac
from hashlib import sha256
import json
from config.okx_api import *
from module.utils import okx_utils
from module.notice.mail import Mail
okx_config = get_config('okx')
base_url = okx_config.get('url')
mail_config = get_config('base')
to_mail = mail_config.get('to_mail')
def get_url(path):
return base_url+path
def get_header(method, path, body=''):
time = okx_utils.get_timestamp()
if not isinstance(body, str):
body = json.dumps(body)
base64_data = (time+method+path+body).encode('utf-8')
acc_sign = okx_config.get('accSign').encode('utf-8')
sign = base64.b64encode(
hmac.new(acc_sign, base64_data, digestmod=sha256).digest())
header = {
'Content-Type': 'application/json',
'OK-ACCESS-KEY': okx_config.get('accKey'),
'OK-ACCESS-SIGN': sign,
'OK-ACCESS-TIMESTAMP': time,
'OK-ACCESS-PASSPHRASE': okx_config.get('accPass'),
        'x-simulated-trading': '0',
}
return header
order_dc = {}
def get_order_need_update():
global order_dc
    # order data that needs to be updated
path = Api_Trade_Orders_Pending
url = get_url(path)
header = get_header('GET', path)
res = requests.get(url, headers=header)
data = res.json()
for order in data['data']:
if order['instId'] not in order_dc:
order_dc[order['instId']] = []
if order['ordId'] not in order_dc[order['instId']]:
order_dc[order['instId']].append(order['ordId'])
not_state = ['live', 'canceled']
def get_orders():
global order_dc
    # update order data
add_list = []
for instId, orderId in order_dc.items():
for ordId in orderId:
path = Api_Trade_Order+"?ordId=" + \
ordId+'&instId='+instId
url = get_url(path)
header = get_header('GET', path)
res = requests.get(url, headers=header)
data = res.json()['data'][0]
            log = 'Contract type: {instId}  Trigger price: {px}  State: {state}'.format(
                instId=instId, px=data['px'], state=data['state'])
logger.info(log)
if data['state'] in not_state:
add_list.append([instId, ordId])
continue
            title = 'OKX contract order trigger notification'
            text = '''
            Contract type: {instId}
            Trigger price: {px}
            '''.format(instId=instId, px=data['px'])
mail = Mail.create_mail()
mail.send(to_mail, title, text)
order_dc.clear()
for add in add_list:
if not add[0] in order_dc:
order_dc[add[0]] = []
order_dc[add[0]].append(add[1])
@scheduler.scheduled_job('interval', id='watch_contract_order', minutes=2)
def watch_contract_order():
    # contract order monitoring
    logger.info('======== Running OKX contract monitoring ========')
get_order_need_update()
get_orders()
watch_contract_order()
```
#### File: module/notice/mail.py
```python
import yagmail
from config import get_config
class Mail:
def __init__(self):
mail = get_config('mail')
self.user = mail.get('user')
self.password = mail.get('password')
self.host = mail.get('host')
@staticmethod
def create_mail():
mail = Mail()
return mail
def send(self, to, title, contents):
yag = yagmail.SMTP(
            user=self.user, password=self.password, host=self.host)
yag.send(to=to, subject=title,
contents=contents)
``` |
{
"source": "6759-Project/generative-downscaling",
"score": 2
} |
#### File: 6759-Project/generative-downscaling/ift6759_train_Glow_2mTemp.py
```python
import numpy as np
import xarray as xr
import tensorflow as tf
import datetime
import wandb
import os
import time
# to install climdex:
# python -m pip install git+https://github.com/bgroenks96/pyclimdex.git
# somehow this doesn't work:
# python -m pip install git+https://github.com/bgroenks96/normalizing-flows
# I copied the normalizing_flows folder from GitHub (the same commit that the
# submodule in Groenke's generative-downscaling repo points to) into this project.
# I also had to downgrade from tensorflow-probability==0.16.0 to 0.15.0
# reminder to generate new environment.yml if all works out
from utils.preprocessing import remove_monthly_means
import climdex.temperature as tdex
from normalizing_flows.models import VariationalModel, FlowLVM, JointFlowLVM, adversarial
from normalizing_flows.models.variational import nll_loss
from normalizing_flows.models.optimization import LinearWarmupSchedule
from normalizing_flows.flows import Transform, Flow, Invert
from normalizing_flows.flows.image import Upsample
from normalizing_flows.flows.glow import GlowFlow, coupling_nn_glow
from utils.distributions import normal
from tensorflow.keras.optimizers import Adamax
import matplotlib.pyplot as plt
def standardize(zarr_coarse, zarr_fine):
daily_means = zarr_coarse.groupby("date.month").mean()
daily_std = zarr_coarse.groupby("date.month").std()
coarse_standardized = (zarr_coarse - daily_means) / daily_std
fine_standardized = (zarr_fine - daily_means) / daily_std
return coarse_standardized, fine_standardized, daily_means, daily_std
def destandardize(zarr, mean, std):
zarr_destandardized = (zarr * std) + mean
return zarr_destandardized
def upsample(new_wt, new_ht, method, scale_factor=1):
@tf.function
def _upsample(x):
return tf.image.resize(x, (new_wt,new_ht), method=method) / scale_factor
return _upsample
def preprocess_vds(data_lo, data_hi, batch_size=100, buffer_size=1000, supervised=True, shuffle=True):
if not shuffle:
data = tf.data.Dataset.zip((data_lo, data_hi))
else:
if supervised:
data = tf.data.Dataset.zip((data_lo, data_hi)).shuffle(buffer_size)
else:
data = tf.data.Dataset.zip((data_lo.shuffle(buffer_size), data_hi.shuffle(buffer_size)))
return data.batch(batch_size)
indices = tdex.indices('date')
def eval_climdex(true, pred, coords):
true_arr = xr.DataArray(true, coords=coords)
pred_arr = xr.DataArray(pred, coords=coords)
txx_true = indices.monthly_txx(true_arr)
txx_pred = indices.monthly_txx(pred_arr)
txn_true = indices.monthly_txn(true_arr)
txn_pred = indices.monthly_txn(pred_arr)
txx_bias = txx_pred - txx_true
txn_bias = txn_pred - txn_true
return txx_bias, txn_bias
def spatial_mae(scale, stride=1):
"""
"Spatial" MAE auxiliary loss for generator. Penalizes outputs
which violate spatial average preservation between input and output.
"""
kernel = tf.ones((scale,scale,1,1)) / (scale**2.)
    def _spatial_mae(x_in, y_pred):
x_avg = tf.nn.conv2d(x_in, kernel, strides=(stride, stride), padding='VALID')
y_avg = tf.nn.conv2d(y_pred, kernel, strides=(stride, stride), padding='VALID')
return tf.math.reduce_mean(tf.math.abs(y_avg - x_avg))
    return _spatial_mae
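# --- Hedged illustration (added; not part of the original script) ---
# Quick sanity check of the spatial-average-preservation penalty defined above:
# identical block averages give zero loss, while a constant +1 shift of the
# output moves every block average by 1 and so gives a loss of ~1.0.
# The tensor shapes and values are made up for this sketch.
def _demo_spatial_mae():
    loss_fn = spatial_mae(scale=4, stride=4)
    x_in = tf.random.normal((2, 32, 32, 1))
    print(float(loss_fn(x_in, x_in)))        # 0.0: block averages identical
    print(float(loss_fn(x_in, x_in + 1.0)))  # ~1.0: every block average shifted by 1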
#Function for visualizing our samples
def plot_1xn(data, titles, cmin=-10., cmax=10., save=None):
n = len(data)
fig = plt.figure(figsize=(n*9,6))
for i in range(n):
plt.subplot(1,n,i+1)
plt.imshow(data[i].numpy().squeeze(), origin='lower')
plt.colorbar(pad=0.04, shrink=0.5)
plt.suptitle(titles, y=0.85)
if save is not None: fig.savefig(save)
fig.clf()
class DataLoaderTemp:
def __init__(
self,
path_lr='data/processed/temp/5625/temp_5625_processed.zarr',
path_hr='data/processed/temp/1406/temp_1406_processed.zarr',
n_total=None,
batch_size=10,
custom_standardize=False,
supervised_training=False
):
# load the data
zarr_lr = xr.open_zarr(path_lr)
zarr_hr = xr.open_zarr(path_hr)
if custom_standardize:
zarr_lr, zarr_hr, self.means, self.std = standardize(zarr_lr, zarr_hr)
else:
# just center it to zero
zarr_lr, monthly_means_lr = remove_monthly_means(zarr_lr, time_dim='date')
zarr_hr, monthly_means_hr = remove_monthly_means(zarr_hr, time_dim='date')
# train and test split the zarr arrays
assert len(zarr_hr.date)==len(zarr_lr.date)
if n_total is None: n_total = len(zarr_hr.date)
print('n_total:', n_total)
n_train = int(0.7*n_total)
n_valid = int(0.2*n_total)
n_test = n_total-n_train-n_valid
zarr_lr_train = zarr_lr.isel(date=slice(0, n_train))
zarr_hr_train = zarr_hr.isel(date=slice(0, n_train))
zarr_lr_valid = zarr_lr.isel(date=slice(n_train, n_train+n_valid))
zarr_hr_valid = zarr_hr.isel(date=slice(n_train, n_train+n_valid))
zarr_lr_test = zarr_lr.isel(date=slice(n_train+n_valid, n_train+n_valid+n_test))
zarr_hr_test = zarr_hr.isel(date=slice(n_train+n_valid, n_train+n_valid+n_test))
# make data numpy arrays (unsuited for large data):
# each ndarray have shape [date, lat, lon]
ndarray_lr_train = zarr_lr_train.to_array().to_numpy().squeeze()
ndarray_hr_train = zarr_hr_train.to_array().to_numpy().squeeze()
ndarray_lr_valid = zarr_lr_valid.to_array().to_numpy().squeeze()
ndarray_hr_valid = zarr_hr_valid.to_array().to_numpy().squeeze()
ndarray_lr_test = zarr_lr_test.to_array().to_numpy().squeeze()
ndarray_hr_test = zarr_hr_test.to_array().to_numpy().squeeze()
# defining tensorflow datasets
# this batches the data along the date axis to yield
# n_total samples of shape [lat, lon]
        # The chronological 70/20/10 train/valid/test split was computed above.
dataset_train_lr = tf.data.Dataset.from_tensor_slices(ndarray_lr_train)
dataset_train_hr = tf.data.Dataset.from_tensor_slices(ndarray_hr_train)
dataset_valid_lr = tf.data.Dataset.from_tensor_slices(ndarray_lr_valid)
dataset_valid_hr = tf.data.Dataset.from_tensor_slices(ndarray_hr_valid)
dataset_test_lr = tf.data.Dataset.from_tensor_slices(ndarray_lr_test)
dataset_test_hr = tf.data.Dataset.from_tensor_slices(ndarray_hr_test)
# We need to naively upsample the low res data so it has the same
# dimensionality as the high res using the nearest neighbour algo
# We first add a channel to all images
dataset_train_lr = dataset_train_lr.map(lambda x: x[:,:,None])
dataset_train_hr = dataset_train_hr.map(lambda x: x[:,:,None])
dataset_valid_lr = dataset_valid_lr.map(lambda x: x[:,:,None])
dataset_valid_hr = dataset_valid_hr.map(lambda x: x[:,:,None])
dataset_test_lr = dataset_test_lr.map(lambda x: x[:,:,None])
dataset_test_hr = dataset_test_hr.map(lambda x: x[:,:,None])
# import ipdb;ipdb.set_trace()
# Then upsample the low res datasets
lat_hr, lon_hr = ndarray_hr_train.shape[1:]
dataset_train_lr = dataset_train_lr.map(upsample(lat_hr, lon_hr, tf.image.ResizeMethod.NEAREST_NEIGHBOR))
dataset_valid_lr = dataset_valid_lr.map(upsample(lat_hr, lon_hr, tf.image.ResizeMethod.NEAREST_NEIGHBOR))
dataset_test_lr = dataset_test_lr.map(upsample(lat_hr, lon_hr, tf.image.ResizeMethod.NEAREST_NEIGHBOR))
#import ipdb; ipdb.set_trace()
# zipping the data together and shuffling each dataset individually
# for "unsupervised learning"
train_ds = preprocess_vds(dataset_train_lr, dataset_train_hr, batch_size=batch_size, buffer_size=n_train, supervised=supervised_training)
valid_ds = preprocess_vds(dataset_valid_lr, dataset_valid_hr, batch_size=batch_size, buffer_size=n_valid, supervised=supervised_training)
valid_ds_paired = preprocess_vds(dataset_valid_lr, dataset_valid_hr, batch_size=100, buffer_size=n_valid, supervised=True, shuffle=False)
test_ds = preprocess_vds(dataset_test_lr, dataset_test_hr, batch_size=batch_size, buffer_size=n_test, supervised=supervised_training)
test_ds_paired = preprocess_vds(dataset_test_lr, dataset_test_hr, batch_size=100, buffer_size=n_test, supervised=True, shuffle=False)
scale = ndarray_hr_train.shape[1] // ndarray_lr_train.shape[1]
# Setting attributes
self.zarr_lr_train = zarr_lr_train
self.zarr_lr_valid = zarr_lr_valid
self.zarr_lr_test = zarr_lr_test
self.zarr_hr_train = zarr_hr_train
self.zarr_hr_valid = zarr_hr_valid
self.zarr_hr_test = zarr_hr_test
self.train_ds = train_ds
self.valid_ds = valid_ds
self.valid_ds_paired = valid_ds_paired
self.test_ds = test_ds
self.test_ds_paired = test_ds_paired
self.n_total = n_total
self.n_train = n_train
self.n_valid = n_valid
self.n_test = n_test
self.lon_hr = lon_hr
self.lat_hr = lat_hr
self.scale = scale
self.monthly_means_lr = monthly_means_lr
self.monthly_means_hr = monthly_means_hr
self.custom_standardize = custom_standardize
def main():
# launching wandb
wandb.init(project="Train-Glow-2mTemp")
# these are all args in his fit_glow_jflvm() function from glow-downscaling-maxt.ipynb
validate_freq=1
warmup=1
sample_batch_size=10
load_batch_size=1200
layers=4
depth=8
min_filters=32
max_filters=256
lam=1.0
lam_decay=0.01
alpha=1.0
n_epochs=20
custom_standardize=False
supervised_training=True
wandb.config.update({'validate_freq':validate_freq,
'warmup':warmup,
'sample_batch_size':sample_batch_size,
'load_batch_size':load_batch_size,
'layers':layers,
'depth':depth,
'min_filters':min_filters,
'max_filters':max_filters,
'lam':lam,
'lam_decay':lam_decay,
'alpha':alpha,
'n_epochs':n_epochs,
'custom_standardize':custom_standardize,
'supervised_training': supervised_training})
dl = DataLoaderTemp(batch_size=sample_batch_size, custom_standardize=custom_standardize, supervised_training=supervised_training)
flow_hr = Invert(GlowFlow(num_layers=layers, depth=depth, coupling_nn_ctor=coupling_nn_glow(max_filters=max_filters), name='glow_hr'))
flow_lr = Invert(GlowFlow(num_layers=layers, depth=depth, coupling_nn_ctor=coupling_nn_glow(max_filters=max_filters), name='glow_lr'))
dx = adversarial.PatchDiscriminator((dl.lat_hr, dl.lon_hr,1))
dy = adversarial.PatchDiscriminator((dl.lat_hr, dl.lon_hr,1))
model_joint = JointFlowLVM(flow_lr, flow_hr, dx, dy,
Gx_aux_loss=spatial_mae(dl.scale, stride=dl.scale),
Gy_aux_loss=spatial_mae(dl.scale),
input_shape=(None, dl.lat_hr, dl.lon_hr, 1))
start_time = time.time()
for i in range(n_epochs):
# training
print(f'Training joint model for {validate_freq} epochs ({i}/{n_epochs} complete)', flush=True)
#model_joint.load_weights('model_checkpoints/test_jflvm_checkpoint')
train_metrics = model_joint.train(dl.train_ds, steps_per_epoch=dl.n_train//sample_batch_size, num_epochs=validate_freq, lam=lam-lam_decay*validate_freq*i, lam_decay=lam_decay, alpha=alpha)
# evaluation
valid_eval_metrics = model_joint.evaluate(dl.valid_ds, dl.n_valid//sample_batch_size)
#Sampling and Visualizing x and y
samples_x,samples_y = model_joint.sample(n=4)
plot_1xn(samples_x, r"Samples $x \sim P(X)$", save='sampling_figures/Unconditional_X_epoch{0:02d}'.format(i))
plot_1xn(samples_y, r"Samples $y \sim P(Y)$", save='sampling_figures/Unconditional_Y_epoch{0:02d}'.format(i))
x_t, y_t = next(dl.test_ds_paired.__iter__())
# Conditional Sampling
xp_t = model_joint.predict_x(y_t)
yp_t = model_joint.predict_y(x_t)
# Visualizing Inputs & Outputs
plot_1xn([x_t[0], y_t[0], xp_t[0], yp_t[0]], r"Predictions $X \leftrightarrow Y$", save='sampling_figures/Conditional_epoch{0:02d}'.format(i))
# # Saving the model
# model_joint.save(f'model_checkpoints/jflvm_checkpoint')
# climdex
print('Evaluating valid set ClimDEX indices on predictions')
y_true, y_pred = [], []
for x, y in dl.valid_ds_paired:
y_true.append(y)
z, ildj = model_joint.G_zx.inverse(x)
y_, fldj = model_joint.G_zy.forward(z)
y_pred.append(y_)
y_true = tf.concat(y_true, axis=0)
y_pred = tf.concat(y_pred, axis=0)
#valid_mse = (tf.keras.metrics.mean_squared_error(y_true, y_pred))
mse = tf.keras.losses.MeanSquaredError()
valid_mse = mse(y_true,y_pred).numpy()
print(valid_mse)
# computing climdex indices
valid_txx_bias, valid_txn_bias = eval_climdex(np.squeeze(y_true.numpy()), np.squeeze(y_pred.numpy()), dl.zarr_hr_valid.coords)
valid_txx_bias_mean, valid_txx_bias_std = valid_txx_bias.mean().values, valid_txx_bias.std().values
valid_txn_bias_mean, valid_txn_bias_std = valid_txn_bias.mean().values, valid_txn_bias.std().values
print('valid_mse:'+str(valid_mse))
# printing climdex indices
print('valid txx_bias_mean, valid txx_bias_std:', valid_txx_bias_mean, valid_txx_bias_std)
print('valid txn_bias_mean, valid txn_bias_std:', valid_txn_bias_mean, valid_txn_bias_std)
# logging losses, metrics in WandB
for key, value in train_metrics.items():
wandb.log({'train_'+key: value[0]}, step=i)
for key, value in valid_eval_metrics.items():
wandb.log({'valid_eval_'+key: value[0]}, step=i)
# logging climdex indices in WandB
wandb.log({'valid_mse':valid_mse}, step=i)
wandb.log({'valid_txx_bias_mean':valid_txx_bias_mean}, step=i)
wandb.log({'valid_txx_bias_std':valid_txx_bias_std}, step=i)
wandb.log({'valid_txn_bias_mean':valid_txn_bias_mean}, step=i)
wandb.log({'valid_txn_bias_std':valid_txn_bias_std}, step=i)
#Test set evaluation
test_eval_metrics = model_joint.evaluate(dl.test_ds, dl.n_test//sample_batch_size)
print('Evaluating Test ClimDEX indices on predictions')
y_true, y_pred = [], []
for x, y in dl.test_ds_paired:
y_true.append(y)
z, ildj = model_joint.G_zx.inverse(x)
y_, fldj = model_joint.G_zy.forward(z)
y_pred.append(y_)
y_true = tf.concat(y_true, axis=0)
y_pred = tf.concat(y_pred, axis=0)
# converting the preds and trues to zarr with original coords
zarr_test_y_pred = xr.Dataset(data_vars={'t2m':(["date", "lat", "lon"], y_pred.numpy().squeeze())}, coords=dl.zarr_hr_test.coords)
zarr_test_y_true = xr.Dataset(data_vars={'t2m':(["date", "lat", "lon"], y_true.numpy().squeeze())}, coords=dl.zarr_hr_test.coords)
assert zarr_test_y_true.equals(dl.zarr_hr_test)
# # destandaridize
# zarr_test_y_pred = destandardize(zarr_test_y_pred, dl.means, dl.std)
# zarr_test_y_true = destandardize(zarr_test_y_true, dl.means, dl.std)
# saving them
zarr_test_y_pred.to_zarr('test_hr_pred_epoch{0:02d}.zarr'.format(i))
zarr_test_y_true.to_zarr('test_hr_true_epoch{0:02d}.zarr'.format(i))
# computing climdex indices
test_txx_bias, test_txn_bias = eval_climdex(np.squeeze(y_true.numpy()), np.squeeze(y_pred.numpy()), dl.zarr_hr_test.coords)
test_txx_bias_mean, test_txx_bias_std = test_txx_bias.mean().values, test_txx_bias.std().values
test_txn_bias_mean, test_txn_bias_std = test_txn_bias.mean().values, test_txn_bias.std().values
# printing climdex indices
print('test_txx_bias_mean, test_txx_bias_std:', test_txx_bias_mean, test_txx_bias_std)
print('test_txn_bias_mean, test_txn_bias_std:', test_txn_bias_mean, test_txn_bias_std)
total_training_time = time.time() - start_time
print('total training time:', total_training_time)
# logging losses, metrics in WandB
for key, value in test_eval_metrics.items():
wandb.log({'test_eval_'+key: value[0]}, step=i)
# logging climdex indices in WandB
wandb.log({'test_txx_bias_mean':test_txx_bias_mean}, step=i)
wandb.log({'test_txx_bias_std':test_txx_bias_std}, step=i)
wandb.log({'test_txn_bias_mean':test_txn_bias_mean}, step=i)
wandb.log({'test_txn_bias_std':test_txn_bias_std}, step=i)
wandb.log({'total_training_time':total_training_time}, step=i)
# Saving the last model
model_joint.save(f'model_checkpoints/final_jflvm_checkpoint')
if __name__ == "__main__":
main()
``` |
{
"source": "676031/imed",
"score": 3
} |
#### File: imed/bigday/telegram-sender.py
```python
import requests
def telegram_bot_sendtext(bot_message):
bot_token = "<KEY>"
bot_chatID = "-425483778"
send_text = 'https://api.telegram.org/bot' + bot_token + '/sendMessage?chat_id=' + bot_chatID + '&parse_mode=Markdown&text=' + bot_message
response = requests.get(send_text)
return response.json()
test = telegram_bot_sendtext("Testing Telegram bot")
print(test)
# def report():
# # my_balance = 10 ## Replace this number with an API call to fetch your account balance
# my_message = "Current balance is:" ## Customize your message
# telegram_bot_sendtext(my_message)
# print(my_message)
print("End")
# def external(request):
# inp=request.POST.get('keyword')
# out= run([sys.executable, 'telegram-sender.py' ,inp],shell=False, stdout=PIPE)
# print(out)
# return render(request, 'home.html',{'data':data})
# print(my_message)
# schedule.every().day.at("12:00").do(report)
# while True:
# schedule.run_pending()
# time.sleep(1)
``` |
{
"source": "67au/intel-map-client",
"score": 2
} |
#### File: src/IntelMapClient/types.py
```python
from collections import defaultdict
from itertools import groupby, product, chain
from typing import Union, Iterator
from .utils import *
class GameEntity:
__slots__ = ['guid', 'timestampMs']
def __init__(self,
guid: str,
timestampMs: int,
):
self.guid = guid
self.timestampMs = timestampMs
@property
def timestamp(self) -> datetime:
return timestamp_ms2datetime(self.timestampMs)
@classmethod
def parse(cls, data: list) -> Union['Portal', 'Link', 'Field']:
type_ = data[2][0]
if type_ == 'p':
return Portal.parse(data)
elif type_ == 'e':
return Link.parse(data)
elif type_ == 'r':
return Field.parse(data)
class PortalCore:
def __init__(self,
guid: str,
latE6: int,
lngE6: int,
):
self.guid = guid
self.latE6 = latE6
self.lngE6 = lngE6
@property
def lat(self) -> float:
return self.latE6 / 1e6
@property
def lng(self) -> float:
return self.lngE6 / 1e6
class Portal(GameEntity, PortalCore):
__slots__ = [
'type',
'team',
'latE6',
'lngE6',
'level',
'health',
'resCount',
'image',
'title',
'ornaments',
'mission',
'mission50plus',
'artifactBrief',
'_timestamp',
'mods',
'resonators',
'owner',
'artifactDetail',
'history'
]
def __init__(self,
guid: str,
timestampMs: int,
type_: str,
team: str,
latE6: int,
lngE6: int,
level: int,
health: int,
resCount: int,
image: str,
title: str,
ornaments: list,
mission: bool,
mission50plus: bool,
artifactBrief: Union[list, None],
timestamp: int,
mods: Union[list, None] = None,
resonators: Union[list, None] = None,
owner: Union[list, None] = None,
artifactDetail: Union[list, None] = None,
history: Union[int, None] = None,
):
super().__init__(guid, timestampMs)
self.type = type_
self.team = team
self.latE6 = latE6
self.lngE6 = lngE6
self.level = level
self.health = health
self.resCount = resCount
self.image = image
self.title = title
self.ornaments = ornaments
self.mission = mission
self.mission50plus = mission50plus
self.artifactBrief = artifactBrief
self._timestamp = timestamp
self.mods = mods
self.resonators = resonators
self.owner = owner
self.artifactDetail = artifactDetail
self.history = history
@classmethod
def parse(cls, data: list):
self = cls(data[0], data[1], *data[2])
return self
class Link(GameEntity):
__slots__ = ['type', 'team', 'portal1', 'portal2']
def __init__(self,
guid: str,
timestampMs: int,
type_: str,
team: str,
po1_guid: str,
po1_latE6: int,
po1_lngE6: int,
po2_guid: str,
po2_latE6: int,
po2_lngE6: int,
):
super().__init__(guid, timestampMs)
self.type = type_
self.team = team
self.portal1 = PortalCore(po1_guid, po1_latE6, po1_lngE6)
self.portal2 = PortalCore(po2_guid, po2_latE6, po2_lngE6)
@classmethod
def parse(cls, data: list):
self = cls(data[0], data[1], *data[2])
return self
class Field(GameEntity):
__slots__ = ['type', 'team', 'portal1', 'portal2', 'portal3']
def __init__(self,
guid: str,
timestampMs: int,
type_: str,
team: str,
po1_guid: str,
po1_latE6: int,
po1_lngE6: int,
po2_guid: str,
po2_latE6: int,
po2_lngE6: int,
po3_guid: str,
po3_latE6: int,
po3_lngE6: int,
):
super().__init__(guid, timestampMs)
self.type = type_
self.team = team
self.portal1 = PortalCore(po1_guid, po1_latE6, po1_lngE6)
self.portal2 = PortalCore(po2_guid, po2_latE6, po2_lngE6)
self.portal3 = PortalCore(po3_guid, po3_latE6, po3_lngE6)
@classmethod
def parse(cls, data: list):
self = cls(data[0], data[1], data[2][0], data[2][1], *data[2][2][0], *data[2][2][1], *data[2][2][2])
return self
class Plext(GameEntity):
def __init__(self,
guid: str,
timestampMs: int,
plext: dict,
):
super().__init__(guid, timestampMs)
self.text = plext['plext'].get('text')
self.team = plext['plext'].get('team')
self.markup = plext['plext'].get('markup')
self.plextType = plext['plext'].get('plextType')
self.categories = plext['plext'].get('categories')
@classmethod
def parse(cls, data: list):
self = cls(data[0], data[1], data[2])
return self
class Tile:
def __init__(self,
name: str,
portals: Union[list, None] = None,
links: Union[list, None] = None,
fields: Union[list, None] = None,
):
self.name = name
self.portals = portals or []
self.links = links or []
self.fields = fields or []
@classmethod
def parse(cls, name: str, game_entities: dict):
portals, links, fields = [], [], []
get_type = lambda x: x[2][0]
groups = groupby(sorted(game_entities['gameEntities'], key=get_type), key=get_type)
for t, ents in groups:
if t == 'p':
portals = list(map(Portal.parse, ents))
elif t == 'e':
links = list(map(Link.parse, ents))
elif t == 'r':
fields = list(map(Field.parse, ents))
self = cls(name, portals, links, fields)
return self
class MapTiles:
def __init__(self,
min_lat: float,
max_lat: float,
min_lng: float,
max_lng: float,
zoom: int,
tiles: list[tuple]):
self.min_lat = min_lat
self.max_lat = max_lat
self.min_lng = min_lng
self.max_lng = max_lng
self.zoom = zoom
self.tiles = tiles
def tileKeys(self) -> list[str]:
return [f'{self.zoom}_{x}_{y}_0_8_100' for x, y in self.tiles]
@property
def minLatE6(self) -> int:
return int(self.min_lat * 1e6)
@property
def maxLatE6(self) -> int:
return int(self.max_lat * 1e6)
@property
def minLngE6(self) -> int:
return int(self.min_lng * 1e6)
@property
def maxLngE6(self) -> int:
return int(self.max_lng * 1e6)
@staticmethod
def get_range(x, y):
return range(*(y, x + 1) if x > y else (x, y + 1))
@classmethod
def from_box(cls,
minLat: float,
minLng: float,
maxLat: float,
maxLng: float,
zoom: int = 15
) -> 'MapTiles':
zpe = get_tiles_per_edge(zoom)
x_range = cls.get_range(lng2tile(minLng, zpe), lng2tile(maxLng, zpe))
y_range = cls.get_range(lat2tile(minLat, zpe), lat2tile(maxLat, zpe))
return MapTiles(
min_lat=minLat,
max_lat=maxLat,
min_lng=minLng,
max_lng=maxLng,
zoom=zoom,
tiles=list(product(x_range, y_range))
)
@classmethod
def from_range(cls,
lat_range: tuple[float, float],
lng_range: tuple[float, float],
zoom: int = 15
) -> 'MapTiles':
minLat, maxLat = lat_range
minLng, maxLng = lng_range
return cls.from_box(minLat, minLng, maxLat, maxLng, zoom)
@classmethod
def from_square(cls,
center_lat: float,
center_lng: float,
radian_meter: int,
zoom: int = 15
) -> 'MapTiles':
        dpl = 111000  # approximate metres per degree of latitude
        d_lat = 1.0 * radian_meter / dpl
        d_lng = 1.0 * radian_meter / (dpl * math.cos(math.radians(center_lat)))
return cls.from_box(center_lat - d_lat, center_lng - d_lng,
center_lat + d_lat, center_lng + d_lng, zoom)
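# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# from_square() approximates a square of `radian_meter` metres around a centre
# point using ~111 km per degree of latitude, then enumerates the map tiles at
# the requested zoom level that cover it. The coordinates below are arbitrary.
def _demo_map_tiles():
    tiles = MapTiles.from_square(center_lat=35.0, center_lng=139.7,
                                 radian_meter=500, zoom=15)
    print(len(tiles.tiles), 'tiles in the bounding square')
    print(tiles.tileKeys()[:3])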
class TileSet:
def __init__(self,
map_tiles: MapTiles,
tiles: list[Tile],
errors: Union[list, None] = None):
self.map_tiles = map_tiles
self._tiles = defaultdict(Tile)
for t in tiles:
self._tiles[t.name] = t
        self.errors = set(errors or [])
def add(self, name: str, tile: Tile) -> bool:
if name in self.errors:
self._tiles[tile.name] = tile
self.errors.remove(name)
return True
else:
return False
def portals(self) -> Iterator[Portal]:
return chain.from_iterable(t.portals for t in self._tiles.values())
    def links(self) -> Iterator[Link]:
return chain.from_iterable(t.links for t in self._tiles.values())
    def fields(self) -> Iterator[Field]:
return chain.from_iterable(t.fields for t in self._tiles.values())
@classmethod
def parse(cls, map_tiles: MapTiles, data: dict):
errors = []
tiles = {}
for name, t in data['map'].items():
if 'gameEntities' in t:
tiles[name] = Tile.parse(name, t)
else:
errors.append(name)
self = cls(map_tiles, list(tiles.values()), errors)
return self
@property
def tiles(self):
return self._tiles
``` |
{
"source": "67Samuel/Generalizing-Lottery-Tickets",
"score": 3
} |
#### File: Generalizing-Lottery-Tickets/src/iterative_snip.py
```python
from utils import *
import torch
import random
import torchvision
import torch.optim as optim
import numpy as np
import wandb
import torch.nn as nn
import torch.nn.functional as F
import copy
import types
def initialize_xavier_normal(layer):
"""
Function to initialize a layer by picking weights from a xavier normal distribution
Arguments
---------
layer : The layer of the neural network
Returns
-------
None
"""
if type(layer) == nn.Conv2d:
torch.nn.init.xavier_normal_(layer.weight)
layer.bias.data.fill_(0)
def snip_forward_conv2d(self, x):
return F.conv2d(x, self.weight * self.weight_mask, self.bias,
self.stride, self.padding, self.dilation, self.groups)
def snip_forward_linear(self, x):
return F.linear(x, self.weight * self.weight_mask, self.bias)
def SNIP(net, keep_ratio, train_dataloader, device, img_size=None, num_channels=3):
# TODO: shuffle?
# Grab a single batch from the training dataset
inputs, targets = next(iter(train_dataloader))
if type(img_size) == int:
inputs = inputs.view(-1,num_channels,img_size,img_size).float().requires_grad_()
inputs = inputs.to(device)
targets = targets.to(device)
# Let's create a fresh copy of the network so that we're not worried about
# affecting the actual training-phase
net = copy.deepcopy(net)
# Monkey-patch the Linear and Conv2d layer to learn the multiplicative mask
# instead of the weights
for layer in net.modules():
if isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear):
layer.weight_mask = nn.Parameter(torch.ones_like(layer.weight))
nn.init.xavier_normal_(layer.weight)
layer.weight.requires_grad = False
# Override the forward methods:
if isinstance(layer, nn.Conv2d):
layer.forward = types.MethodType(snip_forward_conv2d, layer)
if isinstance(layer, nn.Linear):
layer.forward = types.MethodType(snip_forward_linear, layer)
# Compute gradients (but don't apply them)
net.zero_grad()
outputs = net.forward(inputs)
loss = nn.CrossEntropyLoss()(outputs, targets)
loss.backward()
grads_abs = []
for layer in net.modules():
if isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear):
grads_abs.append(torch.abs(layer.weight_mask.grad))
# Gather all scores in a single vector and normalise
all_scores = torch.cat([torch.flatten(x) for x in grads_abs])
norm_factor = torch.sum(all_scores)
all_scores.div_(norm_factor)
num_params_to_keep = int(len(all_scores) * keep_ratio)
threshold, _ = torch.topk(all_scores, num_params_to_keep, sorted=True)
acceptable_score = threshold[-1]
keep_masks = []
for g in grads_abs:
keep_masks.append(((g / norm_factor) >= acceptable_score).float())
print(torch.sum(torch.cat([torch.flatten(x == 1) for x in keep_masks])))
return(keep_masks)
def apply_prune_mask(net, keep_masks):
# Before I can zip() layers and pruning masks I need to make sure they match
# one-to-one by removing all the irrelevant modules:
prunable_layers = filter(
lambda layer: isinstance(layer, nn.Conv2d) or isinstance(
layer, nn.Linear), net.modules())
for layer, keep_mask in zip(prunable_layers, keep_masks):
assert (layer.weight.shape == keep_mask.shape)
def hook_factory(keep_mask):
"""
The hook function can't be defined directly here because of Python's
late binding which would result in all hooks getting the very last
mask! Getting it through another function forces early binding.
"""
def hook(grads):
return grads * keep_mask
return hook
# mask[i] == 0 --> Prune parameter
# mask[i] == 1 --> Keep parameter
# Step 1: Set the masked weights to zero (NB the biases are ignored)
# Step 2: Make sure their gradients remain zero
layer.weight.data[keep_mask == 0.] = 0.
layer.weight.register_hook(hook_factory(keep_mask)) #hook masks onto respective weights
def weight_reset(m):
if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
m.reset_parameters()
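# --- Hedged illustration (added; not part of the original script) ---
# Minimal end-to-end use of SNIP() + apply_prune_mask() on a toy model and a
# fake one-batch dataloader, just to show the calling convention used by
# prune_iteratively() below. The toy network, tensor shapes and keep ratio are
# invented for this sketch.
def _demo_snip():
    device = torch.device('cpu')
    toy_net = nn.Sequential(nn.Conv2d(3, 8, 3, padding=1), nn.ReLU(),
                            nn.Flatten(), nn.Linear(8 * 32 * 32, 10))
    fake_loader = [(torch.randn(4, 3, 32, 32), torch.randint(0, 10, (4,)))]
    keep_masks = SNIP(toy_net, keep_ratio=0.5, train_dataloader=fake_loader, device=device)
    apply_prune_mask(toy_net, keep_masks)  # zero out and freeze ~50% of the weights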
def prune_iteratively(model, args, img_size, dataloader, device):
"""
Performs iterative pruning
Arguments
---------
model : the PyTorch neural network model to be trained
dataloader : PyTorch dataloader for loading the dataset
args.architecture : The neural network architecture (VGG19 or ResNet50)
args.optimizer : The optimizer to use for training (SGD / Adam)
device : Device(GPU/CPU) on which to perform computation
args.model_saving_path: Path to directory where trained model/checkpoints will be saved
Returns
--------
None
"""
if args.architecture == "vgg19":
num_epochs = 160
lr_anneal_epochs = [80, 120]
elif args.architecture == "resnet50":
num_epochs = 90
lr_anneal_epochs = [50, 65, 80]
elif args.architecture == "alexnet":
num_epochs = 500
lr_anneal_epochs = [450, 470, 480, 490]
else:
raise ValueError(args.architecture + " architecture not supported")
criterion = nn.CrossEntropyLoss().cuda()
if args.wandb:
# run wandb init
if args.optimizer == 'sgd':
lr=0.01
elif args.optimizer == 'adam':
lr=0.0003
wandb.init(entity=args.entity, project=args.project, name=args.run_name, config={'batch size':args.batch_size, 'lr':lr, 'epochs':num_epochs})
print("Iterative Pruning started")
for pruning_iter in range(0,31):
if args.wandb:
# log each iteration to wandb
wandb.log({'prune iteration':pruning_iter})
print(f"Running pruning iteration {pruning_iter}")
if args.optimizer == 'sgd':
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0005)
elif args.optimizer == 'adam':
optimizer = optim.Adam(model.parameters(), lr=0.0003, weight_decay=0.0001)
else:
raise ValueError(args.optimizer + " optimizer not supported")
if (args.architecture == "vgg19") or (args.architecture == "alexnet"):
model.apply(initialize_xavier_normal)
if pruning_iter != 0:
cpt = torch.load(args.model_saving_path + f"/{pruning_iter-1}_{num_epochs}")
model.load_state_dict(cpt['model_state_dict'])
model.to(device)
        # keep ratio shrinks by a factor of 0.8 each iteration, i.e. an additional 20% of the surviving weights is pruned
snip_factor = round((100*(0.8**(pruning_iter+1)))/100, 5)
print(f"Pruning 20% of latest model weights with SNIP, snip factor: {snip_factor}...")
keep_masks = SNIP(model, snip_factor, dataloader, device, img_size=img_size)
# Reinitialise weights
if args.reinit:
model.apply(weight_reset)
# Apply mask
apply_prune_mask(model, keep_masks)
for epoch in range(1, num_epochs+1):
if args.wandb:
# log each epoch
wandb.log({'epochs':epoch})
if epoch in lr_anneal_epochs:
# decrease lr at previously specified epochs
optimizer.param_groups[0]['lr'] /= 10
for batch_num, data in enumerate(dataloader, 0):
inputs, labels = data[0].to(device), data[1].to(device)
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, labels)
if args.wandb:
# log loss at each epoch
wandb.log({'prune loss':loss.item()})
loss.backward()
optimizer.step()
if args.wandb:
# log lr at each epoch
wandb.log({'train lr':optimizer.param_groups[0]['lr']})
if (epoch == num_epochs):
# save model at the end of each iteration, file looks like 1_500, 2_500 etc
print(f'saving checkpoint to {args.model_saving_path}/{str(pruning_iter)}_{str(num_epochs)}...')
torch.save({'epoch': epoch,'model_state_dict': model.state_dict(),'optimizer_state_dict': optimizer.state_dict() },args.model_saving_path + "/"+ str(pruning_iter) + "_" + str(num_epochs))
print("Finished Iterative Pruning")
if __name__ == '__main__':
#Parsers the command line arguments
parser = args_parser_iterprune()
args = parser.parse_args()
#Sets random seed
random.seed(args.seed)
#Uses GPU is available
device = torch.device(f"cuda:{args.gpu}" if torch.cuda.is_available() else "cpu")
print(f'Using {device} device.')
#Checks number of classes for appropriate linear layer at end of model
if args.source_dataset in ['cifar10', 'svhn', 'fashionmnist']:
num_classes_source = 10
elif args.source_dataset in ['cifar100']:
num_classes_source = 100
else:
raise ValueError(args.source_dataset + " as a source dataset is not supported")
#Loads dataset
dataloader = load_dataset(args.target_dataset, args.batch_size, True)
    #Loads model (classifier head sized from the class count determined above)
    model = load_model(args.architecture, num_classes_source)
# Get image size depending on dataset to use in SNIP function
if args.target_dataset in ['cifar10', 'cifar100', 'svhn', 'cifar10a', 'cifar10b']:
img_size = 32
elif args.target_dataset == 'fashionmnist':
img_size = 28
else:
raise ValueError(args.target_dataset + " dataset not supported")
prune_iteratively(model, args, img_size, dataloader, device)
``` |
{
"source": "67Samuel/MobileNetV3-Pytorch",
"score": 2
} |
#### File: 67Samuel/MobileNetV3-Pytorch/main.py
```python
import torch
import torch.nn as nn
import torch.optim as optim
from preprocess import load_data
from model import MobileNetV3
import argparse
from tqdm import tqdm
import time
import os
from collections import OrderedDict
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
def get_args():
parser = argparse.ArgumentParser("parameters")
parser.add_argument("--dataset-mode", type=str, default="IMAGENET", help="(example: CIFAR10, CIFAR100, IMAGENET), (default: IMAGENET)")
parser.add_argument("--epochs", type=int, default=100, help="number of epochs, (default: 100)")
parser.add_argument("--batch-size", type=int, default=512, help="number of batch size, (default, 512)")
parser.add_argument("--learning-rate", type=float, default=1e-1, help="learning_rate, (default: 1e-1)")
parser.add_argument("--dropout", type=float, default=0.8, help="dropout rate, not implemented yet, (default: 0.8)")
parser.add_argument('--model-mode', type=str, default="LARGE", help="(example: LARGE, SMALL), (default: LARGE)")
parser.add_argument("--load-pretrained", type=bool, default=False, help="(default: False)")
parser.add_argument('--evaluate', type=bool, default=False, help="Testing time: True, (default: False)")
parser.add_argument('--multiplier', type=float, default=1.0, help="(default: 1.0)")
parser.add_argument('--print-interval', type=int, default=5, help="training information and evaluation information output frequency, (default: 5)")
parser.add_argument('--data', default='D:/ILSVRC/Data/CLS-LOC')
parser.add_argument('--workers', type=int, default=4)
parser.add_argument('--distributed', type=bool, default=False)
parser.add_argument('--prune', type=str, default=False)
parser.add_argument('--snip-percentage', type=int, default=0)
parser.add_argument('--prefix', type=str, default="")
args = parser.parse_args()
return args
import torch.nn as nn
import torch.nn.functional as F
import copy
import types
def snip_forward_conv2d(self, x):
return F.conv2d(x, self.weight * self.weight_mask, self.bias,
self.stride, self.padding, self.dilation, self.groups)
def snip_forward_linear(self, x):
return F.linear(x, self.weight * self.weight_mask, self.bias)
def SNIP(net, keep_ratio, train_dataloader, device, img_size=None, num_channels=3):
# TODO: shuffle?
# Grab a single batch from the training dataset
inputs, targets = next(iter(train_dataloader))
if type(img_size) == int:
inputs = inputs.view(-1,num_channels,img_size,img_size).float().requires_grad_()
inputs = inputs.to(device)
targets = targets.to(device)
# Let's create a fresh copy of the network so that we're not worried about
# affecting the actual training-phase
net = copy.deepcopy(net)
# Monkey-patch the Linear and Conv2d layer to learn the multiplicative mask
# instead of the weights
for layer in net.modules():
if isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear):
layer.weight_mask = nn.Parameter(torch.ones_like(layer.weight))
nn.init.xavier_normal_(layer.weight)
layer.weight.requires_grad = False
# Override the forward methods:
if isinstance(layer, nn.Conv2d):
layer.forward = types.MethodType(snip_forward_conv2d, layer)
if isinstance(layer, nn.Linear):
layer.forward = types.MethodType(snip_forward_linear, layer)
# Compute gradients (but don't apply them)
net.zero_grad()
outputs = net.forward(inputs)
loss = nn.CrossEntropyLoss()(outputs, targets)
loss.backward()
grads_abs = []
for layer in net.modules():
if isinstance(layer, nn.Conv2d) or isinstance(layer, nn.Linear):
grads_abs.append(torch.abs(layer.weight_mask.grad))
# Gather all scores in a single vector and normalise
all_scores = torch.cat([torch.flatten(x) for x in grads_abs])
norm_factor = torch.sum(all_scores)
all_scores.div_(norm_factor)
num_params_to_keep = int(len(all_scores) * keep_ratio)
threshold, _ = torch.topk(all_scores, num_params_to_keep, sorted=True)
acceptable_score = threshold[-1]
keep_masks = []
for g in grads_abs:
keep_masks.append(((g / norm_factor) >= acceptable_score).float())
print(f"{torch.sum(torch.cat([torch.flatten(x == 1) for x in keep_masks]))} parameters kept, {torch.sum(torch.cat([torch.flatten(x==0) for x in keep_masks]))} parameters pruned")
return(keep_masks)
def apply_prune_mask(net, keep_masks):
# Before I can zip() layers and pruning masks I need to make sure they match
# one-to-one by removing all the irrelevant modules:
prunable_layers = filter(
lambda layer: isinstance(layer, nn.Conv2d) or isinstance(
layer, nn.Linear), net.modules())
for layer, keep_mask in zip(prunable_layers, keep_masks):
assert (layer.weight.shape == keep_mask.shape)
def hook_factory(keep_mask):
"""
The hook function can't be defined directly here because of Python's
late binding which would result in all hooks getting the very last
mask! Getting it through another function forces early binding.
"""
def hook(grads):
return grads * keep_mask
return hook
# mask[i] == 0 --> Prune parameter
# mask[i] == 1 --> Keep parameter
# Step 1: Set the masked weights to zero (NB the biases are ignored)
# Step 2: Make sure their gradients remain zero
layer.weight.data[keep_mask == 0.] = 0.
layer.weight.register_hook(hook_factory(keep_mask)) #hook masks onto respective weights
def initialize_xavier_normal(layer):
"""
Function to initialize a layer by picking weights from a xavier normal distribution
Arguments
---------
layer : The layer of the neural network
Returns
-------
None
"""
if type(layer) == nn.Conv2d:
torch.nn.init.xavier_normal_(layer.weight)
layer.bias.data.fill_(0)
def adjust_learning_rate(optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.learning_rate * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
# reference,
# https://github.com/pytorch/examples/blob/master/imagenet/main.py
# Thank you.
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
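# --- Hedged example (added; not part of the original file) ---
# AverageMeter keeps a running weighted average; update(val, n) treats `val`
# as the mean over `n` samples. The numbers below are arbitrary.
def _demo_average_meter():
    meter = AverageMeter('Loss', ':.4f')
    meter.update(2.0, n=4)  # batch of 4 samples with mean loss 2.0
    meter.update(1.0, n=4)  # batch of 4 samples with mean loss 1.0
    print(str(meter))       # "Loss 1.0000 (1.5000)"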
def train(train_loader, model, criterion, optimizer, epoch, args):
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(len(train_loader), batch_time, data_time, losses, top1, top5, prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
end = time.time()
for i, (data, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
data, target = data.to(device), target.to(device)
# if args.gpu is not None:
# data = data.cuda(args.gpu, non_blocking=True)
# target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(data)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), data.size(0))
top1.update(acc1[0], data.size(0))
top5.update(acc5[0], data.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_interval == 0:
progress.print(i)
def validate(val_loader, model, criterion, args):
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(len(val_loader), batch_time, losses, top1, top5,
prefix='Test: ')
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (data, target) in enumerate(val_loader):
# if args.gpu is not None:
# input = input.cuda(args.gpu, non_blocking=True)
# target = target.cuda(args.gpu, non_blocking=True)
data, target = data.to(device), target.to(device)
# compute output
output = model(data)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), data.size(0))
top1.update(acc1[0], data.size(0))
top5.update(acc5[0], data.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_interval == 0:
progress.print(i)
# TODO: this should also be done with the ProgressMeter
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg, top5.avg
class ProgressMeter(object):
def __init__(self, num_batches, *meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def print(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
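# --- Hedged example (added; not part of the original file) ---
# Tiny illustration of accuracy(): with the made-up logits below, only the
# first sample is right at top-1 (50.0), while both targets fall inside the
# top-2 predictions (100.0).
def _demo_accuracy():
    logits = torch.tensor([[0.1, 0.2, 3.0], [0.5, 0.1, 2.0]])
    targets = torch.tensor([2, 0])
    top1, top2 = accuracy(logits, targets, topk=(1, 2))
    print(top1.item(), top2.item())  # 50.0 100.0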
def main():
args = get_args()
train_loader, test_loader = load_data(args)
if args.dataset_mode == "CIFAR10":
num_classes = 10
elif args.dataset_mode == "CIFAR100":
num_classes = 100
elif args.dataset_mode == "IMAGENET":
num_classes = 1000
print('num_classes: ', num_classes)
model = MobileNetV3(model_mode=args.model_mode, num_classes=num_classes, multiplier=args.multiplier, dropout_rate=args.dropout).to(device)
if torch.cuda.device_count() >= 1:
print("num GPUs: ", torch.cuda.device_count())
model = nn.DataParallel(model).to(device)
if args.load_pretrained or args.evaluate:
filename = "best_model_" + str(args.model_mode) +str(args.prefix)
try:
dp_model = torch.load('./checkpoint/' + filename + '_dp_model.t7')
model.load_state_dict(dp_model)
except:
#print(filename+"_dp_model.t7 is not found")
checkpoint = torch.load('./checkpoint/' + filename + '_ckpt.t7')
model.load_state_dict(checkpoint['model'])
epoch = checkpoint['epoch']
acc1 = checkpoint['best_acc1']
acc5 = checkpoint['best_acc5']
best_acc1 = acc1
print("Load Model Accuracy1: ", acc1, " acc5: ", acc5, "Load Model end epoch: ", epoch)
else:
print("init model load ...")
epoch = 1
best_acc1 = 0
if args.prune:
print(f"Pruning {args.snip_percentage}% of weights with SNIP...")
# get snip factor in form required for SNIP function
snip_factor = (100 - args.snip_percentage)/100
keep_masks = SNIP(model, snip_factor, train_loader, device)
apply_prune_mask(model, keep_masks)
optimizer = optim.SGD(model.parameters(), lr=args.learning_rate, weight_decay=1e-5, momentum=0.9)
# optimizer = optim.RMSprop(model.parameters(), lr=args.learning_rate, momentum=0.9, weight_decay=1e-5)
criterion = nn.CrossEntropyLoss().to(device)
if args.evaluate:
acc1, acc5 = validate(test_loader, model, criterion, args)
print("Acc1: ", acc1, "Acc5: ", acc5)
return
if not os.path.isdir("reporting"):
os.mkdir("reporting")
start_time = time.time()
with open("./reporting/" + "best_model_" + args.model_mode + args.prefix + ".txt", "w") as f:
for epoch in range(epoch, args.epochs):
adjust_learning_rate(optimizer, epoch, args)
train(train_loader, model, criterion, optimizer, epoch, args)
acc1, acc5 = validate(test_loader, model, criterion, args)
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
if is_best:
print('Saving..')
best_acc5 = acc5
state = {
'model': model.state_dict(),
'best_acc1': best_acc1,
'best_acc5': best_acc5,
'epoch': epoch,
}
if not os.path.isdir('checkpoint'):
os.mkdir('checkpoint')
filename = "best_model_" + str(args.model_mode)
torch.save(state, './checkpoint/' + filename + args.prefix + '_ckpt.t7')
torch.save(model.module.state_dict(), './checkpoint/' + filename + args.prefix + '_dp_model.t7')
time_interval = time.time() - start_time
time_split = time.gmtime(time_interval)
print("Training time: ", time_interval, "Hour: ", time_split.tm_hour, "Minute: ", time_split.tm_min, "Second: ", time_split.tm_sec, end='')
print(" Test best acc1:", best_acc1, " acc1: ", acc1, " acc5: ", acc5)
f.write("Epoch: " + str(epoch) + " " + " Best acc: " + str(best_acc1) + " Test acc: " + str(acc1) + "\n")
f.write("Training time: " + str(time_interval) + " Hour: " + str(time_split.tm_hour) + " Minute: " + str(
time_split.tm_min) + " Second: " + str(time_split.tm_sec))
f.write("\n")
if __name__ == "__main__":
main()
``` |
{
"source": "6851-2017/timetree",
"score": 3
} |
#### File: timetree/backend/base_dnode.py
```python
from abc import ABCMeta
from abc import abstractmethod
from .base_util import BaseCopyableVnode
class BaseDnode(metaclass=ABCMeta):
__slots__ = ('backend',)
def __init__(self, backend):
self.backend = backend
@abstractmethod
def get(self, field, version_num):
pass
@abstractmethod
def set(self, field, value, version_num):
pass
@abstractmethod
def delete(self, field, version_num):
pass
class BaseDnodeBackedVnode(BaseCopyableVnode):
__slots__ = ('dnode', )
    dnode_cls = BaseDnode  # placeholder only: BaseDnode is abstract, so subclasses must override this
def __init__(self, version, *, dnode=None):
super().__init__(version)
if dnode is not None:
# Restore an old vnode
self.dnode = dnode
return
self.dnode = self.dnode_cls(self.backend)
def get(self, field):
super().get(field)
result = self.dnode.get(field, self.version.version_num)
if isinstance(result, self.dnode_cls):
result = self.__class__(self.version, dnode=result)
return result
def set(self, field, value):
super().set(field, value)
if self.backend.is_vnode(value):
value = value.dnode
self.dnode.set(field, value, self.version.version_num)
def delete(self, field):
super().delete(field)
self.dnode.delete(field, self.version.version_num)
def copy(self, version):
return self.__class__(version, dnode=self.dnode)
def __eq__(self, other):
return (self.version, self.dnode) == (other.version, other.dnode)
def __hash__(self):
return hash((self.version, self.dnode))
```
#### File: timetree/backend/base_linearized_full.py
```python
from abc import ABCMeta
from .base import BaseVersion
from .base_util import BaseCopyableVnode
from .base_util import BaseDivergentBackend
from .util.order_maintenance import FastLabelerList
from .util.order_maintenance import FastLabelerNode
class BaseLinearizedFullBackend(BaseDivergentBackend):
__slots__ = ('version_list', 'v_0', 'v_inf')
vnode_cls = BaseCopyableVnode # Type of vnodes to create, should be Copyable
def __init__(self):
self.version_list = FastLabelerList()
self.v_0 = FastLabelerNode()
self.v_inf = FastLabelerNode()
self.version_list.insert_after(None, self.v_0)
self.version_list.insert_after(self.v_0, self.v_inf)
def _commit(self, vnodes):
""" Default just makes a shallow copy of vnodes and returns it """
super()._commit(vnodes)
if not vnodes:
return LinearizedFullCommit(self, self.v_0), []
head = vnodes[0].version
version_num = head.version_num
commit = LinearizedFullCommit(self, version_num)
result = []
for vnode in vnodes:
new_vnode = vnode.copy(commit)
result.append(new_vnode)
new_version_num = FastLabelerNode()
self.version_list.insert_after(version_num, new_version_num)
head.version_num = new_version_num
return commit, result
def _branch(self, vnodes):
super()._branch(vnodes)
version_num = vnodes[0].version.version_num if vnodes else self.v_0
# Make new versions (and un-version)
new_version_num = FastLabelerNode()
self.version_list.insert_after(version_num, new_version_num)
head = LinearizedFullHead(self, new_version_num, self.vnode_cls)
result = []
for vnode in vnodes:
new_vnode = vnode.copy(head)
result.append(new_vnode)
return head, result
class BaseLinearizedFullVersion(BaseVersion, metaclass=ABCMeta):
__slots__ = ()
class LinearizedFullHead(BaseVersion):
__slots__ = ('vnode_cls', 'version_num',)
def __init__(self, backend, version_num, vnode_cls):
super().__init__(backend, is_head=True)
self.vnode_cls = vnode_cls
self.version_num = version_num
# Make sure that the version number and its successor aren't the
# endpoint of the versions
assert version_num.is_node and version_num.next.is_node
def new_node(self):
return self.vnode_cls(self)
class LinearizedFullCommit(BaseVersion):
__slots__ = ('version_num',)
def __init__(self, backend, version_num):
super().__init__(backend, is_head=False)
self.version_num = version_num
def new_node(self):
raise ValueError("Can't create a node from a commit")
```
#### File: timetree/backend/base.py
```python
from abc import ABCMeta
from abc import abstractmethod
from collections import defaultdict
__all__ = ['BaseBackend', 'BaseVersion', 'BaseVnode']
class BaseBackend(metaclass=ABCMeta):
""" Abstract base class for persistence backends
Timetree's general goal is to express persistent data structures:
structures in which we can "travel back in time" to a previous state. To
do this, we'll represent the state of the data structure at each
particular "point in time" or "version" as a separate pointer machine of
"vnodes" (virtual or versioned nodes), represented by classes
`BaseVersion` and `BaseVnode`. Vnodes are each bound to a particular
version, and each have fields keyed by strings, which can be manipulated
with :py:meth:`.get`, :py:meth:`.set` and :py:meth:`.delete`. Fields may
point to other vnodes in the same machine or to external (ideally
immutable) data not in the timetree, but *not* to other versions in the
timetree.
The structure of the versions can take on various forms, but most
generally, they form a DAG. Edges in the DAG represent implicit copies:
we construct each new pointer machine as some set of edits
(`set`s/`delete`s) on top of the union of copies of some other existing
pointer machines, which are the parents in the DAG. (This describes
confluent persistence. In the case of fully persistent data structures,
the versions form a tree, as each new machine is an edit of only a
single previous version; machines at different times cannot merge.)
Because we are working with only persistence, vnodes can only be modified
if their corresponding version is a terminal node (a leaf in a tree) in
the DAG. Thus, we can think of modifiable (terminal/leaf) versions and
frozen (internal) versions. From here on, we'll call the modifiable
versions "heads", and frozen versions "commits". Note that vnodes can be
bound to heads or commits. We also won't enforce that commits must be
internal, though heads always must be leaves.
Initially, the tree contains only a single base commit which is an empty
pointer machine. We provide two different operations to create new versions
and heads.
The first is :py:meth:`.branch(vnodes)`, which takes a list of vnodes.
Branch returns a new head based off of all commits of vnodes given,
and returns vnodes bound to the new head corresponding to copies of the
input vnodes.
Second, we have :py:meth:`.commit(vnodes)`. This takes a list of vnodes
bound to a single head version and creates a (frozen) copy of it (a
commit). This commit shares the same parents as the head in the version
DAG, and we update the head's parent to be only the newly created commit
(we split the head into an edge). Alternatively, we mark the head as
frozen, create a new head on top, and update all vnodes originally bound
to head to bind to the new head. We return the commit, as well as
corresponding copies of argument `vnodes`, but rebound to commit.
To make the operations clearer, we show an alternative view of the version
pool. We consider the set of only commits. Then, a "head" is a working
copy of (one or more) commit(s), which we can muck around with and edit.
`branch` creates a new head and lets us access its vnodes, while `commit`
takes an existing head and saves a copy of the current state as a commit.
In this sense, "heads" and "commits" somewhat match Git's usage of these
words.
"""
__slots__ = ()
def is_vnode(self, value):
""" Check if a value is a vnode of this backend
:param value: The value to check
:return: True if it is a vnode
"""
return isinstance(value, BaseVnode) and value.backend is self
def commit(self, vnodes=None):
""" Create a new commit based on the given head
In different views, this can be:
- Split the current head in the version DAG into a commit and a
head.
- Change the head to a commit, create a new head on top,
and implicitly rebind all references to head to the new head.
- Add a copy of head as a commit to the pool of commits.
If vnodes is empty or not given, then the new commit is the base commit.
:param vnodes: Vnodes which we would like references to; must be
bound to the same head
:return: Reference to the new commit, and if vnodes is given, a list of
`vnodes` rebound to it
"""
        # The default implementation sanitizes vnodes into a list and
# validates things
if vnodes is None:
return self._commit([])[0]
else:
vnodes = list(vnodes)
return self._commit(vnodes)
@abstractmethod
def _commit(self, vnodes):
""" Internal implementation of commit """
if not isinstance(vnodes, list):
raise TypeError("vnodes should be a list")
if not all(self.is_vnode(vnode) for vnode in vnodes):
raise ValueError('Invalid vnode in commit')
if vnodes:
head = vnodes[0].version
if not all(vnode.version == head for vnode in vnodes):
raise ValueError('Vnodes must all have the same version')
if not head.is_head:
raise ValueError('Vnode version not a head')
def branch(self, vnodes=None):
""" Create a new head based on the given vnodes
The head's pointer machine is the disjoint union of the pointer
machines of the originating commits of the given vnodes.
If vnodes is empty, then create a new branch based off of the base commit.
:param vnodes: Vnodes which we would like references to; uncommitted
ones are committed
:return: Reference to the new head, and if vnodes is given, a list of
`vnodes` rebound to it
"""
        # The default implementation sanitizes vnodes into a list and
# validates things
if vnodes is None:
return self._branch([])[0]
else:
vnodes = list(vnodes)
committed_vnodes = [None] * len(vnodes)
heads_by_version = defaultdict(list)
for i, vnode in enumerate(vnodes):
if vnode.version.is_head:
heads_by_version[vnode.version].append((i, vnode))
else:
committed_vnodes[i] = vnode
for version, heads in heads_by_version.items():
_, commits = self.commit(vnode for i, vnode in heads)
for (i, old_vnode), new_vnode in zip(heads, commits):
committed_vnodes[i] = new_vnode
return self._branch(committed_vnodes)
@abstractmethod
def _branch(self, vnodes):
""" Internal implementation of branch """
if not isinstance(vnodes, list):
raise TypeError("vnodes should be a list")
if not all(self.is_vnode(vnode) for vnode in vnodes):
raise ValueError('Invalid vnode in branch')
if not all(vnode.version.is_commit for vnode in vnodes):
raise ValueError('Vnode is not a commit')
class BaseVersion(metaclass=ABCMeta):
""" Abstract base class for versions of backends """
__slots__ = ('backend', 'is_head', )
def __init__(self, backend, is_head):
self.backend = backend
self.is_head = is_head
@abstractmethod
def new_node(self):
""" Create a new vnode in this version
Version must be a head
:return: A new vnode in the version
"""
if not self.is_head:
raise ValueError("Can only create in head versions")
@property
def is_commit(self):
""" Returns whether a version is a commit or a head
:return: Boolean of True if it's a commit and otherwise False
"""
return not self.is_head
class BaseVnode(metaclass=ABCMeta):
""" Abstract base class for vnodes of backends """
__slots__ = ('version', )
def __init__(self, version):
self.version = version
@property
def backend(self):
""" Return the backend of this vnode """
return self.version.backend
@abstractmethod
def get(self, field):
""" Get a field of a vnode
:param field: Field name
:return: Field value
:raises KeyError: Field not found in vnode
"""
@abstractmethod
def set(self, field, value):
""" Set a field of a vnode
:param field: Field name
:param value: New value to set
Must be in the same version if it's also a vnode.
:return: None
:raises ValueError: Value is a vnode but isn't at the same version
"""
if not self.version.is_head:
raise ValueError("Can only set in head versions")
if self.backend.is_vnode(value):
if self.version != value.version:
raise ValueError("Mismatched versions")
@abstractmethod
def delete(self, field):
""" Delete a field of a vnode
:param field: Field name
:return: None
:raises KeyError: Field not found in vnode
"""
if not self.version.is_head:
raise ValueError("Can only delete from head versions")
def commit(self):
""" Commit this vnode and return the new vnode
Equivalent to extracting the vnode from self.backend.commit:
return self.backend.commit([self])[1][0]
"""
new_version, [new_vnode] = self.backend.commit([self])
return new_vnode
def branch(self):
""" Branch from this vnode and return the new vnode
Equivalent to extracting the vnode from self.backend.branch:
return self.backend.branch([self])[1][0]
"""
new_version, [new_vnode] = self.backend.branch([self])
return new_vnode
```
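A minimal usage sketch of the commit/branch API documented above, using the `CopyBackend` reference implementation from `copy.py` later in this section; the flow and field names below are illustrative, not part of the library's tests:
```python
from timetree.backend.copy import CopyBackend

backend = CopyBackend()
head = backend.branch()                    # new head based on the empty base commit
node = head.new_node()                     # vnode bound to that head
node.set('value', 1)
commit, [frozen] = backend.commit([node])  # freeze the current state of the head
node.set('value', 2)                       # the head stays editable
assert frozen.get('value') == 1            # the commit still sees the old value
assert node.get('value') == 2
```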
#### File: timetree/backend/base_util.py
```python
from abc import ABCMeta
from abc import abstractmethod
from .base import BaseBackend
from .base import BaseVnode
class BaseCopyableVnode(BaseVnode, metaclass=ABCMeta):
__slots__ = ()
@abstractmethod
def copy(self, new_version):
""" We guarantee the new_version and the current version correspond
to the same represented object
"""
pass
class BaseDivergentBackend(BaseBackend, metaclass=ABCMeta):
""" (Optional) base class for non-confluent backends
"""
__slots__ = ()
@abstractmethod
def _branch(self, vnodes):
super()._branch(vnodes)
if vnodes:
commit = vnodes[0].version
if not all(vnode.version == commit for vnode in vnodes):
raise NotImplementedError('Vnodes must all have the same version')
```
#### File: timetree/backend/copy.py
```python
from .base import BaseBackend
from .base import BaseVersion
from .base import BaseVnode
class CopyBackend(BaseBackend):
""" Timetree backend which copies everything always
Designed to be a reference implementation
"""
def __init__(self):
super().__init__()
def _commit(self, vnodes):
super()._commit(vnodes)
commit = CopyVersion(self, is_head=False)
return commit, self._clone(vnodes, commit)
def _branch(self, vnodes):
super()._branch(vnodes)
head = CopyVersion(self, is_head=True)
return head, self._clone(vnodes, head)
def _clone(self, vnodes, version):
""" Clone vnodes under a new version
:param vnodes: Vnodes to clone
:param version: New version
:return: Mapping of vnodes
"""
old_versions = {vnode.version for vnode in vnodes}
node_maps = dict()
for old_version in old_versions:
node_map = {vnode: CopyVnode(version) for vnode in old_version.vnodes}
for vnode, new_vnode in node_map.items():
# Write in the new values
new_vnode.values = {
k: node_map[v] if self.is_vnode(v) else v
for k, v in vnode.values.items()
}
version.vnodes.extend(node_map.values())
node_maps[old_version] = node_map
return [node_maps[vnode.version][vnode] for vnode in vnodes]
class CopyVersion(BaseVersion):
__slots__ = ('vnodes',)
def __init__(self, backend, is_head):
super().__init__(backend, is_head)
self.vnodes = []
def new_node(self):
super().new_node()
vnode = CopyVnode(self)
self.vnodes.append(vnode)
return vnode
class CopyVnode(BaseVnode):
__slots__ = ('values',)
def __init__(self, version):
super().__init__(version)
self.values = dict()
def get(self, field):
super().get(field)
if field not in self.values:
raise KeyError
return self.values[field]
def set(self, field, value):
super().set(field, value)
self.values[field] = value
def delete(self, field):
super().delete(field)
if field not in self.values:
raise KeyError
del self.values[field]
``` |
{
"source": "6851-2021/Cache-Oblivious-Data-Structures",
"score": 3
} |
#### File: 6851-2021/Cache-Oblivious-Data-Structures/perf.py
```python
import pandas as pd
import argparse
import re
import os
from subprocess import PIPE, run
from sys import argv
class PerfObj:
def __init__(self):
self.df = pd.DataFrame()
self.stat_of_interest = ['cache-references', 'cache-misses', "L1-dcache-load-misses", "L1-dcache-loads", "L1-dcache-stores",\
"LLC-load-misses", "LLC-loads", "LLC-store-misses", "LLC-stores", "cpu-clock"]
def parse_perf_stat(self, perf_stat, name, n, q):
data = perf_stat.split()
parsed_data = {"name": name, "n": n, "q": q, "input": f"{n}_{q}"}
self.extract_names = self.stat_of_interest
for i in range(len(data)):
if data[i] in self.stat_of_interest:
if data[i] == "cpu-clock":
parsed_data[data[i]] = data[i-2].replace(",", "")
else:
parsed_data[data[i]] = self.__parse_int(data[i-1])
self.df = self.df.append(parsed_data, ignore_index=True)
return parsed_data
def __parse_int(self, integer):
return int(integer.replace(",", ""))
def __str__(self):
col =["name"] + self.stat_of_interest
return self.get_records()[col].to_string(index = False)
def record_cache(self, program_name, n=None, q=None):
command = ["perf", "stat", "-e", ",".join(self.stat_of_interest), program_name]
#format input
inputstr = ""
if n and q:
inputstr = f"{n} {q}\n"
result = run(command, stdout=PIPE, universal_newlines=True, input=inputstr, stderr=PIPE)
parsed_data = self.parse_perf_stat(result.stderr, program_name, n, q)
return parsed_data
# returns a data frame
def get_records(self):
self.df["cache-references"] = self.df["cache-references"].astype(int)
self.df["cache-misses"] = self.df["cache-misses"].astype(int)
self.df["L1-dcache-load-misses"] = self.df["L1-dcache-load-misses"].astype(int)
self.df["LLC-load-misses"] = self.df["LLC-load-misses"].astype(int)
self.df["cpu-clock"] = self.df["cpu-clock"].astype(float)
return self.df
if __name__ == '__main__':
# perf = PerfObj()
# perf.record_cache("./perf_co_sst")
# perf.record_cache("./perf_sst")
# perf.record_cache("./perf_ca_sst")
# perf.record_cache("./perf_built_co_sst")
# print(perf)
if len(argv) > 3:
perf = PerfObj()
program_name = argv[1]
n = int(argv[2])
q = int(argv[3])
perf.record_cache(program_name, n, q)
print(perf)
else:
print("Usage python perf.py <program> <n> <q>")
``` |
{
"source": "6851-2021/matrix-walker",
"score": 3
} |
#### File: matrix-walker/py/matrix_walker.py
```python
from math import log2, ceil
from itertools import chain
from cache import Cache
import random
class MatrixWalker:
def _next_pow_2(self, n):
return int(2**(ceil(log2(n))))
def _matrix_to_array(self, mat):
arr = [-1] * (self.n_pw2**2)
for i in range(len(mat)):
for j in range(len(mat)):
arr[self.translate(i,j)] = mat[i][j]
return arr
def __init__(self, mat, cache_width, cache_height):
self.mat = mat
self.size = len(mat)
self.n_pw2 = self._next_pow_2(len(mat))
self.arr = self._matrix_to_array(mat)
self.i = random.randrange(len(mat))
self.j = random.randrange(len(mat))
self.loc = 0 # location in flattened array
self.val = self.arr[self.translate(self.i, self.j)]
self.cache = Cache(cache_width, cache_height)
@classmethod
# alternative constructor, analogous to NumPy
    def zeros(cls, size, cache_width=8, cache_height=128):
        # cache defaults are an assumption, chosen to match the example in __main__ below
        mat = [[0] * size for _ in range(size)]  # independent rows, so later writes don't alias
        return cls(mat, cache_width, cache_height)
def _move(self, i, j):
if i< 0 or i>=self.size or j<0 or j>=self.size:
raise IndexError('attempted to move out of bounds')
self.i = i
self.j = j
self.loc = self.translate(i, j)
self.val = self.arr[self.loc]
self.cache.access(self.get_cache_index(i, j))
def random_teleport(self):
        self._move(random.randrange(self.size), random.randrange(self.size))
def teleport(self, i, j):
self._move(i,j)
def left(self):
self._move(self.i, self.j - 1)
def right(self):
self._move(self.i, self.j + 1)
def up(self):
self._move(self.i - 1, self.j)
def down(self):
self._move(self.i + 1, self.j)
def get(self):
return self.val
def __setitem__(self, index, val):
i = index[0]
j = index[1]
self.arr[self.translate(i, j)] = val
def get_cache_index(self, i, j):
return self.translate(i, j) // self.cache.width
class ZWalker(MatrixWalker):
def translate(self, i, j):
bin_i = bin(i+self.n_pw2)[3:] # ensure correct length of bin repr
bin_j = bin(j+self.n_pw2)[3:]
interleaved = ''.join(chain(*zip(bin_i, bin_j)))
return int(interleaved, 2)
class HilbertWalker(MatrixWalker):
def translate(self, i, j):
# recurse into quadrants (done iteratively here)
base_case = [0, 1, 3, 2]
ret = 0
for pow in range(int(log2(self.n_pw2))-1, -1, -1):
mask = 2**pow
quadrant = ((i & mask == mask) << 1) + (j & mask == mask)
ret += base_case[quadrant]
i &= mask - 1
j &= mask - 1
if quadrant == 0: # Flip next layer depending on quadrant
i, j = j, i
elif quadrant == 2:
i, j = mask - 1 - j, mask - 1 - i
if pow > 0: # Make room for next recursive layer
ret <<= 2
return ret
class NaiveWalker(MatrixWalker):
def translate(self, i, j):
return self.size * i + j
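# Illustrative layout comparison on a 4x4 matrix (values checked by hand, not part of the original file):
#   NaiveWalker.translate(3, 0)   == 3*4 + 0        == 12   (row-major order)
#   ZWalker.translate(3, 0)       == int('1010', 2) == 10   (bits of i and j interleaved, Z/Morton order)
#   HilbertWalker.translate(3, 0) follows the Hilbert space-filling curve instead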
if __name__=="__main__":
small = [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]
# mw = NaiveWalker(small, 8, 128)
hw = HilbertWalker(small, 8, 128)
# zw = ZWalker(small, 8, 128)
# print("Arrays of size 4")
# print(f"Naive:\n{mw.arr}\nHilbert:\n{hw.arr}\nZ:\n{zw.arr}")
print("#####")
print(hw.translate(0,1))
# print(hw.arr)
``` |
{
"source": "6851-2021/range-trees",
"score": 3
} |
#### File: 6851-2021/range-trees/test_rangetree.py
```python
import unittest
from rangetree import TreeNode
class TestRangeTree(unittest.TestCase):
def test_pred(self):
for num in [2, 4, 10, 17, 127]:
tree = TreeNode.create_from_sorted_list(range(0, num * 2, 2))
self.assertIsNone(tree.pred(0))
for k in range(1, num * 2 + 2, 2):
pred = tree.pred(k).key
self.assertEqual(pred, max(v for v in range(0, num * 2, 2) if v < k))
def test_traverse(self):
tree = TreeNode.create_from_sorted_list(range(23))
self.assertEqual([node.key for node in tree.traverse_leaves()], list(range(23)))
def test_range_query(self):
for num in [2, 4, 10, 17, 127]:
tree_range = range(0, num * 2, 2)
tree = TreeNode.create_from_sorted_list(tree_range)
for start in range(-1, num*2 + 1):
for end in range(start, num*2 + 2):
expected_keys = [key for key in tree_range if start <= key <= end]
expected_count = len(expected_keys)
top_nodes = list(tree.range_query(start, end))
actual_count = sum((st.size for st in top_nodes), start=0)
all_keys = [d.key for st in top_nodes for d in st.traverse_leaves()]
self.assertEqual(expected_count, actual_count)
self.assertEqual(all_keys, [key for key in tree_range if start <= key <= end])
``` |
{
"source": "6851-2021/retroactive-priority-queue",
"score": 3
} |
#### File: retroactive-priority-queue/retropq/zero_prefix_bst.py
```python
from .treap import Treap
class MinPrefixSumAggregator:
def __init__(self, key, value):
self.sum = value
self.min_key = key
self.max_key = key
self.min_prefix_sum = value
self.min_prefix_first_key = key
self.min_prefix_last_key = key
def __add__(self, other):
res = MinPrefixSumAggregator(None, 0)
res.sum = self.sum + other.sum
res.min_key = self.min_key
res.max_key = other.max_key
other_min_prefix_sum = self.sum + other.min_prefix_sum
res.min_prefix_sum = min(self.min_prefix_sum, other_min_prefix_sum)
if self.min_prefix_sum <= res.min_prefix_sum:
res.min_prefix_first_key = self.min_prefix_first_key
else:
res.min_prefix_first_key = other.min_prefix_first_key
if other_min_prefix_sum <= res.min_prefix_sum:
res.min_prefix_last_key = other.min_prefix_last_key
else:
res.min_prefix_last_key = self.min_prefix_last_key
return res
class ZeroPrefixBST(Treap):
def __init__(self):
super().__init__(lambda x, y: x + y)
def zero_prefix_before(self, key):
# Returns the maximum k <= key such that the values of all operations
# with keys <k sum up to 0
res = self.agg_before(key, include_eq=False)
if res is None:
return key
elif res.min_prefix_sum > 0:
return min(res.min_key, key)
elif res.sum == 0:
return max(res.min_prefix_last_key, key)
else:
return res.min_prefix_last_key
def zero_prefix_after(self, key):
# Returns the minimum k >= key such that the values of all operations
# with keys <= k sum to 0 (and None if no such k exists)
after_res = self.agg_after(key, include_eq=False)
if after_res is None:
return key
before_sum = self.agg().sum - after_res.sum
min_prefix_in_res = before_sum + after_res.min_prefix_sum
if before_sum == 0:
return key
elif min_prefix_in_res == 0:
return after_res.min_prefix_first_key
else:
return None
def __getitem__(self, key):
return super().__getitem__(key).sum
def __setitem__(self, key, value):
super().__setitem__(key, MinPrefixSumAggregator(key, value))
def __iter__(self):
for k, v in super().__iter__():
yield k, v.sum
```
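A small illustration of how `MinPrefixSumAggregator` combines; the keys and values below are hypothetical, assuming keys are operation times and values are +1 for an insert and -1 for a delete-min, as in a retroactive priority queue:
```python
a = MinPrefixSumAggregator(1, +1)   # insert at time 1
b = MinPrefixSumAggregator(2, -1)   # delete-min at time 2
c = a + b
assert c.sum == 0                   # the two operations cancel out
assert c.min_prefix_sum == 0        # no prefix sum ever drops below zero
assert (c.min_key, c.max_key) == (1, 2)
```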
#### File: retroactive-priority-queue/test/test_rpq_manual.py
```python
import unittest
from retropq import RetroactivePriorityQueue
class PriorityQueueManualTest(unittest.TestCase):
def test_simple(self):
queue = RetroactivePriorityQueue()
self.assertEqual([], list(queue))
queue.add_insert(0, 5)
self.assertEqual([5], list(queue))
queue.add_insert(10, 3)
self.assertEqual([3, 5], list(queue))
queue.add_delete_min(5)
self.assertEqual([3], list(queue))
queue.add_insert(2, 7)
self.assertEqual([3, 7], list(queue))
queue.add_insert(3, 4)
self.assertEqual([3, 5, 7], list(queue))
queue.add_delete_min(7)
self.assertEqual([3, 7], list(queue))
# delete insert
queue.remove(2)
self.assertEqual([3], list(queue))
# delete delete
queue.remove(5)
self.assertEqual([3, 5], list(queue))
def test_get_min(self):
queue = RetroactivePriorityQueue()
self.assertEqual(None, queue.get_min())
queue.add_insert(2, 3)
queue.add_insert(5, 8)
self.assertEqual(3, queue.get_min())
queue.remove(2)
self.assertEqual(8, queue.get_min())
``` |
{
"source": "6859-sp21/final-project-urbantransformationsharvardsq-covid-19",
"score": 2
} |
#### File: python/yamnet/classifyAudio.py
```python
import os
import numpy as np
import soundfile as sf
import matplotlib.pyplot as plt
import sys
sys.path.append('./')
import yamnet.params as yamnet_params
import yamnet.yamnet as yamnet_model
import tensorflow as tf
import subprocess
PATH_YAMNET_CLASSES = "./yamnet/yamnet_class_map.csv"
PATH_YAMNET_WEIGHTS = "./yamnet/yamnet.h5"
def classifyWav(wavPath, topClasses):
semanticResults = {}
path = wavPath.split("/")
filename = path[-1].split(".")[0]
# this is our temp folder we read and write the channels to
targetFolder = '/'.join(path[:-2]) + "/splitChannels/"
channels = 2
# we delete all of the content first in the temp folder
try:
subprocess.call(f"rm {targetFolder}*.wav", shell=True)
except:
pass
if channels == 4:
subprocess.call(f"ffmpeg -i '{wavPath}' -map_channel 0.0.0 {targetFolder + filename}_ch0.wav \
-map_channel 0.0.1 {targetFolder + filename}_ch1.wav \
-map_channel 0.0.2 {targetFolder + filename}_ch2.wav \
-map_channel 0.0.3 {targetFolder + filename}_ch3.wav", shell=True)
elif channels == 2:
subprocess.call(f"ffmpeg -i '{wavPath}' -map_channel 0.0.0 {targetFolder + filename}_ch0.wav \
-map_channel 0.0.1 {targetFolder + filename}_ch1.wav", shell=True)
for i, wavfile in enumerate(os.scandir(targetFolder)):
# the results of the current channel
chResults = {}
#print(wavfile.path)
#print(wavfile.name)
wav_data, sr = sf.read(wavfile.path, dtype=np.int16)
waveform = wav_data / 32768.0
# The graph is designed for a sampling rate of 16 kHz, but higher rates should work too.
# We also generate scores at a 10 Hz frame rate.
params = yamnet_params.Params(sample_rate=sr, patch_hop_seconds=1)
# Set up the YAMNet model.
class_names = yamnet_model.class_names(PATH_YAMNET_CLASSES)
yamnet = yamnet_model.yamnet_frames_model(params)
yamnet.load_weights(PATH_YAMNET_WEIGHTS)
# Run the model.
scores, embeddings, _ = yamnet(waveform)
scores = scores.numpy()
mean_scores = np.mean(scores, axis=0)
# we take the top 3
top_N = topClasses
top_class_indices = np.argsort(mean_scores)[::-1][:top_N]
# these are our scores rows = classes , cols = seconds
top_scores = scores[:, top_class_indices].T
yticks = range(0, top_N, 1)
#class_names = [class_names[top_class_indices[x]] for x in yticks]
# we need to match the classes later in the front - end
class_names = top_class_indices
for col in range(0, np.shape(top_scores)[-1]):
curr_col = top_scores[:, col].flatten()
chResults[col] = {int(cln):round(float(prct), 2) for cln, prct in zip(class_names, curr_col)}
semanticResults[i] = chResults
print(semanticResults)
return semanticResults
if __name__ == '__main__':
fourPath = "/home/lukas/99_TMP/05_Elina_Thesis/MIT_Thesis/data/wavFiles/4/200630_001.WAV"
classifyWav(fourPath, 2)
``` |
{
"source": "68696c6c/dotbot-sudo",
"score": 2
} |
#### File: 68696c6c/dotbot-sudo/sudo.py
```python
import subprocess, dotbot, json
from os import path, remove
from dotbot.util import module
class Sudo(dotbot.Plugin):
_directive = 'sudo'
def can_handle(self, directive):
return self._directive == directive
def handle(self, directive, data):
if directive != self._directive:
raise ValueError('sudo cannot handle directive %s' %
directive)
app = self._find_dotbot()
base_directory = self._context.base_directory()
data = [{'defaults': self._context.defaults()}] + data
plugins = self._collect_plugins()
sudo_conf = path.join(path.dirname(__file__), 'sudo.conf.json')
self._write_conf_file(sudo_conf, data)
proc_args = [
'sudo', app,
'--base-directory', base_directory,
'--config-file', sudo_conf
] + plugins
self._log.debug('sudo: args to pass: {}'.format(proc_args))
try:
self._log.lowinfo('sudo: begin subprocess')
subprocess.check_call(
proc_args,
stdin=subprocess.PIPE)
self._log.lowinfo('sudo: end subprocess')
self._delete_conf_file(sudo_conf)
return True
except subprocess.CalledProcessError as e:
self._log.lowinfo('sudo: end subprocess')
self._log.error(e)
return False
def _collect_plugins(self):
ret = []
for plugin in module.loaded_modules:
# HACK should we compare to something other than _directive?
if plugin.__name__ != self._directive:
ret.extend(iter(['--plugin', plugin.__file__]))
return ret
def _delete_conf_file(self, conf_file):
if path.exists(conf_file):
remove(conf_file)
def _find_dotbot(self):
base = path.dirname(path.dirname(dotbot.__file__))
ret = path.join(base, 'bin', 'dotbot')
self._log.debug('sudo: dotbot app path: {}'.format(ret))
return ret
def _write_conf_file(self, conf_file, data):
self._delete_conf_file(conf_file)
with open(conf_file, 'w') as jfile:
json.dump(data, jfile, ensure_ascii=False)
``` |
{
"source": "687vex/nerdylib-docs",
"score": 2
} |
#### File: source/_ext/rst_roles.py
```python
from sphinx.application import Sphinx
from docutils.parsers.rst import roles
from docutils import nodes
from docutils.parsers.rst.states import Inliner
def strike_role(role, rawtext, text, lineno, inliner: Inliner, options={}, content=[]):
your_css_strike_name = 'strike'
return nodes.inline(rawtext, text, **dict(classes=[your_css_strike_name])), []
def setup(app: Sphinx):
"""Install the plugin.
:param app: Sphinx application context.
"""
roles.register_canonical_role('my-strike', strike_role) # usage: :my-strike:`content ...`
``` |
{
"source": "6895mahfuzgit/Calculus_for_Machine_Learning",
"score": 4
} |
#### File: 6895mahfuzgit/Calculus_for_Machine_Learning/delta_method_practise.py
```python
import numpy as np
import matplotlib.pyplot as plt
x=np.linspace(-10,10,1000)
#function y=x^2+2x+2
def f(x):
x=x**2+2*x+2
return x
x_delta=0.000001
x=-1
#find the slope at x=-1
y=f(x)
print('y:',y)
#the point P is (-1,1)
x2=x+x_delta
print('x2 :',x2)
y2=f(x2)
print('y2 :',y2)
m=(y2-y)/(x2-x)
print('m :',m)
```
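As a sanity check on the estimate above (not part of the original script): the derivative of x**2 + 2*x + 2 is 2*x + 2, so the exact slope at x = -1 is 0, and the delta-method value printed by the script differs from it only by about 1e-6:
```python
def f(x):
    return x ** 2 + 2 * x + 2

def exact_slope(x):
    return 2 * x + 2                      # analytic derivative

x, x_delta = -1, 0.000001
numeric = (f(x + x_delta) - f(x)) / x_delta
print(exact_slope(x), numeric)            # 0 vs. roughly 1e-06
```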
#### File: 6895mahfuzgit/Calculus_for_Machine_Learning/fitting_a_line_with_machine_learning.py
```python
import torch as th
import matplotlib.pyplot as plt
def refression(my_x, my_m, my_b):
return my_m*my_x+my_b
def regression_plot(my_x,my_y,my_m,my_b):
fig,ax=plt.subplots()
ax.scatter(my_x,my_y)
x_min,x_max=ax.get_xlim()
y_min,y_max=my_m*x_min+my_b,my_m*x_max+my_b
ax.set_xlim([x_min,x_max])
_=ax.plot([x_min,x_max],[y_min,y_max])
x = th.tensor([0, 1., 2., 3., 4., 5., 6., 7.])
print(x)
y = -0.5*x+2+th.normal(mean=th.zeros(8), std=0.2)
print(y)
m = th.tensor([0.9]).requires_grad_()
print(m)
b = th.tensor([0.1]).requires_grad_()
print(b)
# Forward pass(1)
yhat = refression(x, m, b)
print(yhat)
# Compare yhat with y actual value
def mse(my_yhat, my_y):
sigma = th.sum((my_yhat-my_y)**2)
return sigma/len(my_y)
C=mse(yhat,y)
print(C)
print(C.backward())
print(m.grad)
print(b.grad)
optimizer=th.optim.SGD([m,b],lr=0.01)
optimizer.step()
print(m)
print(b)
#regression_plot(x,y,m,b)
C=mse(refression(x, m, b),y)
print(C)
epoches=1000
for epoche in range(epoches):
optimizer.zero_grad()
yhat = refression(x, m, b)
C=mse(yhat,y)
C.backward()
optimizer.step()
regression_plot(x,y,m,b)
print(m.item())
print(b.item())
```
#### File: 6895mahfuzgit/Calculus_for_Machine_Learning/limit_infinity.py
```python
import numpy as np
import matplotlib.pyplot as plt
x=np.linspace(-10,10,1000)
#lim x->infinity =25/x
def calculate(x):
x=25/x
return x
y=calculate(x)
fig,ax=plt.subplots()
plt.axvline(x=0,color='lightgray')
plt.axhline(y=0,color='lightgray')
plt.xlim(-10,10)
plt.ylim(-300,300)
plt.axvline(x=0,color='purple',linestyle='--')
plt.axhline(y=1,color='purple',linestyle='--')
_ =ax.plot(x,y)
```
#### File: 6895mahfuzgit/Calculus_for_Machine_Learning/limits_2.py
```python
import numpy as np
import matplotlib.pyplot as plt
#limit x->1=(x^2-1)/(x-1)
x=np.linspace(-10,10,1000)
def calculate_limit(x):
x=(x**2-1)/(x-1)
return x
# result=calculate_limit(1)
# print(result)
# result=calculate_limit(0.9)
# print(result)
# result=calculate_limit(0.999)
# print(result)
# result=calculate_limit(1.1)
# print(result)
# result=calculate_limit(1.001)
# print(result)
y=calculate_limit(x)
fig,ax=plt.subplots()
plt.axvline(x=0,color='lightgray')
plt.axhline(y=0,color='lightgray')
plt.xlim(-1,5)
plt.ylim(-1,5)
plt.axvline(x=1,color='purple',linestyle='--')
plt.axhline(y=2,color='purple',linestyle='--')
_ =ax.plot(x,y)
``` |
{
"source": "689/pylibgen",
"score": 3
} |
#### File: pylibgen/pylibgen/pylibgen.py
```python
import os
import re
import requests
import webbrowser
from urllib.parse import quote_plus
from . import constants
class Library(object):
"""Library Genesis interface wrapper."""
def __init__(self, mirror=constants.DEFAULT_MIRROR):
assert(mirror in constants.MIRRORS)
self.mirror = mirror
def __repr__(self):
return '<Library using mirror {}>'.format(self.mirror)
def __str__(self):
        return self.__repr__()
def search(self, query, type='title'):
"""Searches Library Genesis.
Note:
For search type isbn, either ISBN 10 or 13 is accepted.
Args:
query (str): Search query.
type (str): Query type. Can be title, author, isbn.
Returns:
List of LibraryGenesis book IDs that matched the query.
"""
assert(type in {'title', 'author', 'isbn'})
r = self.__req('search', {
'req': quote_plus(query),
'column': type,
})
return re.findall("<tr.*?><td>(\d+)", r.text)
def lookup(self, ids, fields=constants.DEFAULT_FIELDS):
"""Looks up metadata on Library Genesis books.
Note:
To get book IDs, use search(). The default fields
suffice for most use cases, but there are a LOT more
like openlibraryid, publisher, etc. To get all fields,
use fields=['*'].
Args:
ids (list): Library Genesis book IDs.
fields (list): Library Genesis book properties.
Returns:
List of dicts each containing values for the specified
fields for a Library Genesis book ID.
A single dict if only one str or int id is passed in.
"""
# Allow for lookup of a single numeric string by auto casting
# to a list for convenience.
if isinstance(ids, (str, int)):
ids = [str(ids)]
res = self.__req('lookup', {
'ids': ','.join(ids),
'fields': ','.join(fields),
}).json()
if not res:
# https://github.com/JoshuaRLi/pylibgen/pull/3
raise requests.HTTPError(400)
return res if len(res) > 1 else res[0]
def get_download_url(self, md5, enable_ads=False):
"""Gets a direct download URL to a Library Genesis book.
Note:
This is actually specific only to the libgen.io mirror!
Will need to be rewritten if things change.
Use lookup() to obtain the MD5s for Library Genesis books.
To support Library Genesis, pass True to enable_ads.
See the package README for more detail.
Args:
md5 (str): Library Genesis unique book identifier checksum.
enable_ads (bool): Toggle ad bypass via direct download key
scraping.
Returns:
A direct download URL.
"""
url = self.__req('download', {'md5': md5}, urlonly=True)
if enable_ads:
return url
r = self.__req('download', {'md5': md5})
key = re.findall("&key=(.*?)'", r.text)[0]
return '{}&key={}'.format(url, key)
def download(self, md5, dest='.', use_browser=False):
"""Downloads a Library Genesis book.
Note:
Libgen seems to delay programmatically sent dl requests, even
if the UA string is spoofed and the URL contains a good key,
so I recommend just using get_download_url. Alternatively, you
can set use_browser=True, which will just open up the download
URL in a new browser tab.
Note that if you spam download requests, libgen will temporarily
503. Again, I recommend using get_download_url and downloading
from the browser.
Args:
md5 (str): Library Genesis unique book identifier checksum.
dest (str): Path to download directory.
use_browser (bool): Use browser to download instead.
"""
auth_url = self.get_download_url(md5, enable_ads=False)
if use_browser:
webbrowser.open_new_tab(auth_url)
return
r = requests.get(auth_url)
r.raise_for_status()
with open(os.path.join(dest, md5), 'wb') as f:
for chunk in r.iter_content(1024):
f.write(chunk)
def __req(self, endpoint, getargs, urlonly=False):
url = constants.ENDPOINTS[endpoint].format(
mirror=self.mirror, **getargs
)
if urlonly:
return url
r = requests.get(url)
r.raise_for_status()
return r
```
#### File: pylibgen/tests/test_pylibgen.py
```python
from pylibgen import Library, constants
# Ensure that API endpoints are working and returning the
# expected responses for all mirrors.
def test_api_endpoints():
for mirror in constants.MIRRORS:
lg = Library(mirror)
ids = lg.search('automate the boring stuff', 'title')
assert isinstance(ids, list)
assert set(ids) == set([
'1421206', '1421207', '1421208', '1351717',
'1381538', '1381540', '1529338',
])
books = lg.lookup(ids)
assert isinstance(books, list)
assert isinstance(books[0], dict)
assert {book['md5'] for book in books} == {
'd826b3e593b12422784f50d59c97a966',
'b34564156c3778261ed03167b09f6694',
'4e0efdd614737fd66408fd43a9d5ff10',
'5a64e12e79af379110a31ea04bb6320c',
'c157d6ec28d1a7c4b528f4e6a1ea4c9e',
'054255117b2e86251415292ef48320fd',
'1af2c71c1342e850e1e47013b06f9eb9',
}
book = lg.lookup(1421206)
assert isinstance(book, dict)
assert book['md5'] == '1af2c71c1342e850e1e47013b06f9eb9'
lg.get_download_url(books[0]['md5'])
``` |
{
"source": "691505789/Hierarchical-Double-Attention-Neural-Networks-for-Sentiment-Classification",
"score": 3
} |
#### File: Hierarchical-Double-Attention-Neural-Networks-for-Sentiment-Classification/model/common.py
```python
from keras.callbacks import Callback
from keras.utils import to_categorical
import numpy as np
def get_batch(dataset, classes):
epoch = dataset.epoch
epochs = np.arange(epoch)
while True:
np.random.shuffle(epochs)
for i in epochs:
inpt_data = dataset.docs[i]
outpt_data = to_categorical(dataset.label[i],num_classes=classes)
yield (inpt_data, outpt_data)
class TestHistory(Callback):
    def __init__(self, dataset, classes):
        self.best_acc = []
        self.dataset = dataset
        self.classes = classes
    def on_epoch_end(self, epoch, logs={}):
        score, acc = self.model.evaluate_generator(get_batch(self.dataset, self.classes), steps=self.dataset.epoch)
self.best_acc.append(acc)
print("best test -los:{} -acc:{}".format(score,acc))
def on_train_end(self,logs={}):
print("test acc list: "+str(self.best_acc))
print("BestTest acc:{}".format(max(self.best_acc)))
class ValTestLog(Callback):
def __init__(self,dataset,classes):
self.val_acc = []
self.test_acc = []
self.dataset = dataset
self.classes = classes
def on_epoch_end(self, epoch, logs={}):
acc_val = logs.get('val_acc')
self.val_acc.append(acc_val)
score, acc_test = self.model.evaluate_generator(get_batch(self.dataset, self.classes),steps=self.dataset.epoch)
self.test_acc.append(acc_test)
print("test -los:{} -acc:{}".format(score, acc_test))
def on_train_end(self, logs={}):
val_test_acc = [(val, test) for val, test in zip(self.val_acc,self.test_acc)]
val_test_acc = sorted(val_test_acc,key=lambda a:a[0],reverse=True)
print("BestTestAcc:{}".format(val_test_acc[0]))
with open("./result", 'a') as f:
f.write("Model bset val_acc and test_acc:\n")
f.write(str(val_test_acc[:3])+"\n")
``` |
{
"source": "6923403/Python_Demo",
"score": 3
} |
#### File: demo/pinmu38/pinmu38.py
```python
import requests
import os
import re
from Crypto.Cipher import AES #pip install pycryptodome
from copy import deepcopy
import time
"""
https://v.pinimg.com/videos/mc/hls/38/20/a0/3820a0682a5312c71c0bd9a831cfcc7c_480w_20200426T074057_00001.ts
https://v.pinimg.com/videos/mc/hls/38/20/a0/3820a0682a5312c71c0bd9a831cfcc7c.m3u8
"""
def main():
path = r"N:\codes\m3"
    file_dirlist = os.listdir(path) # read the directory
for m3u8file in file_dirlist:
if len(m3u8file.strip().split(".")) > 1:
if m3u8file.strip().split(".")[-1] == "m3u8":
name = m3u8file.strip().split(".")[0].strip().replace(",", "").replace(" ", "")
#Wait build
if __name__ == '__main__':
main()
print("Over .")
"""
m3u8 notes:
open the .m3u8 file,
then merge the ts segments and the key using ffmpeg
references:
https://github.com/duhaijun/m3u8-to-mp4
https://github.com/mrzhangfelix/m3u8Downloader
https://github.com/DVLZY/video_download_sanjieke.cn
"""
```
#### File: Python_Demo/test/add_end.py
```python
def add_end(L = []):
L.append('END')
return L
```
#### File: Python_Demo/test/file_path.py
```python
import os
# works well on Windows
def get_desk_p():
t = os.path.join(os.path.expanduser('~'),"Desktop")
print(t)
return os.path.join(os.path.expanduser('~'),"Desktop")
print(get_desk_p())
```
#### File: test/generator/generator1.py
```python
g = (x * x for x in range(10))
#for n in g:
# print(n)
def fib(max):
n, a, b = 0, 0, 1
while n < max:
#print(b)
yield b
a, b = b, a + b
n = n + 1
return 'done'
def main():
fib(7)
if __name__ == '__main__':
main()
```
#### File: test/generator/yanghui_triangle.py
```python
def triangle():
L = [1]
while True:
yield L
L = [sum(i) for i in zip([0]+L, L+[0])]
```
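For illustration, the generator above yields successive rows of Yang Hui's (Pascal's) triangle; a short usage sketch:
```python
t = triangle()
for _ in range(5):
    print(next(t))
# [1]
# [1, 1]
# [1, 2, 1]
# [1, 3, 3, 1]
# [1, 4, 6, 4, 1]
```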
#### File: Python_Demo/test/product.py
```python
def product(*kw):
    if len(kw) == 0:
        raise TypeError('product() expects at least one argument')
    sum = 1
    for n in kw:
        sum = sum * n
    return sum
def main():
print('product(5) =', product(5))
print('product(5, 6) =', product(5, 6))
print('product(5, 6, 7) =', product(5, 6, 7))
print('product(5, 6, 7, 9) =', product(5, 6, 7, 9))
    if product(5) != 5:
        print('Test failed!')
    elif product(5, 6) != 30:
        print('Test failed!')
    elif product(5, 6, 7) != 210:
        print('Test failed!')
    elif product(5, 6, 7, 9) != 1890:
        print('Test failed!')
    else:
        try:
            product()
            print('Test failed!')
        except TypeError:
            print('Test passed!')
if __name__ == '__main__':
main()
```
#### File: Python_Demo/test/quadratic.py
```python
import math
def main():
print('q_math(2, 3, 1) = ', q_math(2, 3, 1))
print('q_math(1, 3, -4) = ', q_math(1, 3, -4))
def q_math(a, b, c):
    in_a = float(b ** 2 - 4 * a * c)
    if in_a < 0:
        print('no Result')
        return None
    x1 = (-b + math.sqrt(in_a)) / (2 * a)
    x2 = (-b - math.sqrt(in_a)) / (2 * a)
    print('x1: ', x1, 'x2: ', x2)
    return x1, x2
if __name__ == "__main__":
main()
``` |
{
"source": "69495/Zooarchaeologist",
"score": 3
} |
#### File: Zooarchaeologist/mstdn/mining.py
```python
from mastodon import Mastodon
import json
from login import login
from out_json import jsoner
'''
return_type is "json" or "list"
the default returns a list
'''
def mining(id, return_type="list", switch=None):
print(return_type + " is selected!")
Mastodon = login(switch)
    # get the latest max_id from the local timeline
tl = Mastodon.timeline_local(limit=1)
initial_max_id = tl[0]['id']
toot = Mastodon.account_statuses(id, initial_max_id, None, 40)
while True:
last_max_id = toot[len(toot)-1]['id']
        # fetch the next batch of toots
last_toot = Mastodon.account_statuses(id, last_max_id, None, 40)
toot.extend(last_toot)
# final_max_lenge = len(toot)-1
final_max_lenge = len(last_toot) -1
# account = Mastodon.account(id)
# count = account['statuses_count']
toot_count = toot[0]['account']['statuses_count']
print(str(len(toot)) + '/' + str(toot_count))
if final_max_lenge < 39:
break
if return_type == "json":
filename = str(id)
jsoner(toot,filename)
else:
return toot
if __name__ == '__main__':
    id = int(input())
    mining(id)
```
#### File: Zooarchaeologist/mstdn/out_json.py
```python
import json
from datetime import datetime
def jsoner(data,filename_ahead):
time = datetime.now().strftime("%Y-%m-%d-%H%M")
filename = filename_ahead + "_" + time + "_.json"
with open(filename, "w") as fp:
json.dump(data,fp)
print("complete!")
if __name__ == '__out_json__':
out_json()
``` |
{
"source": "696GrocuttT/HomeAssistant-OctopusEnergy",
"score": 2
} |
#### File: custom_components/octopus_energy/utils.py
```python
from homeassistant.util.dt import (utcnow, as_utc, parse_datetime)
import re
from .const import (
REGEX_TARIFF_PARTS
)
def get_tariff_parts(tariff_code):
matches = re.search(REGEX_TARIFF_PARTS, tariff_code)
if matches == None:
raise Exception(f'Unable to extract product code from tariff code: {tariff_code}')
# According to https://www.guylipman.com/octopus/api_guide.html#s1b, this part should indicate if we're dealing
# with standard rates or day/night rates
energy = matches[1]
rate = matches[2]
product_code = matches[3]
region = matches[4]
return {
"energy": energy,
"rate": rate,
"product_code": product_code,
"region": region
}
def get_active_tariff_code(agreements):
now = utcnow()
latest_agreement = None
latest_valid_from = None
# Find our latest agreement
for agreement in agreements:
valid_from = as_utc(parse_datetime(agreement["valid_from"]))
if latest_valid_from == None or valid_from > latest_valid_from:
latest_agreement = agreement
latest_valid_from = valid_from
if latest_agreement != None:
now = utcnow()
latest_valid_to = None
if "valid_to" in latest_agreement and latest_agreement["valid_to"] != None:
latest_valid_to = as_utc(parse_datetime(latest_agreement["valid_to"]))
# If there is no end for our latest agreement, then it is our most active
if latest_valid_to == None or latest_valid_to >= now:
return latest_agreement["tariff_code"]
return None
# Adapted from https://www.theenergyshop.com/guides/how-to-convert-gas-units-to-kwh
def convert_kwh_to_m3(value):
m3_value = value * 3.6 # kWh Conversion factor
m3_value = m3_value / 40 # Calorific value
return round(m3_value / 1.02264, 3) # Volume correction factor
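# Worked example: convert_kwh_to_m3(100) -> 100 * 3.6 = 360; 360 / 40 = 9.0; 9.0 / 1.02264 rounds to 8.801 m3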
``` |
{
"source": "69kosh/streamPy",
"score": 2
} |
#### File: examples/ava/MNV2.py
```python
from streampy.units.tf.modelPath import modelPath
from tensorflow.python.keras.layers.convolutional import *
from tensorflow.python.keras.layers.normalization import *
from tensorflow.python.keras.layers.core import *
from tensorflow.python.keras.layers.merge import *
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.optimizers import *
from tensorflow.python.keras.applications.mobilenet_v2 import MobileNetV2
class MNV2(modelPath):
'''
    Class for working with the MobileNetV2 model
'''
def __init__(self, config):
self.inputShape = config.get('inputShape', [224, 224])
self.outputShape = config.get('outputShape', 1)
return super().__init__(config)
def create(self):
inputShape = self.inputShape
outputShape = self.outputShape
baseModel = MobileNetV2(include_top=False,# weights=None,
alpha=1.0,
input_shape = (inputShape[0], inputShape[1], 3))
l1 = 0#.001
l2 = 0#.001#.0001
dropout = 0.25
out1 = baseModel.get_layer('block_3_expand_relu').output
out1 = Conv2D(128, (1, 1), padding='valid',
activity_regularizer=regularizers.l1_l2(l1, l2))(out1)
out1 = BatchNormalization()(out1)
out1 = Activation('relu')(out1)
out1 = MaxPooling2D((8, 8))(out1)
out1 = Dropout(dropout)(out1)
out2 = baseModel.get_layer('block_6_expand_relu').output
out2 = Conv2D(128, (1, 1), padding='valid',
activity_regularizer=regularizers.l1_l2(l1, l2))(out2)
out2 = BatchNormalization()(out2)
out2 = Activation('relu')(out2)
out2 = MaxPooling2D((4, 4))(out2)
out2 = Dropout(dropout)(out2)
out3 = baseModel.get_layer('block_13_expand_relu').output
out3 = Conv2D(256, (1, 1), padding='valid',
activity_regularizer=regularizers.l1_l2(l1, l2))(out3)
out3 = BatchNormalization()(out3)
out3 = Activation('relu')(out3)
out3 = MaxPooling2D((2, 2))(out3)
out3 = Dropout(dropout)(out3)
out4 = baseModel.get_layer('out_relu').output
out4 = Conv2D(512, (1, 1), padding='valid',
activity_regularizer=regularizers.l1_l2(l1, l2))(out4)
out4 = BatchNormalization()(out4)
out4 = Activation('relu')(out4)
out4 = Dropout(dropout)(out4)
out = Concatenate(axis=3)([out1, out2, out3, out4])
# out = Conv2D(512, (1, 1), padding='valid',
# activity_regularizer=regularizers.l1_l2(l1, l2))(out)
# out = BatchNormalization()(out)
# out = Activation('relu')(out)
# out = Dropout(dropout)(out)
out = Conv2D(256, (1, 1), padding='valid',
activity_regularizer=regularizers.l1_l2(l1, l2))(out)
out = BatchNormalization()(out)
out = Activation('relu')(out)
out = Dropout(dropout)(out)
out = MaxPooling2D((2, 2))(out)
out = Flatten()(out)
# out = GlobalMaxPool2D()(out)
# out = baseModel.output
out = Dense(256, activity_regularizer=regularizers.l1_l2(l1, l2) )(out)
out = BatchNormalization()(out)
out = Activation('relu')(out)
out = Dropout(dropout)(out)
out = Dense(256, activity_regularizer=regularizers.l1_l2(l1, l2) )(out)
out = BatchNormalization()(out)
out = Activation('relu')(out)
out = Dropout(dropout)(out)
out = Dense(outputShape)(out)
out = Activation('sigmoid')(out)
model = Model(inputs=baseModel.input, outputs=out)
for layer in baseModel.layers:
layer.trainable = False
model.summary()
return model
```
#### File: streamPy/streampy/segment.py
```python
from queue import Queue
from streampy.units.base.loader import getClass
class Segment(object):
'''
    Create a data-processing segment,
    built from the config
'''
def __init__(self, config):
self.config = config
'''
Constructor
'''
def start(self):
config = self.config
self.units = {}
units = {}
outs = {}
ins = {}
for name in config['units']:
print(name)
unitConfig = config['units'][name]
if name not in ins:
ins[name] = {}
if name not in outs:
outs[name] = {}
if 'in' in unitConfig:
for key in unitConfig['in']:
outUnit = unitConfig['in'][key]['from']
if outUnit in units:
if 'out' in unitConfig['in'][key]:
                            # if the output name is given explicitly, use it
outName = unitConfig['in'][key]['out']
else:
                            # otherwise use the input name
outName = key
if 'size' in unitConfig['in'][key]:
queueSize = unitConfig['in'][key]['size']
else:
queueSize = 1
# print('create queue from {}.{} to {}.{} size {}'
# .format(outUnit, outName, name, key, queueSize))
q = Queue(queueSize)
# if outUnit not in queues:
# queues[outUnit] = {}
if outUnit not in outs:
outs[outUnit] = {}
if outName not in outs[outUnit]:
outs[outUnit][outName] = []
# queues[outUnit][name] = q
ins[name][key] = q
outs[outUnit][outName].append(q) #
# print('{} - {} -> {} - {}'.format(name, key, outUnit, outName))
else:
print('unit not found: ' + outUnit)
units[name] = unitConfig
for name in config['units']:
cls = getClass(units[name]['module'], 'Pool')
thread = cls(units[name], ins[name], outs[name])
thread.setDaemon(True)
thread.start()
self.units[name] = thread
# self.units[name] = Pool(units[name], ins[name], outs[name])
def stop(self):
pass
```
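`Segment.start()` only reads the wiring keys shown below ('units', 'module', 'threads', and per-input 'from'/'out'/'size'); a minimal illustrative config, with hypothetical unit names and module paths, might look like this. Unit-specific options such as file paths are passed through the same per-unit dict, and exactly where each worker reads them depends on its base class:
```python
# Hypothetical wiring; unit names and module paths are placeholders.
config = {
    'units': {
        'source': {
            'module': 'streampy.units.common.getRowFromCSV',  # assumed to expose an out named 'row'
            'threads': 1,
        },
        'sink': {
            'module': 'streampy.units.common.saveCSV',
            'in': {
                'row': {'from': 'source', 'size': 10},  # queue of size 10 fed by source's 'row' out
            },
        },
    },
}

segment = Segment(config)
segment.start()
```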
#### File: units/base/socketServer.py
```python
from streampy.units.base.loader import getClass
from threading import Thread
import queue
from typing import Iterable
import socket
class Worker (Thread):
def __init__(self, connect, outs, config):
'''
Constructor
'''
Thread.__init__(self)
self.connect = connect
self.outs = outs
self.config = config.get('config', {})
self.init()
def init(self):
pass
def run(self):
        # initialise
        # start the loop
        # inside the loop:
while True:
            # build the input package: each worker defines its own rules -
            # one-to-one, gathered from several queues, or as a batch
# print('before prepare {}'.format(self.__class__.__name__))
result = self.prepare()
if not result:
break
            # distribute the data across the outputs
# print('before send {}'.format(self.__class__.__name__))
self.send(result[0], result[1])
            # check whether it is time to sleep
def prepare(self):
print('Abstract method in {} not implemented'.format(self.__class__.__name__))
return []
def send(self, outData, outMeta):
if isinstance(outData, Iterable):
for row in outData:
for key in row:
if key in self.outs:
package = {'data':row[key], 'meta':outMeta}
for queue in self.outs[key]:
queue.put(package)
# self.outs[key][queue].put(package)
# print('putted')
# print(package)
else:
print('out not found - {}'.format(key))
class Pool(Thread):
def __init__(self, config, inQueues, outQueues):
'''
Constructor
'''
Thread.__init__(self)
self.config = config;
self.inQueues = inQueues;
self.outQueues = outQueues;
self.threadsCount = config.get('threads', 1)
        # initialise the array of worker threads:
        # slots 0..threadsCount get filled,
        # dead threads are discarded, and once no free slot is left
        # no new connections are accepted.
self.threads = {};
socketPort = config.get('port', 9090)
socketListen = config.get('listen', 1)
self.socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
self.socket.bind(('', socketPort))
self.socket.listen(socketListen)
def run(self):
cls = getClass(self.config['module'], 'Worker')
while True:
            # wait for the next connection
connect, addr = self.socket.accept()
            # look for a free slot for it
index = None
            # scan all thread slots
for i in range(0, self.threadsCount):
if self.threads.get(i, None) == None:
index = i
break
self.threads[i].join(timeout=0.0)
if not self.threads[i].is_alive():
index = i
break
if index == None:
                # no free slot found
connect.close()
else:
                # create a worker thread for it and handle the connection there
thread = cls(connect, self.outQueues, self.config)
thread.setDaemon(True)
thread.start()
                # add it to the thread registry
self.threads[index] = thread
print(thread)
```
#### File: units/common/getField.py
```python
from streampy.units.base.pooled import Pool, Worker as Base
class Worker(Base):
'''
    Extract the given field and add the configured prefix and postfix to it
'''
def process(self, inData, inMeta):
config = self.config
value = inData['row'][config.get('field', 0)]
if isinstance(value, str):
value = config.get('prefix', '') + value + config.get('postfix', '')
# print(value)
return [{'field':value}]
```
#### File: units/common/getRowFromBboxes.py
```python
import csv
from random import shuffle, randint
from streampy.units.base.pooled import Pool, Worker as Base
class Worker(Base):
'''
    Read a text file with multiple bounding boxes per image:
    first line  - the file name
    second line - the number of bounding boxes
    following lines - the boxes themselves
'''
def init(self):
# print('getRowFromCSV.init')
self.fullData = []
config = self.config
with open(config['file'], 'r') as csvfile:
reader = csv.reader(csvfile, delimiter = ' ')
isFilename = True
isBboxCount = False
isBbox = 0
dataRow = {}
for row in reader:
# print(row)
if isFilename:
isFilename = False
isBboxCount = True
dataRow['filename'] = row[0]
elif isBboxCount:
isBboxCount = False
if int(row[0]) > 0:
isBbox = int(row[0])
else:
isBbox = 1
elif isBbox > 0:
isBbox -= 1
if 'bboxes' not in dataRow:
dataRow['bboxes'] = []
dataRow['bboxes'].append(row)
if isBbox == 0:
isFilename = True
self.fullData.append(dataRow)
# print(dataRow['filename'])
# print(dataRow)
dataRow = {}
if config.get('shuffle', False):
shuffle(self.fullData)
self.count = len(self.fullData)
self.offset = int(self.count * config.get('from', 0.0))
print(('samples count:', len(self.fullData)))
def process(self, inData, inMeta):
data = []
config = self.config
if config.get('shuffle', False):
self.offset = randint(int(self.count * config.get('from', 0.0)),
int(self.count * config.get('to', 1.0)) - 1)
else:
self.offset += 1
if self.offset > self.count * config.get('to', 1.0) - 1:
self.offset = int(self.count * config.get('from', 0.0))
# print(self.fullData[offset])
data.append({'row':self.fullData[self.offset]})
return data
def send(self, outData, outMeta):
config = self.config
if 'metaIdField' in config:
            # if an identifier has to be injected into the meta,
            # send each piece of data separately
for data in outData:
outMeta.update({'id':data['row'][config['metaIdField']]})
# print('putting')
# print(data)
# print(self.outs)
super().send([data], outMeta)
else:
super().send(outData, outMeta)
```
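The parser in `init()` above expects a layout similar to the WIDER FACE annotation format; a hypothetical input file (all values made up) would look like this:
```python
# Hypothetical contents of config['file'], space-delimited, as consumed by Worker.init:
#
#   images/0001.jpg
#   2
#   10 20 50 80
#   60 40 30 30
#   images/0002.jpg
#   0
#   0 0 0 0
#
# Note that a count of 0 is still followed by one dummy box line (the "isBbox = 1" branch above).
```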
#### File: units/common/implode.py
```python
from streampy.units.base.pooled import Pool, Worker as Base
import numpy as np
import copy
class Worker(Base):
'''
    Transform the incoming data into a set of separate requests linked by their meta.
    After processing they can be squashed back together with a reduce step.
    A merge identifier is also generated for each part.
'''
def init(self):
self.mainBuffer = []
self.bufferSize = 0
self.entityBuffer = {}
def prepare(self):
data = {}
meta = {}
'''
        Receive until all parts of the next entity have been collected,
        or until the buffer limit is reached
'''
size = self.config.get('bufferSize', 1000)
returnedId = None
while size > self.bufferSize:
            key = list(self.ins.keys())[0]  # assume there is always exactly one input queue
package = self.ins[key].get()
entityId = package['meta']['id']
# print(package['meta'])
if entityId not in self.entityBuffer:
self.entityBuffer[entityId] = {'data':{},
'meta':copy.deepcopy(package['meta'])}
self.entityBuffer[entityId]['meta']['partKeys'] = []
self.mainBuffer.append(entityId)
# print('entity added to buffer', entityId)
partKey = package['meta']['partKey']
# print('entity:', entityId, 'partKey:', partKey, 'all count',package['meta']['partCount'])
entity = self.entityBuffer[entityId]
entity['data'][partKey] = package['data']
entity['meta']['partKeys'].append(package['meta']['partKey'])
entity['meta']['partCount'] = package['meta']['partCount']
self.bufferSize += 1
if len(entity['meta']['partKeys']) >= int(package['meta']['partCount']):
returnedId = entityId
break
if (returnedId == None) and (size <= self.bufferSize):
returnedId = self.mainBuffer.pop(0)
print('Buffer pop: ', returnedId)
if returnedId != None and returnedId in self.entityBuffer:
data = self.entityBuffer[returnedId]['data']
meta = self.entityBuffer[returnedId]['meta']
self.bufferSize -= len(self.entityBuffer[returnedId]['meta']['partKeys'])
del self.entityBuffer[returnedId]
# print ((data, meta))
return (data, meta)
# def process2(self, inData, inMeta):
# image = ''
# for k in inData:
# if not len(image):
# image = np.array(inData[k])
# else:
# image = np.append(image, inData[k], axis=1)
# # image[0:96,0:96,0:3] = np.array(inData[k])
# # print(image)
# return [{'row':np.array(image)}]
def process(self, inData, inMeta):
# print(inData)
return [{'row':inData}]
```
#### File: units/common/saveCSV.py
```python
from streampy.units.base.pooled import Pool, Worker as Base
from typing import Iterable
import csv
class Worker(Base):
'''
    Worker that saves data to a csv file
'''
def init(self):
file = self.config.get('file')
delimiter = self.config.get('delimiter',' ')
quotechar = self.config.get('quotechar', '/')
csvfile = open(file, 'w', newline='')
self.writer = csv.writer(csvfile, delimiter=delimiter,
quotechar=quotechar, quoting=csv.QUOTE_MINIMAL)
def process(self, inData, inMeta):
# print('qweqweqwe')
self.writer.writerow(inData['row'])
# return [inData]
return []
# def send(self, outData, outMeta):
# self.writer.writerow(outData['row'])
```
#### File: units/datasets/getIdentityFromCelebA.py
```python
import csv
from random import shuffle, randint
from pprint import pprint
from streampy.units.base.pooled import Pool, Worker as Base
import time
class Worker(Base):
'''
    Read the csv file, shuffle it and draw samples from it
'''
def init(self):
self.data = {}
config = self.config
filename = config['filename']
imagesPath = config.get('imagesPath')
print('Loading file {}...'.format(filename))
with open(filename, 'r') as file:
reader = csv.reader(file, delimiter = ' ')
for row in reader:
if row[1] not in self.data:
self.data[row[1]] = {'id': int(row[1]), 'images':[]}
self.data[row[1]]['images'].append(imagesPath + row[0])
def process(self, inData, inMeta):
data = []
try:
row = self.data.popitem()
data.append({'identity': row[1]})
except Exception:
time.sleep(1)
pass
return data
def send(self, outData, outMeta):
config = self.config
        # if an identifier has to be injected into the meta,
        # send each piece of data separately
# print(outData)
for data in outData:
outMeta.update({'id':data['identity']['id']})
super().send([data], outMeta)
```
#### File: units/images/resizeImageBboxes.py
```python
import cv2
import random
import numpy as np
from streampy.units.base.pooled import Pool, Worker as Base
import copy
class Worker(Base):
'''
    Augment and resize the image to the target size.
    Also transform the boxes, dropping those that fall outside the frame or are too small.
'''
def process(self, inData, inMeta):
config = self.config
image = inData['sample']['image']
bboxes = []
for predict in inData['sample']['predict']:
bboxes.append(copy.copy(predict))
size = (image.shape[0], image.shape[1])
        # resize
ratio = min([float(config['height'] / size[0]), float(config['width'] / size[1])])
x = int(size[1] * ratio);
y = int(size[0] * ratio);
image = cv2.resize(image, (x, y), interpolation = cv2.INTER_AREA )
bboxes2 = []
for predict in bboxes:
predict[0] = float(predict[0]) * ratio
predict[1] = float(predict[1]) * ratio
predict[2] = float(predict[2]) * ratio
predict[3] = float(predict[3]) * ratio
if ((predict[2] / 2 + predict[0] > 0)
and (predict[2] /2 + predict[0] < x)
and (predict[3] / 2 + predict[1] > 0)
and (predict[3] / 2 + predict[1] < y)
and (predict[2] > config.get('minWidth', 10))
and (predict[3] > config.get('minHeigth', 10))
):
bboxes2.append(predict)
# print(('ratio', ratio, 'bboxes', bboxes))
# фитим
w = config['width'] - x
h = config['height'] - y
top = h // 2
bottom = h - top
left = w // 2
right = w - left
image = cv2.copyMakeBorder(image, top, bottom, left, right,
cv2.BORDER_CONSTANT, value=[0, 0, 0])
bboxes = []
for predict in bboxes2:
predict[0] = float(predict[0]) + left
predict[1] = float(predict[1]) + top
bboxes.append(predict)
#
# image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
#
# if ('toFloat' in config) and config['toFloat']:
# image = np.float16(image)
#
# if 'multiply' in config:
# image *= config['multiply']
return [{'sample':{'image':image, 'predict': bboxes}}]
cnt = 0
```
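The resize step above scales by the limiting ratio and then letterboxes the result into a fixed canvas, shifting every box by the padding added on the left and top. The same arithmetic on plain numbers, without OpenCV; the target size and the sample box are made up:
```python
# Letterbox-resize arithmetic as in process(): scale by the limiting ratio,
# then shift boxes by the left/top padding. All numbers are illustrative.
target_w, target_h = 320, 320
img_w, img_h = 640, 480                      # original image size
x0, y0, bw, bh = 100.0, 50.0, 200.0, 120.0   # sample bbox (x, y, w, h)

ratio = min(target_h / img_h, target_w / img_w)        # 0.5
new_w, new_h = int(img_w * ratio), int(img_h * ratio)  # 320 x 240

left = (target_w - new_w) // 2   # horizontal padding added on the left
top = (target_h - new_h) // 2    # vertical padding added on the top

x0, y0, bw, bh = x0 * ratio + left, y0 * ratio + top, bw * ratio, bh * ratio
print((x0, y0, bw, bh))          # (50.0, 65.0, 100.0, 60.0)
```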
#### File: units/images/saveImage.py
```python
import cv2
import os
from streampy.units.base.pooled import Pool, Worker as Base
class Worker(Base):
def process(self, inData, inMeta):
basePath = self.config.get('basePath', '')
pathMetaField = self.config.get('pathMetaField', 'id')
filenameMetaField = self.config.get('filenameMetaField', 'partId')
path = basePath + str(inMeta.get(pathMetaField, '')) + '\\'
try:
os.makedirs(path)
except:
pass
filename = (path + str(inMeta.get(filenameMetaField, 'file')) + '.jpg')
try:
cv2.imwrite(filename, inData['image'])
except Exception:
print('Something wrong with file {}'.format(filename))
return []
return [{'filename':filename}]
```
#### File: units/images/showImageBboxes.py
```python
import cv2
from streampy.units.base.pooled import Pool, Worker as Base
class Worker(Base):
'''
Display the image in a window
'''
def process(self, inData, inMeta):
# print(123)
image = inData['sample']['image']
for bbox in inData['sample']['predict']:
# bbox = inData['image']['predict'][i]
x = int(bbox[0])
y = int(bbox[1])
w = int(bbox[2])
h = int(bbox[3])
cv2.rectangle(image,(x,y),(x+w,y+h),(0,255,0),2)
cv2.putText(image, str(bbox[4:]) , (x + 5, y + h - 8), 0, 0.4, (0,255,0))
zoom = self.config.get('zoom', 1.0)
if zoom != 1.0:
image = cv2.resize(image,
(int(image.shape[1] * zoom), int(image.shape[0] * zoom)))
cv2.imshow(self.config.get('window', 'frame'), image)
cv2.waitKey(self.config.get('wait', 0))
return []
```
#### File: units/mapReduce/map.py
```python
from streampy.units.base.pooled import Pool, Worker as Base
from typing import Iterable
import copy
from copy import deepcopy
class Worker(Base):
'''
Collect several parts into one; for that we keep a "buffer"
of fixed length. If the parts are not all collected before the buffer runs out - c'est la vie
'''
def init(self):
config = self.config.get('emit', {})
self.keyConfig = list(config.get('key', [0]))
self.valueConfig = list(config.get('value', [1]))
def process(self, inData, inMeta):
# print('items', inData['item'])
emits = self.emit(inData['item'])
data = []
# print(emits)
for emit in emits:
# print(emit)
data.append({'emit':{'key':'_'.join(emit[0]),
'rawKey':emit[0], 'value':emit[1]}})
# print(data)
return data
# return []
def emit(self, value):
key = []
for i in self.keyConfig:
key.append(value[i])
val = []
for i in self.valueConfig:
val.append(value[i])
# val = value[self.valueConfig]
return [(key, [val])]
def prepareSendPackage (self, outData, outMeta):
# print(outData)
data = outData['value']
meta = deepcopy(outMeta)
meta['key'] = outData['key']
meta['rawKey'] = outData['rawKey']
# print({'data':data, 'meta':meta})
return {'data':data, 'meta':meta}
```
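The `emit` configuration above lists which indices of an incoming item form the key and which form the value. A short sketch of that selection on a plain list, with a hypothetical item and config:
```python
# Key/value selection as done by Worker.emit(): 'key' and 'value' list the
# indices picked from each incoming item. Item and config are hypothetical.
config = {'key': [0, 1], 'value': [2]}
item = ['2021-06-01', 'click', 42]

key = [item[i] for i in config['key']]       # ['2021-06-01', 'click']
value = [item[i] for i in config['value']]   # [42]

emit = {'key': '_'.join(key), 'rawKey': key, 'value': [value]}
print(emit)  # {'key': '2021-06-01_click', 'rawKey': [...], 'value': [[42]]}
```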
#### File: units/socket/pickleReceiver.py
```python
from streampy.units.base.socketServer import Pool, Worker as Base
import pickle
from io import BytesIO
class Worker(Base):
def init(self):
self.buffer = BytesIO()
def prepare(self):
'''
Socket handling for receiving pickle packets
'''
result = None
# the goal is to obtain a complete packet, which may
# have been transmitted across several transport-level packets
while True:
try:
# try to extract a packet from the buffer
picklePos = self.buffer.tell()
result = pickle.load(self.buffer)
# print(('data!', len(result)))
self.buffer = BytesIO(self.buffer.read())
# if that worked, try to reset the buffer
# in case this is the last packet in the buffer
# pos = self.buffer.tell()
# size = self.buffer.seek(0, 2)
# if pos == size:
# self.buffer.close()
# self.buffer = BytesIO()
# else:
# print((pos, size))
# self.buffer.seek(pos, 0)
break
except:
# restore the read position
self.buffer.seek(picklePos, 0)
# if a packet could not be extracted, try to
# add more data to the buffer from the socket
try:
received = self.connect.recv(self.config.get('bufferSize', 128*1024))
except:
break
# print(('received!', len(received)))
if not received:
break
# if data was received, append it to the buffer, restoring the position
pos = self.buffer.tell()
# print(('pos!', pos))
self.buffer.seek(0, 2)
pos2 = self.buffer.tell()
# print(('pos2!', pos2))
self.buffer.write(received)
self.buffer.seek(pos, 0)
pos = self.buffer.tell()
# print(('pos2!', pos))
return result
```
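The receiver keeps a `BytesIO` buffer, retries `pickle.load` on it, and only reads more bytes from the socket when a whole object is not yet available. The sender side is not shown here; a minimal sketch of the matching framing, assuming objects are written back-to-back with plain `pickle.dumps`:
```python
# Matching sender-side framing: pickled objects are written back-to-back,
# and the reader retries pickle.load until a whole object has arrived.
# The payloads are illustrative.
import pickle
from io import BytesIO

stream = pickle.dumps({'row': [1, 2]}) + pickle.dumps({'row': [3, 4]})

buffer = BytesIO(stream[:10])     # pretend only part of the data arrived
try:
    obj = pickle.load(buffer)     # fails: the first object is incomplete
except Exception:
    buffer = BytesIO(stream)      # more bytes arrive; retry succeeds
    obj = pickle.load(buffer)

print(obj)                        # {'row': [1, 2]}
print(pickle.load(buffer))        # {'row': [3, 4]}
```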
#### File: units/tf/fitOnBatch.py
```python
from tensorflow.python.keras.layers.convolutional import *
from tensorflow.python.keras.layers.normalization import *
from tensorflow.python.keras.layers.core import *
from tensorflow.python.keras.layers.merge import *
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.optimizers import *
from tensorflow.python.keras import regularizers
import numpy as np
import os
from tensorflow.python.keras.applications.vgg16 import VGG16
from tensorflow.python.keras.applications.mobilenet_v2 import MobileNetV2
from streampy.units.base.pooled import Pool, Worker as Base
class Worker(Base):
'''
Train the network
'''
def init(self):
config = self.config['config']
shape = config['shape']
loss = config['loss']
optimizer = config['optimizer']
metrics = config['metrics']
self.model = self.createModelMNV24(shape, 1)
self.model.compile(
loss=loss, #'mean_absolute_error', #'binary_crossentropy', #'mean_squared_error',
optimizer=optimizer, #Adam(lr=0.001,),#RMSprop(lr=0.1),
metrics=metrics) #'accuracy', 'mse',
'''
Assemble data from the queue into a mini-batch
'''
def prepare(self):
config = self.config['config']
batch = config['batch']
dataInput = []
dataPredict = []
meta = {}
key = 'train'
while batch > 0:
package = self.ins[key].get()
dataInput.append(package['data']['image'])
dataPredict.append(package['data']['predict'])
meta.update(package['meta'])
batch -= 1
return ({'input':np.array(dataInput), 'predict':np.array(dataPredict)}, meta)
def process(self, inData, inMeta):
config = self.config['config']
inp = inData['input']
predict = inData['predict']
result = self.model.train_on_batch(x=inp, y=predict)
print(result)
def createModelVGG16(self, inputShape = (320, 320), outputShape = 1):
baseModel = VGG16(include_top=False, #weights ='imagenet',#weights=None,
input_shape = (inputShape[0], inputShape[1], 3))
# baseModel.summary()
l1 = 0#.001
l2 = 0#.001#.0001
dropout = 0.25
out = baseModel.get_layer('block5_pool').output
out = Conv2D(128, (1, 1), padding='valid',
activity_regularizer=regularizers.l1_l2(l1, l2))(out)
out = BatchNormalization()(out)
out = Activation('relu')(out)
out = Dropout(dropout)(out)
out = Flatten()(out)
# out = GlobalMaxPool2D()(out)
# out = baseModel.output
out = Dense(256, activity_regularizer=regularizers.l1_l2(l1, l2) )(out)
out = BatchNormalization()(out)
out = Activation('relu')(out)
out = Dropout(dropout)(out)
out = Dense(256, activity_regularizer=regularizers.l1_l2(l1, l2) )(out)
out = BatchNormalization()(out)
out = Activation('relu')(out)
out = Dropout(dropout)(out)
out = Dense(outputShape)(out)
out = Activation('sigmoid')(out)
model = Model(inputs=baseModel.input, outputs=out)
# for layer in baseModel.layers:
# layer.trainable = False
model.summary()
return model
def createModelMNV24(self, inputShape = (320, 320), outputShape = 1):
baseModel = MobileNetV2(include_top=False, weights=None,
alpha=1,
input_shape = (inputShape[0], inputShape[1], 3))
l1 = 0#.001
l2 = 0#.001#.0001
dropout = 0.25
out1 = baseModel.get_layer('block_3_expand_relu').output
out1 = Conv2D(128, (1, 1), padding='valid',
activity_regularizer=regularizers.l1_l2(l1, l2))(out1)
out1 = BatchNormalization()(out1)
out1 = Activation('relu')(out1)
out1 = MaxPooling2D((8, 8))(out1)
out1 = Dropout(dropout)(out1)
out2 = baseModel.get_layer('block_6_expand_relu').output
out2 = Conv2D(128, (1, 1), padding='valid',
activity_regularizer=regularizers.l1_l2(l1, l2))(out2)
out2 = BatchNormalization()(out2)
out2 = Activation('relu')(out2)
out2 = MaxPooling2D((4, 4))(out2)
out2 = Dropout(dropout)(out2)
out3 = baseModel.get_layer('block_13_expand_relu').output
out3 = Conv2D(256, (1, 1), padding='valid',
activity_regularizer=regularizers.l1_l2(l1, l2))(out3)
out3 = BatchNormalization()(out3)
out3 = Activation('relu')(out3)
out3 = MaxPooling2D((2, 2))(out3)
out3 = Dropout(dropout)(out3)
out4 = baseModel.get_layer('out_relu').output
out4 = Conv2D(512, (1, 1), padding='valid',
activity_regularizer=regularizers.l1_l2(l1, l2))(out4)
out4 = BatchNormalization()(out4)
out4 = Activation('relu')(out4)
out4 = Dropout(dropout)(out4)
out = Concatenate(axis=3)([out1, out2, out3, out4])
# out = Conv2D(512, (1, 1), padding='valid',
# activity_regularizer=regularizers.l1_l2(l1, l2))(out)
# out = BatchNormalization()(out)
# out = Activation('relu')(out)
# out = Dropout(dropout)(out)
out = Conv2D(256, (1, 1), padding='valid',
activity_regularizer=regularizers.l1_l2(l1, l2))(out)
out = BatchNormalization()(out)
out = Activation('relu')(out)
out = Dropout(dropout)(out)
out = MaxPooling2D((2, 2))(out)
out = Flatten()(out)
# out = GlobalMaxPool2D()(out)
# out = baseModel.output
out = Dense(256, activity_regularizer=regularizers.l1_l2(l1, l2) )(out)
out = BatchNormalization()(out)
out = Activation('relu')(out)
out = Dropout(dropout)(out)
out = Dense(256, activity_regularizer=regularizers.l1_l2(l1, l2) )(out)
out = BatchNormalization()(out)
out = Activation('relu')(out)
out = Dropout(dropout)(out)
out = Dense(outputShape)(out)
out = Activation('sigmoid')(out)
model = Model(inputs=baseModel.input, outputs=out)
# for layer in baseModel.layers:
# layer.trainable = False
model.summary()
return model
``` |
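`prepare()` above drains `batch` packages from the 'train' queue and stacks images and targets into arrays before a single `train_on_batch` call. A standalone sketch of that stacking; the shapes and values are illustrative:
```python
# Stacking queued packages into one mini-batch, as prepare() does before
# model.train_on_batch. Shapes and values are illustrative.
import numpy as np

queue = [{'data': {'image': np.zeros((320, 320, 3)), 'predict': [1.0]}}
         for _ in range(4)]

images = np.array([pkg['data']['image'] for pkg in queue])
targets = np.array([pkg['data']['predict'] for pkg in queue])
print(images.shape, targets.shape)  # (4, 320, 320, 3) (4, 1)
```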
{
"source": "6A/asmsq",
"score": 2
} |
#### File: arch/arm/tests.py
```python
from ..testsource import * # pylint: disable=W0614
class ArmTestSource(TestSource):
@property
def name(self) -> str:
return 'arm'
@property
def test_cases(self) -> TestCases:
yield TestCase('should encode single cps instruction', [
self.make_call('cps', 'Mode::USR')
], bytearray(b'\x10\x00\x02\xf1'))
```
#### File: asmdot/arch/__init__.py
```python
from abc import ABC, abstractmethod
from argparse import ArgumentParser, Namespace
from parsy import regex, eof, seq, Parser
from typing import Callable, IO, Iterator, List
from ..ast import Declaration, Function, TestCase, TestCaseCall
from ..helpers import relative, parse, ws, end
from ..options import Options
from .testsource import TestCases, TestSource
Declarations = Iterator[Declaration]
Functions = Iterator[Function]
class Architecture(ABC, Options):
"""An architecture parser."""
@property
@abstractmethod
def name(self) -> str:
"""Returns the name of the architecture."""
pass
@staticmethod
def register(parser: ArgumentParser) -> None:
"""Registers the architecture, allowing it to add command-line parameters."""
pass
def initialize(self, args: Namespace) -> None:
"""Initializes the architecture using the provided command-line arguments."""
super().initialize_options(args, self.name)
@property
@abstractmethod
def tests(self) -> TestSource:
"""Returns the tests for the architecture."""
pass
@property
def declarations(self) -> Declarations:
"""Returns an iterator over all non-instruction declarations for the architecture."""
pass
@property
@abstractmethod
def functions(self) -> Functions:
"""Returns an iterator over all functions for the architecture."""
pass
```
#### File: languages/go/generate.py
```python
from asmdot import * # pylint: disable=W0614
from typing import Tuple
header = '''// Automatically generated file.
package {}
import (
\t"bytes"
\t"encoding/binary"
\t"errors"
\t"io"
)
// Bypass unused module error if we don't have assertions.
var _ = errors.New
var (
\tinterbuf = [8]byte{{}}
\tbyteOrder = binary.LittleEndian
\tswappedByteOrder = binary.BigEndian
)
func write16(w io.Writer, x uint16) error {{
\tbyteOrder.PutUint16(interbuf[:], x)
\t_, err := w.Write(interbuf[:2])
\treturn err
}}
func writeSwapped16(w io.Writer, x uint16) error {{
\tswappedByteOrder.PutUint16(interbuf[:], x)
\t_, err := w.Write(interbuf[:2])
\treturn err
}}
func write32(w io.Writer, x uint32) error {{
\tbyteOrder.PutUint32(interbuf[:], x)
\t_, err := w.Write(interbuf[:4])
\treturn err
}}
func writeSwapped32(w io.Writer, x uint32) error {{
\tswappedByteOrder.PutUint32(interbuf[:], x)
\t_, err := w.Write(interbuf[:4])
\treturn err
}}
func write64(w io.Writer, x uint64) error {{
\tbyteOrder.PutUint64(interbuf[:], x)
\t_, err := w.Write(interbuf[:])
\treturn err
}}
func writeSwapped64(w io.Writer, x uint64) error {{
\tswappedByteOrder.PutUint64(interbuf[:], x)
\t_, err := w.Write(interbuf[:])
\treturn err
}}
'''
header_x86 = '''
func getPrefix16(r *Reg16) byte {
if uint8(*r) < 8 {
return byte(*r)
}
*r = Reg16(uint8(*r) - 8)
return 1
}
func getPrefix32(r *Reg32) byte {
if uint8(*r) < 8 {
return byte(*r)
}
*r = Reg32(uint8(*r) - 8)
return 1
}
func getPrefix64(r *Reg64) byte {
if uint8(*r) < 8 {
return byte(*r)
}
*r = Reg64(uint8(*r) - 8)
return 1
}
'''
def _camel_case(s: str) -> str:
return s[0] + s.title().replace('_', '')[1:]
def _pascal_case(s: str) -> str:
return s.title().replace('_', '')
@handle_command_line()
class GoEmitter(Emitter):
modified_list: List[str] = []
var_map: Dict[str, Tuple[IrType, IrType]] = {}
@property
def language(self):
return 'go'
@property
def filename(self):
return f'{self.arch}/{self.arch}.go'
@property
def test_filename(self):
return f'{self.arch}/{self.arch}_test.go'
def get_function_name(self, function: Function) -> str:
return _pascal_case(function.fullname)
def get_operator(self, op: Operator) -> str:
if op == OP_BITWISE_XOR:
return '!='
else:
return op.op
def get_builtin_name(self, builtin: Builtin) -> str:
if builtin == BUILTIN_X86_PREFIX:
return 'getPrefix'
else:
return builtin.name
def __init__(self, args: Namespace, arch: str) -> None:
super().__init__(args, arch)
self.indent = Indent('\t')
def write_header(self):
self.write(header.format(self.arch))
if self.arch == 'x86':
self.write(header_x86)
def write_separator(self):
self.writeline()
def write_expr(self, expr: Expression):
if isinstance(expr, Binary):
self.write('(', expr.l, ' ', expr.op, ' ', expr.r, ')')
elif isinstance(expr, Unary):
self.write(expr.op, expr.v)
elif isinstance(expr, Ternary):
self.write('(func() { if ', expr.condition, ' { return ', expr.consequence, ' } else { return ', expr.alternative, ' } })()')
elif isinstance(expr, Var):
name = _camel_case(expr.name)
if name in self.modified_list:
name = name + '_'
else:
name = f'{self.var_map[expr.name][1]}({name})'
self.write(name)
elif isinstance(expr, Call):
if self.var_map[expr.args[0].name][0].id == 'Reg16':
self.write(expr.builtin, '16(&', expr.args[0].name, ')')
elif self.var_map[expr.args[0].name][0].id == 'Reg32':
self.write(expr.builtin, '32(&', expr.args[0].name, ')')
elif self.var_map[expr.args[0].name][0].id == 'Reg64':
self.write(expr.builtin, '64(&', expr.args[0].name, ')')
elif isinstance(expr, Literal):
self.write(expr.value)
else:
raise UnsupportedExpression(expr)
def write_stmt(self, stmt: Statement):
if isinstance(stmt, Assign):
self.writelinei(stmt.variable, ' = ', stmt.value)
elif isinstance(stmt, Conditional):
self.writelinei('if ', stmt.condition, ' {')
with self.indent.further():
self.write_stmt(stmt.consequence)
if stmt.alternative:
self.writelinei('} else {')
with self.indent.further():
self.write_stmt(stmt.alternative)
self.writelinei('}')
elif isinstance(stmt, Block):
for s in stmt.statements:
self.write_stmt(s)
elif isinstance(stmt, Set):
if stmt.type.under in (TYPE_U8, TYPE_I8):
self.writelinei('if err := w.WriteByte(byte(', stmt.value, ')); err != nil {')
else:
if self.bigendian:
write = f'writeSwapped{stmt.type.under.size * 8}'
else:
write = f'write{stmt.type.under.size * 8}'
self.writelinei('if err := ', write, '(w, uint', stmt.type.under.size * 8, '(', stmt.value, ')); err != nil {')
self.writelinei('\treturn err')
self.writelinei('}')
elif isinstance(stmt, Define):
self.writelinei(f'{stmt.name} := ', stmt.value)
else:
raise UnsupportedStatement(stmt)
def write_function(self, fun: Function):
self.modified_list.clear()
self.write(f'func {fun.name}(w *bytes.Buffer')
for name, typ, usagetyp in fun.params:
self.write(f', {_camel_case(name)} {typ}')
self.var_map[name] = typ, usagetyp
self.write(') error {\n')
self.indent += 1
for name, typ, usagetyp in fun.params:
if typ is TYPE_BOOL and usagetyp is not TYPE_BOOL:
name = _camel_case(name)
self.writelinei(f'var {name}_ {usagetyp} = 0')
self.writelinei(f'if {name} {{')
self.writelinei(f'\t{name}_ = 1')
self.writelinei( '}')
self.modified_list.append(name)
for condition in fun.conditions:
self.writelinei('if !', condition, ' {')
self.writelinei('\treturn errors.New("Failed precondition: ', condition, '.")')
self.writelinei('}')
for stmt in fun.body:
self.write_stmt(stmt)
self.writelinei('return nil')
self.write('}\n\n')
self.indent -= 1
def write_decl(self, decl: Declaration):
if isinstance(decl, Enumeration):
self.writeline('// ', decl.descr)
self.writeline('type ', decl.type, ' ', decl.type.underlying, '\n')
self.writeline('const (')
for _, value, descr, fullname in decl.members + decl.additional_members:
self.writeline('\t// ', descr)
self.writeline('\t', fullname, ' ', decl.type, ' = ', value)
self.writeline(')')
elif isinstance(decl, DistinctType):
self.writeline('// ', decl.descr)
self.writeline('type ', decl.type, ' ', decl.type.underlying, '\n')
self.writeline('const (')
for name, value in decl.constants:
self.writeline('\t', name.upper(), ' ', decl.type, ' = ', value)
self.writeline(')')
else:
raise UnsupportedDeclaration(decl)
self.writeline()
def write_test_header(self):
self.writeline('package ', self.arch, '\n')
self.writeline('import (\n\t"bytes"\n\t"testing"\n)\n')
def write_test(self, test: TestCase):
self.write('func Test', _pascal_case(test.name.replace(' ', '_')), '(t *testing.T) {\n')
self.indent += 1
self.writelinei('buf := new(bytes.Buffer)\n')
def arg_str(arg: TestCaseArgument):
if isinstance(arg, ArgConstant):
return f'{arg.const.name.upper()}'
if isinstance(arg, ArgEnumMember):
return arg.member.fullname
elif isinstance(arg, ArgInteger):
return str(arg.value)
else:
raise UnsupportedTestArgument(arg)
for func, args in test.calls:
self.writei(func.name, '(buf')
for arg in args:
self.write(', ', arg_str(arg))
self.write(')\n')
self.writeline()
self.writelinei('if buf.Len() != ', len(test.expected), ' {')
self.writelinei('\tt.Errorf("buf.Len() = %d; want ', len(test.expected), '", buf.Len())')
self.writelinei('}')
self.writelinei('if !bytes.Equal(buf.Bytes(), []byte{', test.expected_bytes, '}) {')
self.writelinei('\tt.Errorf("buf.Bytes() is not valid")')
self.writelinei('}')
self.indent -= 1
self.write('}\n\n')
```
#### File: languages/haskell/generate.py
```python
from asmdot import * # pylint: disable=W0614
@handle_command_line()
class HaskellEmitter(Emitter):
is_first_statement: bool = False
@property
def language(self):
return 'haskell'
@property
def filename(self):
return f'src/Asm/Internal/{self.arch.capitalize()}.hs'
@property
def test_filename(self):
return f'test/Asm/{self.arch.capitalize()}Spec.hs'
def __init__(self, args: Namespace, arch: str) -> None:
super().__init__(args, arch)
self.indent = Indent(' ')
def get_type_name(self, ty: IrType) -> str:
return replace_pattern({
r'bool': r'Bool',
r'uint(\d+)': r'Word\1',
r'int(\d+)': r'Int\1',
r'Reg(\d*)': r'Register\1'
}, ty.id)
def get_operator(self, op: Operator) -> str:
dic = {
OP_BITWISE_AND: '.&.',
OP_BITWISE_OR : '.|.',
OP_BITWISE_XOR: '`xor`',
OP_SHL: '`shiftL`',
OP_SHR: '`shiftR`'
}
if op in dic:
return dic[op]
else:
return op.op
def get_function_name(self, function: Function) -> str:
if function.fullname in ('div',):
return function.fullname + '_'
else:
return function.fullname
def write_header(self):
self.write('module Asm.Internal.', self.arch.capitalize(), ' where\n\n')
self.indent += 1
self.writei('import Control.Exception (assert)\n')
self.writei('import Data.Bits\n')
self.writei('import Data.ByteString.Builder\n')
self.writei('import Data.Int\n')
self.writei('import Data.Semigroup (Semigroup((<>)))\n')
self.writei('import Data.Word\n\n')
def write_footer(self):
self.indent -= 1
def write_expr(self, expr: Expression):
if isinstance(expr, Binary):
self.write('(', expr.l, ' ', expr.op, ' ', expr.r, ')')
elif isinstance(expr, Unary):
self.write(expr.op, expr.v)
elif isinstance(expr, Ternary):
self.write('(if ', expr.condition, ' then ', expr.consequence, ' else ', expr.alternative, ')')
elif isinstance(expr, Var):
self.write(expr.name)
elif isinstance(expr, Call):
self.write(expr.builtin, ' ', join_any(' ', expr.args))
elif isinstance(expr, Literal):
self.write(expr.value)
else:
raise UnsupportedExpression(expr)
def write_stmt(self, stmt: Statement):
deindent = True
if self.is_first_statement:
self.is_first_statement = False
deindent = False
else:
self.writelinei('<>')
self.indent += 1
if isinstance(stmt, Assign):
self.writelinei(stmt.variable, ' = ', stmt.value)
elif isinstance(stmt, Conditional):
self.writelinei('if ', stmt.condition, ' then')
with self.indent.further():
self.is_first_statement = True
self.write_stmt(stmt.consequence)
self.is_first_statement = False
self.writelinei('else')
with self.indent.further():
self.is_first_statement = True
if stmt.alternative:
self.write_stmt(stmt.alternative)
else:
self.writelinei('mempty')
self.is_first_statement = False
elif isinstance(stmt, Block):
self.is_first_statement = True
for s in stmt.statements:
self.write_stmt(s)
self.is_first_statement = False
elif isinstance(stmt, Set):
typ = stmt.type.under
endian = 'BE ' if self.bigendian else 'LE '
if typ is TYPE_I8: self.writei('int8 ')
elif typ is TYPE_U8: self.writei('word8 ')
elif typ.id.startswith('u'): self.writei('word', typ.size * 8, endian)
else: self.writei('int', typ.size * 8, endian)
self.writeline(stmt.value)
elif isinstance(stmt, Define):
self.writelinei('let ', stmt.name, ' = ', stmt.value, ' in')
else:
raise UnsupportedStatement(stmt)
if deindent:
self.indent -= 1
def write_function(self, fun: Function):
self.is_first_statement = True
self.writei(fun.name, ' :: ')
for _, typ, _ in fun.params:
self.write(f'{typ} -> ')
self.write('Builder\n')
self.writei(fun.name, ' ', ' '.join([ name for name, _, _ in fun.params ]), ' =\n')
self.indent += 1
for name, typ, _ in fun.params:
# Deconstruct distinct types.
if typ.underlying is not None:
self.writelinei(f'let {name} = fromIntegral {name} in')
else:
self.writelinei(f'let {name} = fromIntegral {name} in')
for condition in fun.conditions:
self.writei('assert ', condition, '\n')
for stmt in fun.body:
self.write_stmt(stmt)
self.write('\n\n')
self.indent -= 1
def write_decl(self, decl: Declaration):
if isinstance(decl, Enumeration):
self.writei('-- | ', decl.descr, '\n')
self.writei('data ', decl.type, ' =\n')
self.indent += 1
prefix = ' '
for _, _, descr, fullname in decl.members + decl.additional_members:
self.writei(prefix, fullname, ' -- ^ ', descr, '\n')
if prefix == ' ':
prefix = '| '
self.writei(' deriving (Eq, Show)\n\n')
self.indent -= 1
self.writei('instance Enum ', decl.type, ' where\n')
for _, value, _, fullname in decl.members + decl.additional_members:
self.writei(' fromEnum ', fullname, ' = ', value, '\n')
self.write('\n')
for _, value, _, fullname in decl.members + decl.additional_members:
self.writei(' toEnum ', value, ' = ', fullname, '\n')
self.write('\n\n')
elif isinstance(decl, DistinctType):
self.writei('-- | ', decl.descr, '\n')
self.writei('newtype ', decl.type, ' = ', decl.type, ' ', decl.type.underlying, '\n\n')
if decl.constants:
self.writei(', '.join([ name for name, _ in decl.constants ]), ' :: ', decl.type, '\n')
for name, value in decl.constants:
self.writei(name, ' = ', decl.type, ' ', value, '\n')
self.write('\n\n')
else:
raise UnsupportedDeclaration(decl)
def write_test_header(self):
self.write(f'import Asm.{self.arch.capitalize()}\nimport Test.Hspec\n\n')
self.write(f'{self.arch}Spec = do\n')
self.indent += 1
def write_test_footer(self):
self.indent -= 1
def write_test(self, test: TestCase):
self.writei('it "', test.name, '" $\n')
self.indent += 1
self.writelinei('pending')
self.writeline()
self.indent -= 1
```
#### File: python/tests/test_arm.py
```python
from asm.arm import * # pylint: disable=W0614
def should_encode_single_cps_instruction():
asm = ArmAssembler(4)
asm.cps(Mode.USR)
assert asm.buf == b"\x10\x00\x02\xf1"
``` |
{
"source": "6adityag8/ExchangeRate",
"score": 3
} |
#### File: app/api/utils.py
```python
from typing import Optional, Dict, List
import httpx
from furl import furl
from pydantic import HttpUrl
from app.api.constants import OPENEXCHANGERATES_BASE_URL
async def request(
path_param: Optional[List[str]] = None,
query_param: Optional[Dict[str, str]] = None
) -> httpx.Response:
"""
Makes an async GET call to the OpenExchange URL
after adding the given path & query parameters.
:param path_param: path parameters to be added to the URL
:param query_param: query parameters to be sent with the URL
:return: httpx Response object
"""
url = prepare_url(path_param, query_param)
async with httpx.AsyncClient() as client:
response = await client.get(url)
return response
def prepare_url(
path_param: Optional[List[str]] = None,
query_param: Optional[Dict[str, str]] = None
) -> HttpUrl:
"""
Prepares the OpenExchange API call by
adding the given path & query parameters
:param path_param: path parameters to be added to the URL
:param query_param: query parameters to be sent with the URL
:return: prepared OpenExchange URL
"""
f = furl(OPENEXCHANGERATES_BASE_URL)
f.add(path=path_param, query_params=query_param)
return f.url
def get_base_currency_dict(base: str, currency_dict: Dict[str, float]) -> Dict[str, float]:
"""
Converts USD based currency dictionary to that of the given base currency.
:param base: base currency code
:param currency_dict: USD based currency dictionary
:return: base currency converted currency dictionary
"""
base_value = 1 / currency_dict[base]
base_currency_dict = {}
for currency_code, currency_value in currency_dict.items():
base_currency_dict[currency_code] = base_value * currency_value
base_currency_dict.pop(base, None)
return base_currency_dict
``` |
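`get_base_currency_dict` rebases a USD-keyed rates dictionary by dividing out the rate of the requested base currency. A quick usage sketch with made-up rates:
```python
# Rebasing USD rates to EUR the way get_base_currency_dict() does:
# new_rate = (1 / usd_rate_of_base) * usd_rate. Rates are made up.
usd_rates = {'EUR': 0.80, 'GBP': 0.70, 'INR': 75.0}

base = 'EUR'
base_value = 1 / usd_rates[base]            # 1.25 USD per EUR
eur_rates = {code: base_value * rate
             for code, rate in usd_rates.items() if code != base}

print(eur_rates)  # {'GBP': 0.875, 'INR': 93.75}
```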
{
"source": "6aika/ckanext-cloudstorage",
"score": 2
} |
#### File: ckanext/cloudstorage/controller.py
```python
import os.path
from pylons import c
from pylons.i18n import _
from ckan import logic, model
from ckan.lib import base, uploader
import ckan.lib.helpers as h
class StorageController(base.BaseController):
def resource_download(self, id, resource_id, filename=None):
context = {
'model': model,
'session': model.Session,
'user': c.user or c.author,
'auth_user_obj': c.userobj
}
try:
resource = logic.get_action('resource_show')(
context,
{
'id': resource_id
}
)
except logic.NotFound:
base.abort(404, _('Resource not found'))
except logic.NotAuthorized:
base.abort(401, _('Unauthorized to read resource {0}'.format(id)))
# This isn't a file upload, so either redirect to the source
# (if available) or error out.
if resource.get('url_type') != 'upload':
url = resource.get('url')
if not url:
base.abort(404, _('No download is available'))
h.redirect_to(url)
if filename is None:
# No filename was provided so we'll try to get one from the url.
filename = os.path.basename(resource['url'])
upload = uploader.get_resource_uploader(resource)
# if the client requests with a Content-Type header (e.g. Text preview)
# we have to add the header to the signature
try:
content_type = getattr(c.pylons.request, "content_type", None)
except AttributeError:
content_type = None
uploaded_url = upload.get_url_from_filename(resource['id'], filename,
content_type=content_type)
# The uploaded file is missing for some reason, such as the
# provider being down.
if uploaded_url is None:
base.abort(404, _('No download is available'))
h.redirect_to(uploaded_url)
``` |
{
"source": "6aika/linkedevents",
"score": 2
} |
#### File: linkedevents/events/admin.py
```python
from django.conf import settings
from django.contrib import admin
from django.contrib.admin.widgets import FilteredSelectMultiple
from django.contrib.gis.db import models
from django.utils.translation import ugettext as _
from leaflet.admin import LeafletGeoAdmin
from modeltranslation.admin import TranslationAdmin
from reversion.admin import VersionAdmin
from events.api import generate_id
from events.models import Event, Keyword, Place, Language, \
OpeningHoursSpecification, KeywordLabel, Organization, License, DataSource
class BaseAdmin(admin.ModelAdmin):
exclude = ("created_by", "modified_by",)
def save_model(self, request, obj, form, change):
if obj.pk is None:
obj.created_by = request.user
else:
obj.modified_by = request.user
obj.save()
class EventModelAdmin(BaseAdmin, TranslationAdmin, VersionAdmin):
pass
class KeywordAdmin(BaseAdmin, TranslationAdmin, VersionAdmin):
pass
class HelsinkiGeoAdmin(LeafletGeoAdmin):
settings_overrides = {
'DEFAULT_CENTER': (60.171944, 24.941389),
'DEFAULT_ZOOM': 11,
'MIN_ZOOM': 3,
'MAX_ZOOM': 19,
}
class PlaceAdmin(HelsinkiGeoAdmin, BaseAdmin, TranslationAdmin, VersionAdmin):
fieldsets = (
(None, {
'fields': ('publisher', 'name', 'description', 'info_url', 'position', 'divisions', 'parent')
}),
(_('Contact info'), {
'fields': ('email', 'telephone', 'contact_type', 'street_address', 'address_locality', 'address_region',
'postal_code', 'post_office_box_num')
}),
)
def __init__(self, model, admin_site):
super().__init__(model, admin_site)
# use https CDN instead
self.openlayers_url = 'https://cdnjs.cloudflare.com/ajax/libs/openlayers/2.13.1/OpenLayers.js'
def save_model(self, request, obj, form, change):
system_id = settings.SYSTEM_DATA_SOURCE_ID
obj.data_source_id = system_id
if not obj.id:
obj.id = generate_id(system_id)
obj.origin_id = obj.id.split(':')[1]
super().save_model(request, obj, form, change)
admin.site.register(Place, PlaceAdmin)
class OrganizationAdmin(BaseAdmin):
list_display = ('name', 'nr_org_admins')
formfield_overrides = {
models.ManyToManyField: {'widget': FilteredSelectMultiple("ylläpitäjät", is_stacked=False)},
}
fields = ('admin_users',)
def nr_org_admins(self, obj):
return obj.admin_users.count()
nr_org_admins.short_description = _('Admins')
admin.site.register(Organization, OrganizationAdmin)
class DataSourceAdmin(BaseAdmin):
fields = ('id', 'name', 'api_key', 'owner')
admin.site.register(DataSource, DataSourceAdmin)
class LanguageAdmin(BaseAdmin, VersionAdmin):
pass
class PersonAdmin(BaseAdmin, VersionAdmin):
pass
class LicenseAdmin(BaseAdmin, TranslationAdmin, VersionAdmin):
def get_readonly_fields(self, request, obj=None):
if obj:
return ['id']
else:
return []
admin.site.register(License, LicenseAdmin)
```
#### File: linkedevents/events/api.py
```python
from __future__ import unicode_literals
# python
import base64
import re
import struct
import time
import urllib.parse
from datetime import datetime, timedelta
from dateutil.parser import parse as dateutil_parse
# django and drf
from django.http import Http404
from django.contrib.auth import get_user_model
from django.utils import translation
from django.core.exceptions import ValidationError, PermissionDenied
from django.db.utils import IntegrityError
from django.conf import settings
from django.core.urlresolvers import NoReverseMatch
from django.db.models import Q
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.utils.encoding import force_text
from rest_framework import (
serializers, relations, viewsets, mixins, filters, generics, status, permissions
)
from rest_framework.settings import api_settings
from rest_framework.reverse import reverse
from rest_framework.response import Response
from rest_framework.exceptions import ParseError
from rest_framework.views import get_view_name as original_get_view_name
# 3rd party
from isodate import Duration, duration_isoformat, parse_duration
from modeltranslation.translator import translator, NotRegistered
from haystack.query import AutoQuery
from munigeo.api import (
GeoModelSerializer, GeoModelAPIView, build_bbox_filter, srid_to_srs
)
from munigeo.models import AdministrativeDivision
import pytz
import bleach
import django_filters
# events
from events import utils
from events.api_pagination import LargeResultsSetPagination
from events.auth import ApiKeyAuth, ApiKeyUser
from events.custom_elasticsearch_search_backend import (
CustomEsSearchQuerySet as SearchQuerySet
)
from events.models import (
Place, Event, Keyword, KeywordSet, Language, OpeningHoursSpecification, EventLink,
Offer, DataSource, Organization, Image, PublicationStatus, PUBLICATION_STATUSES, License
)
from events.translation import EventTranslationOptions
def get_view_name(cls, suffix=None):
if cls.__name__ == 'APIRoot':
return 'Linked Events'
return original_get_view_name(cls, suffix)
viewset_classes_by_model = {}
all_views = []
def register_view(klass, name, base_name=None):
entry = {'class': klass, 'name': name}
if base_name is not None:
entry['base_name'] = base_name
all_views.append(entry)
if klass.serializer_class and \
hasattr(klass.serializer_class, 'Meta') and \
hasattr(klass.serializer_class.Meta, 'model'):
model = klass.serializer_class.Meta.model
viewset_classes_by_model[model] = klass
def get_serializer_for_model(model, version='v1'):
Viewset = viewset_classes_by_model.get(model)
if Viewset is None: return None
serializer = None
if hasattr(Viewset, 'get_serializer_class_for_version'):
serializer = Viewset.get_serializer_class_for_version(version)
elif hasattr(Viewset, 'serializer_class'):
serializer = Viewset.serializer_class
return serializer
def generate_id(namespace):
t = time.time() * 1000
postfix = base64.b32encode(struct.pack(">Q", int(t)).lstrip(b'\x00'))
postfix = postfix.strip(b'=').lower().decode(encoding='UTF-8')
return '{}:{}'.format(namespace, postfix)
def parse_id_from_uri(uri):
"""
Parse id part from @id uri like
'http://127.0.0.1:8000/v0.1/event/matko%3A666/' -> 'matko:666'
:param uri: str
:return: str id
"""
if not uri.startswith('http'):
return uri
path = urllib.parse.urlparse(uri).path
_id = path.rstrip('/').split('/')[-1]
_id = urllib.parse.unquote(_id)
return _id
def perform_id_magic_for(data):
if 'id' in data:
err = "Do not send 'id' when POSTing a new Event (got id='{}')"
raise ParseError(err.format(data['id']))
data['id'] = generate_id(data['data_source'])
return data
class JSONLDRelatedField(relations.HyperlinkedRelatedField):
"""
Support of showing and saving of expanded JSON nesting or just a resource
URL.
Serializing is controlled by query string param 'expand', deserialization
by format of JSON given.
Default serializing is expand=false.
"""
invalid_json_error = _('Incorrect JSON. Expected JSON, received %s.')
def __init__(self, *args, **kwargs):
self.related_serializer = kwargs.pop('serializer', None)
self.hide_ld_context = kwargs.pop('hide_ld_context', False)
self.expanded = kwargs.pop('expanded', False)
super(JSONLDRelatedField, self).__init__(*args, **kwargs)
def use_pk_only_optimization(self):
if self.is_expanded():
return False
else:
return True
def to_representation(self, obj):
if isinstance(self.related_serializer, str):
self.related_serializer = globals().get(self.related_serializer, None)
if self.is_expanded():
return self.related_serializer(obj, hide_ld_context=self.hide_ld_context,
context=self.context).data
link = super(JSONLDRelatedField, self).to_representation(obj)
if link is None:
return None
return {
'@id': link
}
def to_internal_value(self, value):
# TODO: JA If @id is missing, this will complain just about value not being JSON
if not isinstance(value, dict) or '@id' not in value:
raise ValidationError(self.invalid_json_error % type(value).__name__)
url = value['@id']
if not url:
if self.required:
raise ValidationError(_('This field is required.'))
return None
return super().to_internal_value(urllib.parse.unquote(url))
def is_expanded(self):
return getattr(self, 'expanded', False)
class EnumChoiceField(serializers.Field):
"""
Database value of tinyint is converted to and from a string representation
of choice field.
TODO: Find if there's standardized way to render Schema.org enumeration
instances in JSON-LD.
"""
def __init__(self, choices, prefix='', **kwargs):
self.choices = choices
self.prefix = prefix
super(EnumChoiceField, self).__init__(**kwargs)
def to_representation(self, obj):
if obj is None:
return None
return self.prefix + utils.get_value_from_tuple_list(self.choices,
obj, 1)
def to_internal_value(self, data):
value = utils.get_value_from_tuple_list(self.choices,
self.prefix + str(data), 0)
if value is None:
raise ParseError(_("Invalid value in event_status"))
return value
class ISO8601DurationField(serializers.Field):
def to_representation(self, obj):
if obj:
d = Duration(milliseconds=obj)
return duration_isoformat(d)
else:
return None
def to_internal_value(self, data):
if data:
value = parse_duration(data)
return (
value.days * 24 * 3600 * 1000
+ value.seconds * 1000
+ value.microseconds / 1000
)
else:
return 0
class MPTTModelSerializer(serializers.ModelSerializer):
def __init__(self, *args, **kwargs):
super(MPTTModelSerializer, self).__init__(*args, **kwargs)
for field_name in 'lft', 'rght', 'tree_id', 'level':
if field_name in self.fields:
del self.fields[field_name]
class TranslatedModelSerializer(serializers.ModelSerializer):
def __init__(self, *args, **kwargs):
super(TranslatedModelSerializer, self).__init__(*args, **kwargs)
model = self.Meta.model
try:
trans_opts = translator.get_options_for_model(model)
except NotRegistered:
self.translated_fields = []
return
self.translated_fields = trans_opts.fields.keys()
lang_codes = [x[0] for x in settings.LANGUAGES]
# Remove the pre-existing data in the bundle.
for field_name in self.translated_fields:
for lang in lang_codes:
key = "%s_%s" % (field_name, lang)
if key in self.fields:
del self.fields[key]
del self.fields[field_name]
# def get_field(self, model_field):
# kwargs = {}
# if issubclass(
# model_field.__class__,
# (django_db_models.CharField,
# django_db_models.TextField)):
# if model_field.null:
# kwargs['allow_none'] = True
# kwargs['max_length'] = getattr(model_field, 'max_length')
# return fields.CharField(**kwargs)
# return super(TranslatedModelSerializer, self).get_field(model_field)
def to_representation(self, obj):
ret = super(TranslatedModelSerializer, self).to_representation(obj)
if obj is None:
return ret
return self.translated_fields_to_representation(obj, ret)
def to_internal_value(self, data):
"""
Convert complex translated json objects to flat format.
E.g. json structure containing `name` key like this:
{
"name": {
"fi": "musiikkiklubit",
"sv": "musikklubbar",
"en": "music clubs"
},
...
}
Transforms this:
{
"name": "musiikkiklubit",
"name_fi": "musiikkiklubit",
"name_sv": "musikklubbar",
"name_en": "music clubs"
...
}
:param data:
:return:
"""
extra_fields = {} # will contain the transformation result
for field_name in self.translated_fields:
obj = data.get(field_name, None) # { "fi": "musiikkiklubit", "sv": ... }
if not obj:
continue
for language in (lang[0] for lang in settings.LANGUAGES if lang[0] in obj):
value = obj[language] # "musiikkiklubit"
if language == settings.LANGUAGES[0][0]: # default language
extra_fields[field_name] = value # { "name": "musiikkiklubit" }
extra_fields['{}_{}'.format(field_name, language)] = value # { "name_fi": "musiikkiklubit" }
del data[field_name] # delete original translated fields
# handle other than translated fields
data = super().to_internal_value(data)
# add translated fields to the final result
data.update(extra_fields)
return data
def translated_fields_to_representation(self, obj, ret):
for field_name in self.translated_fields:
d = {}
for lang in [x[0] for x in settings.LANGUAGES]:
key = "%s_%s" % (field_name, lang)
val = getattr(obj, key, None)
if val is None:
continue
d[lang] = val
# If no text provided, leave the field as null
for key, val in d.items():
if val is not None:
break
else:
d = None
ret[field_name] = d
return ret
class LinkedEventsSerializer(TranslatedModelSerializer, MPTTModelSerializer):
"""Serializer with the support for JSON-LD/Schema.org.
JSON-LD/Schema.org syntax::
{
"@context": "http://schema.org",
"@type": "Event",
"name": "Event name",
...
}
See full example at: http://schema.org/Event
Args:
hide_ld_context (bool):
Hides `@context` from JSON, can be used in nested
serializers
"""
system_generated_fields = ('created_time', 'last_modified_time', 'created_by', 'last_modified_by')
non_visible_fields = ('created_by', 'last_modified_by')
def __init__(self, instance=None, files=None,
context=None, partial=False, many=None, skip_fields=set(),
allow_add_remove=False, hide_ld_context=False, **kwargs):
super(LinkedEventsSerializer, self).__init__(
instance=instance, context=context, **kwargs)
for field in self.non_visible_fields:
if field in self.fields:
del self.fields[field]
self.skip_fields = skip_fields
if context is not None:
include_fields = context.get('include', [])
for field_name in include_fields:
if not field_name in self.fields:
continue
field = self.fields[field_name]
if isinstance(field, relations.ManyRelatedField):
field = field.child_relation
if not isinstance(field, JSONLDRelatedField):
continue
field.expanded = True
self.skip_fields |= context.get('skip_fields', set())
self.hide_ld_context = hide_ld_context
self.disable_camelcase = True
if self.context and 'request' in self.context:
request = self.context['request']
if 'disable_camelcase' in request.query_params:
self.disable_camelcase = True
def to_internal_value(self, data):
for field in self.system_generated_fields:
if field in data:
del data[field]
data = super().to_internal_value(data)
return data
def to_representation(self, obj):
"""
Before sending to renderer there's a need to do additional work on
to-be-JSON dictionary data:
1. Add @context, @type and @id fields
2. Convert field names to camelCase
Renderer is the right place for this but now loop is done just once.
Reversal conversion is done in parser.
"""
ret = super(LinkedEventsSerializer, self).to_representation(obj)
if 'id' in ret and 'request' in self.context:
try:
ret['@id'] = reverse(self.view_name,
kwargs={u'pk': ret['id']},
request=self.context['request'])
except NoReverseMatch:
ret['@id'] = str(ret['id'])
# Context is hidden if:
# 1) hide_ld_context is set to True
# 2) self.object is None, e.g. we are in the list of stuff
if not self.hide_ld_context and self.instance is not None:
if hasattr(obj, 'jsonld_context') \
and isinstance(obj.jsonld_context, (dict, list)):
ret['@context'] = obj.jsonld_context
else:
ret['@context'] = 'http://schema.org'
# Use jsonld_type attribute if present,
# if not fallback to automatic resolution by model name.
# Note: Plan 'type' could be aliased to @type in context definition to
# conform JSON-LD spec.
if hasattr(obj, 'jsonld_type'):
ret['@type'] = obj.jsonld_type
else:
ret['@type'] = obj.__class__.__name__
if self.context['request'].version == 'v0.1':
return ret
for field in self.skip_fields:
if field in ret:
del ret[field]
return ret
def validate(self, data):
if 'name' in self.translated_fields:
name_exists = False
languages = [x[0] for x in settings.LANGUAGES]
for language in languages:
if 'name_%s' % language in data:
name_exists = True
break
else:
name_exists = 'name' in data
if not name_exists:
raise serializers.ValidationError({'name': _('The name must be specified.')})
super().validate(data)
return data
def create(self, validated_data):
try:
instance = super().create(validated_data)
except IntegrityError as error:
if 'duplicate' in str(error) and 'pkey' in str(error):
raise serializers.ValidationError({'id':_("An object with given id already exists.")})
else:
raise error
return instance
def update(self, instance, validated_data):
if 'id' in validated_data:
if instance.id != validated_data['id']:
raise serializers.ValidationError({'id':_("You may not change the id of an existing object.")})
super().update(instance, validated_data)
return instance
def _clean_qp(query_params):
"""
Strip 'event.' prefix from all query params.
:rtype : QueryDict
:param query_params: dict self.request.query_params
:return: QueryDict query_params
"""
query_params = query_params.copy() # do not alter original dict
nspace = 'event.'
for key in query_params.keys():
if key.startswith(nspace):
new_key = key[len(nspace):]
# .pop() returns a list(?), don't use
# query_params[new_key] = query_params.pop(key)
query_params[new_key] = query_params[key]
del query_params[key]
return query_params
class KeywordSerializer(LinkedEventsSerializer):
view_name = 'keyword-detail'
class Meta:
model = Keyword
class KeywordRetrieveViewSet(mixins.RetrieveModelMixin, viewsets.GenericViewSet):
queryset = Keyword.objects.all()
serializer_class = KeywordSerializer
class KeywordListViewSet(mixins.ListModelMixin, viewsets.GenericViewSet):
queryset = Keyword.objects.all()
serializer_class = KeywordSerializer
def get_queryset(self):
"""
Return Keyword queryset. If request has parameter show_all_keywords=1
all Keywords are returned, otherwise only which have events.
Additional query parameters:
event.data_source
event.start
event.end
"""
queryset = Keyword.objects.all()
data_source = self.request.query_params.get('data_source')
if data_source:
data_source = data_source.lower()
queryset = queryset.filter(data_source=data_source)
if not self.request.query_params.get('show_all_keywords'):
events = Event.objects.all()
params = _clean_qp(self.request.query_params)
if 'data_source' in params:
del params['data_source']
events = _filter_event_queryset(events, params)
keyword_ids = events.values_list('keywords',
flat=True).distinct().order_by()
queryset = queryset.filter(id__in=keyword_ids)
# Optionally filter keywords by filter parameter,
# can be used e.g. with typeahead.js
val = self.request.query_params.get('filter')
if val:
queryset = queryset.filter(name__startswith=val)
return queryset
register_view(KeywordRetrieveViewSet, 'keyword')
register_view(KeywordListViewSet, 'keyword')
class KeywordSetSerializer(LinkedEventsSerializer):
view_name = 'keywordset-detail'
keywords = JSONLDRelatedField(
serializer=KeywordSerializer, many=True, required=True, allow_empty=False,
view_name='keyword-detail', queryset=Keyword.objects.all())
usage = EnumChoiceField(KeywordSet.USAGES)
class Meta:
model = KeywordSet
class JSONAPIViewSet(viewsets.ReadOnlyModelViewSet):
def initial(self, request, *args, **kwargs):
ret = super(JSONAPIViewSet, self).initial(request, *args, **kwargs)
self.srs = srid_to_srs(self.request.query_params.get('srid', None))
return ret
def get_serializer_context(self):
context = super(JSONAPIViewSet, self).get_serializer_context()
include = self.request.query_params.get('include', '')
context['include'] = [x.strip() for x in include.split(',') if x]
context['srs'] = self.srs
context.setdefault('skip_fields', set()).add('origin_id')
return context
class KeywordSetViewSet(JSONAPIViewSet):
queryset = KeywordSet.objects.all()
serializer_class = KeywordSetSerializer
register_view(KeywordSetViewSet, 'keyword_set')
class DivisionSerializer(TranslatedModelSerializer):
type = serializers.SlugRelatedField(slug_field='type', read_only=True)
municipality = serializers.SlugRelatedField(slug_field='name', read_only=True)
class Meta:
model = AdministrativeDivision
fields = ('type', 'name', 'ocd_id', 'municipality')
class PlaceSerializer(LinkedEventsSerializer, GeoModelSerializer):
view_name = 'place-detail'
divisions = DivisionSerializer(many=True, read_only=True)
class Meta:
model = Place
class PlaceFilter(filters.FilterSet):
division = django_filters.Filter(name='divisions__ocd_id', lookup_type='in',
widget=django_filters.widgets.CSVWidget())
class Meta:
model = Place
fields = ('division',)
class PlaceRetrieveViewSet(GeoModelAPIView,
viewsets.GenericViewSet,
mixins.RetrieveModelMixin):
queryset = Place.objects.all()
serializer_class = PlaceSerializer
def get_serializer_context(self):
context = super(PlaceRetrieveViewSet, self).get_serializer_context()
context.setdefault('skip_fields', set()).add('origin_id')
return context
class PlaceListViewSet(GeoModelAPIView,
viewsets.GenericViewSet,
mixins.ListModelMixin):
queryset = Place.objects.all()
serializer_class = PlaceSerializer
filter_backends = (filters.DjangoFilterBackend,)
filter_class = PlaceFilter
def get_queryset(self):
"""
Return Place queryset. If request has parameter show_all_places=1
all Places are returned, otherwise only which have events.
Additional query parameters:
event.data_source
event.start
event.end
"""
queryset = Place.objects.prefetch_related('divisions__type', 'divisions__municipality')
if self.request.query_params.get('show_all_places'):
pass
else:
events = Event.objects.all()
params = _clean_qp(self.request.query_params)
events = _filter_event_queryset(events, params)
location_ids = events.values_list('location_id',
flat=True).distinct().order_by()
queryset = queryset.filter(id__in=location_ids)
return queryset
def get_serializer_context(self):
context = super(PlaceListViewSet, self).get_serializer_context()
context.setdefault('skip_fields', set()).add('origin_id')
return context
register_view(PlaceRetrieveViewSet, 'place')
register_view(PlaceListViewSet, 'place')
class OpeningHoursSpecificationSerializer(LinkedEventsSerializer):
class Meta:
model = OpeningHoursSpecification
class LanguageSerializer(LinkedEventsSerializer):
view_name = 'language-detail'
translation_available = serializers.SerializerMethodField()
class Meta:
model = Language
def get_translation_available(self, obj):
return obj.id in [language[0] for language in settings.LANGUAGES]
class LanguageViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Language.objects.all()
serializer_class = LanguageSerializer
register_view(LanguageViewSet, 'language')
LOCAL_TZ = pytz.timezone(settings.TIME_ZONE)
class OrganizationSerializer(LinkedEventsSerializer):
view_name = 'organization-detail'
class Meta:
model = Organization
exclude = ['admin_users']
class OrganizationViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Organization.objects.all()
serializer_class = OrganizationSerializer
register_view(OrganizationViewSet, 'organization')
class EventLinkSerializer(serializers.ModelSerializer):
def to_representation(self, obj):
ret = super(EventLinkSerializer, self).to_representation(obj)
if not ret['name']:
ret['name'] = None
return ret
class Meta:
model = EventLink
exclude = ['id', 'event']
class OfferSerializer(TranslatedModelSerializer):
class Meta:
model = Offer
exclude = ['id', 'event']
class ImageSerializer(LinkedEventsSerializer):
view_name = 'image-detail'
license = serializers.PrimaryKeyRelatedField(queryset=License.objects.all(), required=False)
class Meta:
model = Image
def to_representation(self, obj):
# the url field is customized based on image and url
representation = super().to_representation(obj)
if representation['image']:
representation['url'] = representation['image']
representation.pop('image')
return representation
def validate(self, data):
# name the image after the file, if name was not provided
if 'name' not in data:
if 'url' in data:
data['name'] = str(data['url']).rsplit('/', 1)[-1]
if 'image' in data:
data['name'] = str(data['image']).rsplit('/', 1)[-1]
super().validate(data)
return data
class ImageViewSet(viewsets.ModelViewSet):
queryset = Image.objects.all()
serializer_class = ImageSerializer
pagination_class = LargeResultsSetPagination
ordering_fields = ('last_modified_time',)
ordering = ('-last_modified_time',)
def get_queryset(self):
queryset = Image.objects.all()
val = self.request.query_params.get('publisher', None)
if val:
queryset = queryset.filter(publisher__id=val)
return queryset
def perform_create(self, serializer):
user = self.request.user if not self.request.user.is_anonymous() else None
serializer.save(created_by=user, last_modified_by=user)
def perform_update(self, serializer):
# ensure image can only be edited within the organization
user = self.request.user
image = self.get_object()
if not user.get_default_organization() == image.publisher:
raise PermissionDenied()
user = self.request.user if not self.request.user.is_anonymous() else None
serializer.save(last_modified_by=user)
def perform_destroy(self, instance):
# ensure image can only be deleted within the organization
user = self.request.user
image = self.get_object()
if not user.get_default_organization() == image.publisher:
raise PermissionDenied()
super().perform_destroy(instance)
register_view(ImageViewSet, 'image', base_name='image')
class EventSerializer(LinkedEventsSerializer, GeoModelAPIView):
id = serializers.CharField(required=False)
location = JSONLDRelatedField(serializer=PlaceSerializer, required=False,
view_name='place-detail', queryset=Place.objects.all())
# provider = OrganizationSerializer(hide_ld_context=True)
keywords = JSONLDRelatedField(serializer=KeywordSerializer, many=True, allow_empty=False,
required=False,
view_name='keyword-detail', queryset=Keyword.objects.all())
super_event = JSONLDRelatedField(serializer='EventSerializer', required=False, view_name='event-detail',
queryset=Event.objects.all(), allow_null=True)
event_status = EnumChoiceField(Event.STATUSES, required=False)
publication_status = EnumChoiceField(PUBLICATION_STATUSES)
external_links = EventLinkSerializer(many=True, required=False)
offers = OfferSerializer(many=True, required=False)
data_source = serializers.PrimaryKeyRelatedField(queryset=DataSource.objects.all(),
required=False)
publisher = serializers.PrimaryKeyRelatedField(queryset=Organization.objects.all(),
required=False)
sub_events = JSONLDRelatedField(serializer='EventSerializer',
required=False, view_name='event-detail',
many=True, queryset=Event.objects.all())
image = JSONLDRelatedField(serializer=ImageSerializer, required=False, allow_null=True,
view_name='image-detail', queryset=Image.objects.all(), expanded=True)
in_language = JSONLDRelatedField(serializer=LanguageSerializer, required=False,
view_name='language-detail', many=True, queryset=Language.objects.all())
audience = JSONLDRelatedField(serializer=KeywordSerializer, view_name='keyword-detail',
many=True, required=False, queryset=Keyword.objects.all())
view_name = 'event-detail'
fields_needed_to_publish = ('keywords', 'location', 'start_time', 'short_description', 'description')
def __init__(self, *args, skip_empties=False, **kwargs):
super(EventSerializer, self).__init__(*args, **kwargs)
# The following can be used when serializing when
# testing and debugging.
self.skip_empties = skip_empties
# for post and put methods, user information is needed to restrict permissions at validate
self.method = self.context['request'].method
self.user = self.context['request'].user
if self.method in permissions.SAFE_METHODS:
return
# api_key takes precedence over user
if isinstance(self.context['request'].auth, ApiKeyAuth):
self.data_source = self.context['request'].auth.get_authenticated_data_source()
self.publisher = self.data_source.owner
if not self.publisher:
raise PermissionDenied(_("Data source doesn't belong to any organization"))
else:
# events created by api are marked coming from the system data source unless api_key is provided
self.data_source = DataSource.objects.get(id=settings.SYSTEM_DATA_SOURCE_ID)
# user organization is used unless api_key is provided
self.publisher = self.user.get_default_organization()
if not self.publisher:
raise PermissionDenied(_("User doesn't belong to any organization"))
def get_datetimes(self, data):
for field in ['date_published', 'start_time', 'end_time']:
val = data.get(field, None)
if val:
if isinstance(val, str):
data[field] = parse_time(val, True)
return data
def to_internal_value(self, data):
# parse the first image to the image field
if 'images' in data:
if data['images']:
data['image'] = data['images'][0]
# If the obligatory fields are null or empty, remove them to prevent to_internal_value from checking them.
# Only for drafts, because null start time of a PUBLIC event will indicate POSTPONED.
if data.get('publication_status') == 'draft':
# however, the optional fields cannot be null and must be removed
for field in self.fields_needed_to_publish:
if not data.get(field):
data.pop(field, None)
data = super().to_internal_value(data)
return data
def validate(self, data):
# validate id permissions
if 'id' in data:
if not data['id'].split(':', 1)[0] == self.data_source.id:
raise serializers.ValidationError(
{'id': _("Setting id to %(given)s " +
" is not allowed for your organization. The id"
" must be left blank or set to %(data_source)s:desired_id") %
{'given': str(data['id']), 'data_source': self.data_source}})
# validate data source permissions
if 'data_source' in data:
if data['data_source'] != self.data_source:
raise serializers.ValidationError(
{'data_source': _("Setting data_source to %(given)s " +
" is not allowed for your organization. The data source" +
" must be left blank or set to %(required)s") %
{'given': data['data_source'], 'required': self.data_source}})
else:
data['data_source'] = self.data_source
# validate publisher permissions
if 'publisher' in data:
if data['publisher'] != self.publisher:
raise serializers.ValidationError(
{'publisher': _("Setting publisher to %(given)s " +
" is not allowed for your organization. The publisher" +
" must be left blank or set to %(required)s ") %
{'given': data['publisher'], 'required': self.publisher}})
else:
data['publisher'] = self.publisher
# clean the html
for k, v in data.items():
if type(v) == str:
if k in ["description"]:
data[k] = bleach.clean(v, settings.BLEACH_ALLOWED_TAGS)
data = super().validate(data)
# require the publication status
if 'publication_status' not in data:
raise serializers.ValidationError({'publication_status':
_("You must specify whether you wish to submit a draft or a public event.")})
# if the event is a draft, no further validation is performed
if data['publication_status'] == PublicationStatus.DRAFT:
return data
# check that published events have a location, keyword and start_time
languages = [x[0] for x in settings.LANGUAGES]
errors = {}
lang_error_msg = _('This field must be specified before an event is published.')
for field in self.fields_needed_to_publish:
if field in self.translated_fields:
for lang in languages:
name = "name_%s" % lang
field_lang = "%s_%s" % (field, lang)
if data.get(name) and not data.get(field_lang):
errors.setdefault(field, {})[lang] = lang_error_msg
if field == 'short_description' and len(data.get(field_lang, [])) > 160:
errors.setdefault(field, {})[lang] = (
_('Short description length must be 160 characters or less'))
elif not data.get(field):
# The start time may be null if a published event is postponed!
if field == 'start_time' and 'start_time' in data:
pass
else:
errors[field] = lang_error_msg
# published events need price info = at least one offer that either has a price or is free
price_exists = False
for offer in data.get('offers', []):
is_free = offer.get('is_free', False)
if is_free or 'price' in offer:
price_exists = True
break
if not price_exists:
errors['offers'] = _('Price info must be specified before an event is published.')
# adjust start_time and has_start_time
if 'has_start_time' not in data:
# provided time is assumed exact
data['has_start_time'] = True
if not data['has_start_time']:
# provided time is inexact
data['start_time'] = data['start_time'].replace(hour=0, minute=0, second=0)
# adjust end_time and has_end_time
# If no end timestamp supplied, we treat the event as ending at midnight.
if 'end_time' not in data or not data['end_time']:
data['end_time'] = data['start_time']
data['has_end_time'] = False
if 'has_end_time' not in data:
# provided time is assumed exact
data['has_end_time'] = True
# If end date is supplied but no time, the event ends at midnight of the following day.
if not data['has_end_time']:
data['end_time'] = data['end_time'].replace(hour=0, minute=0, second=0)
data['end_time'] += timedelta(days=1)
if data.get('start_time') and data['start_time'] < timezone.now():
errors['start_time'] = force_text(_('Start time cannot be in the past.'))
if data.get('end_time') and data['end_time'] < timezone.now():
errors['end_time'] = force_text(_('End time cannot be in the past.'))
if errors:
raise serializers.ValidationError(errors)
return data
def create(self, validated_data):
# if id was not provided, we generate it upon creation:
if 'id' not in validated_data:
validated_data['id'] = generate_id(self.data_source)
# no django user exists for the api key
if isinstance(self.user, ApiKeyUser):
self.user = None
offers = validated_data.pop('offers', [])
links = validated_data.pop('external_links', [])
validated_data.update({'created_by': self.user,
'last_modified_by': self.user,
'created_time': Event.now(), # we must specify creation time as we are setting id
'event_status': Event.Status.SCHEDULED, # mark all newly created events as scheduled
})
event = super().create(validated_data)
# create and add related objects
for offer in offers:
Offer.objects.create(event=event, **offer)
for link in links:
EventLink.objects.create(event=event, **link)
return event
def update(self, instance, validated_data):
# allow updating events if the api key matches event data source
if isinstance(self.user, ApiKeyUser):
self.user = None
if not instance.data_source == self.data_source:
raise PermissionDenied()
else:
if not instance.is_editable() or not instance.is_admin(self.user):
raise PermissionDenied()
offers = validated_data.pop('offers', None)
links = validated_data.pop('external_links', None)
validated_data['last_modified_by'] = self.user
# The API only allows scheduling and cancelling events.
# POSTPONED and RESCHEDULED may not be set, but should be allowed in already set instances.
if validated_data.get('event_status') in (Event.Status.POSTPONED, Event.Status.RESCHEDULED):
if validated_data.get('event_status') != instance.event_status:
raise serializers.ValidationError({'event_status':
                    _('POSTPONED and RESCHEDULED statuses cannot be set directly. '
                      'Changing event start_time or marking start_time null '
                      'will reschedule or postpone an event.')})
# Update event_status if a PUBLIC SCHEDULED or CANCELLED event start_time is updated.
# DRAFT events will remain SCHEDULED up to publication.
# Check that the event is not explicitly CANCELLED at the same time.
if (instance.publication_status == PublicationStatus.PUBLIC and
validated_data.get('event_status', Event.Status.SCHEDULED) != Event.Status.CANCELLED):
# if the instance was ever CANCELLED, RESCHEDULED or POSTPONED, it may never be SCHEDULED again
if instance.event_status != Event.Status.SCHEDULED:
if validated_data.get('event_status') == Event.Status.SCHEDULED:
raise serializers.ValidationError({'event_status':
                        _('Public events cannot be set back to SCHEDULED if they '
                          'have already been CANCELLED, POSTPONED or RESCHEDULED.')})
validated_data['event_status'] = instance.event_status
try:
# if the start_time changes, reschedule the event
if validated_data['start_time'] != instance.start_time:
validated_data['event_status'] = Event.Status.RESCHEDULED
# if the posted start_time is null, postpone the event
if not validated_data['start_time']:
validated_data['event_status'] = Event.Status.POSTPONED
except KeyError:
# if the start_time is not provided, do nothing
pass
# update validated fields
super().update(instance, validated_data)
# also update `has_end_time` if needed
if instance.end_time:
instance.has_end_time = True
instance.save()
# update offers
if isinstance(offers, list):
instance.offers.all().delete()
for offer in offers:
Offer.objects.create(event=instance, **offer)
# update ext links
if isinstance(links, list):
instance.external_links.all().delete()
for link in links:
EventLink.objects.create(event=instance, **link)
return instance
def to_representation(self, obj):
ret = super(EventSerializer, self).to_representation(obj)
if 'start_time' in ret and not obj.has_start_time:
# Return only the date part
ret['start_time'] = obj.start_time.astimezone(LOCAL_TZ).strftime('%Y-%m-%d')
if 'end_time' in ret and not obj.has_end_time:
# If we're storing only the date part, do not pretend we have the exact time.
# Timestamp is of the form %Y-%m-%dT00:00:00, so we report the previous date.
ret['end_time'] = (obj.end_time - timedelta(days=1)).astimezone(LOCAL_TZ).strftime('%Y-%m-%d')
# Unless the event is short, then no need for end time
if obj.end_time - obj.start_time <= timedelta(days=1):
ret['end_time'] = None
del ret['has_start_time']
del ret['has_end_time']
if hasattr(obj, 'days_left'):
ret['days_left'] = int(obj.days_left)
if self.skip_empties:
for k in list(ret.keys()):
val = ret[k]
try:
if val is None or len(val) == 0:
del ret[k]
except TypeError:
# not list/dict
pass
if 'image' in ret:
            if ret['image'] is None:
ret['images'] = []
else:
ret['images'] = [ret['image']]
del ret['image']
request = self.context.get('request')
if request:
if not request.user.is_authenticated():
del ret['publication_status']
return ret
class Meta:
model = Event
exclude = ['is_recurring_super', 'deleted']
def _format_images_v0_1(data):
if 'images' not in data:
return
images = data.get('images')
del data['images']
if len(images) == 0:
data['image'] = None
else:
data['image'] = images[0].get('url', None)
class EventSerializerV0_1(EventSerializer):
def __init__(self, *args, **kwargs):
kwargs.setdefault('context', {}).setdefault('include', []).append('image')
super(EventSerializerV0_1, self).__init__(*args, **kwargs)
def to_representation(self, obj):
ret = super(EventSerializerV0_1, self).to_representation(obj)
_format_images_v0_1(ret)
return ret
def parse_time(time_str, is_start):
time_str = time_str.strip()
# Handle dates first. Assume dates are given in local timezone.
# FIXME: What if there's no local timezone?
try:
dt = datetime.strptime(time_str, '%Y-%m-%d')
dt = LOCAL_TZ.localize(dt)
except ValueError:
dt = None
if not dt:
if time_str.lower() == 'today':
dt = datetime.utcnow().replace(tzinfo=pytz.utc)
dt = dt.astimezone(LOCAL_TZ)
dt = dt.replace(hour=0, minute=0, second=0, microsecond=0)
if dt:
# With start timestamps, we treat dates as beginning
# at midnight the same day. End timestamps are taken to
# mean midnight on the following day.
if not is_start:
dt = dt + timedelta(days=1)
else:
try:
# Handle all other times through dateutil.
dt = dateutil_parse(time_str)
except (TypeError, ValueError):
raise ParseError('time in invalid format (try ISO 8601 or yyyy-mm-dd)')
return dt
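# Usage sketch for parse_time (illustrative values; results depend on LOCAL_TZ):
#   parse_time('2014-01-15', is_start=True)   -> midnight 2014-01-15, local time
#   parse_time('2014-01-15', is_start=False)  -> midnight 2014-01-16, local time (end dates roll over)
#   parse_time('today', is_start=True)        -> midnight of the current local date
#   parse_time('2014-01-15T12:00:00Z', True)  -> parsed by dateutil; malformed input raises ParseError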
class LinkedEventsOrderingFilter(filters.OrderingFilter):
ordering_param = 'sort'
class EventOrderingFilter(LinkedEventsOrderingFilter):
def filter_queryset(self, request, queryset, view):
queryset = super(EventOrderingFilter, self).filter_queryset(request, queryset, view)
ordering = self.get_ordering(request, queryset, view)
if not ordering:
ordering = []
if 'days_left' in [x.lstrip('-') for x in ordering]:
queryset = queryset.extra(select={'days_left': 'date_part(\'day\', end_time - start_time)'})
return queryset
def parse_duration_string(duration):
"""
Parse duration string expressed in format
86400 or 86400s (24 hours)
180m or 3h (3 hours)
3d (3 days)
"""
m = re.match(r'(\d+)\s*(d|h|m|s)?$', duration.strip().lower())
if not m:
raise ParseError("Invalid duration supplied. Try '1d', '2h' or '180m'.")
val, unit = m.groups()
if not unit:
unit = 's'
if unit == 's':
mul = 1
elif unit == 'm':
mul = 60
elif unit == 'h':
mul = 3600
elif unit == 'd':
mul = 24 * 3600
return int(val) * mul
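# Examples of accepted duration strings (the return value is always seconds):
#   parse_duration_string('86400') == 86400
#   parse_duration_string('3h')    == 10800
#   parse_duration_string('180m')  == 10800
#   parse_duration_string('3d')    == 259200
#   parse_duration_string('soon') raises ParseError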
def _filter_event_queryset(queryset, params, srs=None):
"""
Filter events queryset by params
(e.g. self.request.query_params in EventViewSet)
"""
# Filter by string (case insensitive). This searches from all fields
# which are marked translatable in translation.py
val = params.get('text', None)
if val:
val = val.lower()
# Free string search from all translated fields
fields = EventTranslationOptions.fields
# and these languages
languages = [x[0] for x in settings.LANGUAGES]
qset = Q()
for field in fields:
for lang in languages:
kwarg = {field + '_' + lang + '__icontains': val}
qset |= Q(**kwarg)
queryset = queryset.filter(qset)
val = params.get('last_modified_since', None)
# This should be in format which dateutil.parser recognizes, e.g.
# 2014-10-29T12:00:00Z == 2014-10-29T12:00:00+0000 (UTC time)
# or 2014-10-29T12:00:00+0200 (local time)
if val:
dt = parse_time(val, is_start=False)
queryset = queryset.filter(Q(last_modified_time__gte=dt))
val = params.get('start', None)
if val:
dt = parse_time(val, is_start=True)
queryset = queryset.filter(Q(end_time__gt=dt) | Q(start_time__gte=dt))
val = params.get('end', None)
if val:
dt = parse_time(val, is_start=False)
queryset = queryset.filter(Q(end_time__lt=dt) | Q(start_time__lte=dt))
val = params.get('bbox', None)
if val:
bbox_filter = build_bbox_filter(srs, val, 'position')
places = Place.geo_objects.filter(**bbox_filter)
queryset = queryset.filter(location__in=places)
# Filter by data source, multiple sources separated by comma
val = params.get('data_source', None)
if val:
val = val.split(',')
queryset = queryset.filter(data_source_id__in=val)
# Negative filter by data source, multiple sources separated by comma
val = params.get('data_source!', None)
if val:
val = val.split(',')
queryset = queryset.exclude(data_source_id__in=val)
# Filter by location id, multiple ids separated by comma
val = params.get('location', None)
if val:
val = val.split(',')
queryset = queryset.filter(location_id__in=val)
# Filter by keyword id, multiple ids separated by comma
val = params.get('keyword', None)
if val:
val = val.split(',')
queryset = queryset.filter(keywords__pk__in=val)
# Filter only super or sub events if recurring has value
val = params.get('recurring', None)
if val:
val = val.lower()
if val == 'super':
queryset = queryset.filter(is_recurring_super=True)
elif val == 'sub':
queryset = queryset.filter(is_recurring_super=False)
val = params.get('max_duration', None)
if val:
dur = parse_duration_string(val)
cond = 'end_time - start_time <= %s :: interval'
queryset = queryset.extra(where=[cond], params=[str(dur)])
val = params.get('min_duration', None)
if val:
dur = parse_duration_string(val)
cond = 'end_time - start_time >= %s :: interval'
queryset = queryset.extra(where=[cond], params=[str(dur)])
val = params.get('publisher', None)
if val:
queryset = queryset.filter(publisher__id=val)
return queryset
class DivisionFilter(django_filters.Filter):
"""
    Depending on the deployment location, allows simpler filtering by prepending
    country and municipality information from local settings.
"""
def filter(self, qs, value):
if hasattr(settings, 'MUNIGEO_MUNI') and hasattr(settings, 'MUNIGEO_COUNTRY'):
for i, item in enumerate(value):
if not item.startswith('ocd-division'):
if not item.startswith('country'):
if not item.startswith('kunta'):
item = settings.MUNIGEO_MUNI + '/' + item
item = settings.MUNIGEO_COUNTRY + '/' + item
item = 'ocd-division/' + item
value[i] = item
return super().filter(qs, value)
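    # Sketch of the expansion performed above, assuming the hypothetical settings
    # MUNIGEO_COUNTRY='country:fi' and MUNIGEO_MUNI='kunta:helsinki':
    #   'kamppi'      -> 'ocd-division/country:fi/kunta:helsinki/kamppi'
    #   'kunta:espoo' -> 'ocd-division/country:fi/kunta:espoo'
    #   full 'ocd-division/...' ids are passed through unchanged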
class EventFilter(filters.FilterSet):
division = DivisionFilter(name='location__divisions__ocd_id', lookup_expr='in',
widget=django_filters.widgets.CSVWidget())
class Meta:
model = Event
fields = ('division',)
class EventViewSet(viewsets.ModelViewSet, JSONAPIViewSet):
"""
# Filtering retrieved events
Query parameters can be used to filter the retrieved events by
the following criteria.
## Event time
Use `start` and `end` to restrict the date range of returned events.
Any events that intersect with the given date range will be returned.
The parameters `start` and `end` can be given in the following formats:
- ISO 8601 (including the time of day)
- yyyy-mm-dd
In addition, `today` can be used as the value.
Example:
event/?start=2014-01-15&end=2014-01-20
[See the result](?start=2014-01-15&end=2014-01-20 "json")
## Event location
### Bounding box
To restrict the retrieved events to a geographical region, use
the query parameter `bbox` in the format
bbox=west,south,east,north
Where `west` is the longitude of the rectangle's western boundary,
`south` is the latitude of the rectangle's southern boundary,
and so on.
Example:
event/?bbox=24.9348,60.1762,24.9681,60.1889
[See the result](?bbox=24.9348,60.1762,24.9681,60.1889 "json")
# Getting detailed data
In the default case, keywords, locations, and other fields that
refer to separate resources are only displayed as simple references.
If you want to include the complete data from related resources in
the current response, use the keyword `include`. For example:
event/?include=location,keywords
[See the result](?include=location,keywords "json")
# Response data for the current URL
"""
queryset = Event.objects.filter(deleted=False)
# Use select_ and prefetch_related() to reduce the amount of queries
queryset = queryset.select_related('location')
queryset = queryset.prefetch_related(
'offers', 'keywords', 'external_links', 'sub_events')
serializer_class = EventSerializer
filter_backends = (EventOrderingFilter, filters.DjangoFilterBackend)
filter_class = EventFilter
ordering_fields = ('start_time', 'end_time', 'days_left', 'last_modified_time')
ordering = ('-last_modified_time',)
@staticmethod
def get_serializer_class_for_version(version):
if version == 'v0.1':
return EventSerializerV0_1
return EventSerializer
def get_serializer_class(self):
return EventViewSet.get_serializer_class_for_version(self.request.version)
def get_serializer_context(self):
context = super(EventViewSet, self).get_serializer_context()
context.setdefault('skip_fields', set()).update(set([
'headline',
'secondary_headline']))
return context
def get_object(self):
# Overridden to prevent queryset filtering from being applied
# outside list views.
try:
event = Event.objects.get(pk=self.kwargs['pk'])
except Event.DoesNotExist:
raise Http404("Event does not exist")
if (event.publication_status == PublicationStatus.PUBLIC or
(self.request.user and
self.request.user.is_authenticated() and
self.request.user.get_default_organization() == event.publisher)):
return event
else:
raise Http404("Event does not exist")
def filter_queryset(self, queryset):
"""
TODO: convert to use proper filter framework
"""
user_organization = None
if self.request.user and self.request.user.is_authenticated():
user_organization = self.request.user.get_default_organization()
queryset = super(EventViewSet, self).filter_queryset(queryset)
auth_filters = Q(publication_status=PublicationStatus.PUBLIC)
if user_organization:
# USER IS AUTHENTICATED
if 'show_all' in self.request.query_params:
# Show all events for this organization,
# along with public events for others.
auth_filters |= Q(publisher=user_organization)
queryset = queryset.filter(auth_filters)
queryset = _filter_event_queryset(queryset, self.request.query_params,
srs=self.srs)
return queryset.filter()
register_view(EventViewSet, 'event')
class SearchSerializer(serializers.Serializer):
def to_representation(self, search_result):
model = search_result.model
version = self.context['request'].version
ser_class = get_serializer_for_model(model, version=version)
assert ser_class is not None, "Serializer for %s not found" % model
data = ser_class(search_result.object, context=self.context).data
data['resource_type'] = model._meta.model_name
data['score'] = search_result.score
return data
class SearchSerializerV0_1(SearchSerializer):
def to_representation(self, search_result):
ret = super(SearchSerializerV0_1, self).to_representation(search_result)
if 'resource_type' in ret:
ret['object_type'] = ret['resource_type']
del ret['resource_type']
return ret
DATE_DECAY_SCALE = '30d'
class SearchViewSet(GeoModelAPIView, viewsets.ViewSetMixin, generics.ListAPIView):
def get_serializer_class(self):
if self.request.version == 'v0.1':
return SearchSerializerV0_1
return SearchSerializer
def list(self, request, *args, **kwargs):
languages = [x[0] for x in settings.LANGUAGES]
# If the incoming language is not specified, go with the default.
self.lang_code = request.query_params.get('language', languages[0])
if self.lang_code not in languages:
raise ParseError("Invalid language supplied. Supported languages: %s" %
','.join(languages))
params = request.query_params
input_val = params.get('input', '').strip()
q_val = params.get('q', '').strip()
if not input_val and not q_val:
raise ParseError("Supply search terms with 'q=' or autocomplete entry with 'input='")
if input_val and q_val:
raise ParseError("Supply either 'q' or 'input', not both")
old_language = translation.get_language()[:2]
translation.activate(self.lang_code)
queryset = SearchQuerySet()
if input_val:
queryset = queryset.filter(autosuggest=input_val)
else:
queryset = queryset.filter(text=AutoQuery(q_val))
models = None
types = params.get('type', '').split(',')
if types:
models = set()
for t in types:
if t == 'event':
models.add(Event)
elif t == 'place':
models.add(Place)
if self.request.version == 'v0.1':
if len(models) == 0:
models.add(Event)
if len(models) == 1 and Event in models:
start = params.get('start', None)
if start:
dt = parse_time(start, is_start=True)
queryset = queryset.filter(Q(end_time__gt=dt) | Q(start_time__gte=dt))
end = params.get('end', None)
if end:
dt = parse_time(end, is_start=False)
queryset = queryset.filter(Q(end_time__lt=dt) | Q(start_time__lte=dt))
if not start and not end and hasattr(queryset.query, 'add_decay_function'):
# If no time-based filters are set, make the relevancy score
# decay the further in the future the event is.
now = datetime.utcnow()
queryset = queryset.filter(end_time__gt=now).decay({
'gauss': {
'end_time': {
'origin': now,
'scale': DATE_DECAY_SCALE
}
}
})
if len(models) > 0:
queryset = queryset.models(*list(models))
self.object_list = queryset.load_all()
page = self.paginate_queryset(self.object_list)
if page is not None:
serializer = self.get_serializer(page, many=True)
resp = self.get_paginated_response(serializer.data)
translation.activate(old_language)
return resp
serializer = self.get_serializer(self.object_list, many=True)
resp = Response(serializer.data)
translation.activate(old_language)
return resp
register_view(SearchViewSet, 'search', base_name='search')
```
#### File: events/exporter/base.py
```python
import logging
import os
class Exporter(object):
def __init__(self, options=None):
self.options = options
self.logger = logging.getLogger(__name__)
self.setup()
def setup(self):
pass
exporters = {}
def register_exporter(klass):
exporters[klass.name] = klass
return klass
def get_exporters():
if exporters:
return exporters
for fname in os.listdir(os.path.dirname(__file__)):
module, ext = os.path.splitext(fname)
if ext.lower() != '.py':
continue
if module in ('__init__', 'base'):
continue
full_path = "%s.%s" % (__package__, module)
ret = __import__(full_path, locals(), globals())
return exporters
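# Sketch of a concrete exporter module (hypothetical, e.g. events/exporter/myexport.py)
# that get_exporters() would pick up and register:
#
#   @register_exporter
#   class MyExporter(Exporter):
#       name = 'myexport'
#
#       def setup(self):
#           self.logger.info('exporter ready')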
```
#### File: events/tests/test_event_put.py
```python
from datetime import timedelta
from django.utils import timezone
import pytest
from .utils import versioned_reverse as reverse
from events.tests.utils import assert_event_data_is_equal
from events.tests.test_event_post import create_with_post
from .conftest import DATETIME
from events.models import Event
from django.conf import settings
# === util methods ===
def update_with_put(api_client, event_id, event_data, credentials=None):
if credentials:
api_client.credentials(**credentials)
response = api_client.put(event_id, event_data, format='json')
return response
# === tests ===
@pytest.mark.django_db
def test__update_a_draft_with_put(api_client, minimal_event_dict, user):
# create an event
api_client.force_authenticate(user=user)
minimal_event_dict.pop('location')
minimal_event_dict.pop('keywords')
minimal_event_dict['publication_status'] = 'draft'
response = create_with_post(api_client, minimal_event_dict)
assert_event_data_is_equal(minimal_event_dict, response.data)
data2 = response.data
print('got the post response')
print(data2)
# store updates
event_id = data2.pop('@id')
response2 = update_with_put(api_client, event_id, data2)
print('got the put response')
print(response2.data)
# assert
assert_event_data_is_equal(data2, response2.data)
@pytest.mark.django_db
def test__update_an_event_with_put(api_client, complex_event_dict, user):
# create an event
api_client.force_authenticate(user=user)
response = create_with_post(api_client, complex_event_dict)
# dummy inputs
TEXT = 'text updated'
URL = "http://localhost"
# set up updates
data2 = response.data
for key in ('name', ):
for lang in ('fi', 'en', 'sv'):
if lang in data2[key]:
data2[key][lang] = '%s updated' % data2[key][lang]
data2['offers'] = [
{
"is_free": False,
"price": {"en": TEXT, "sv": TEXT, "fi": TEXT},
"description": {"en": TEXT, "fi": TEXT},
"info_url": {"en": URL, "sv": URL, "fi": URL}
}
]
data2['keywords'] = data2['keywords'][:1]
data2['in_language'] = data2['in_language'][:2]
# store updates
event_id = data2.pop('@id')
response2 = update_with_put(api_client, event_id, data2)
# assert
assert_event_data_is_equal(data2, response2.data)
@pytest.mark.django_db
def test__reschedule_an_event_with_put(api_client, complex_event_dict, user):
# create an event
api_client.force_authenticate(user=user)
response = create_with_post(api_client, complex_event_dict)
# create a new datetime
new_datetime = (timezone.now() + timedelta(days=3)).isoformat()
data2 = response.data
data2['start_time'] = new_datetime
data2['end_time'] = new_datetime
# update the event
event_id = data2.pop('@id')
response2 = update_with_put(api_client, event_id, data2)
# assert backend rescheduled the event
data2['event_status'] = 'EventRescheduled'
assert_event_data_is_equal(data2, response2.data)
# try to cancel marking as rescheduled
data2['event_status'] = 'EventScheduled'
response3 = api_client.put(event_id, data2, format='json')
# assert the event does not revert back to scheduled
assert response3.status_code == 400
assert 'event_status' in response3.data
# create a new datetime again
new_datetime = (timezone.now() + timedelta(days=3)).isoformat()
data2 = response2.data
data2['start_time'] = new_datetime
data2['end_time'] = new_datetime
# update the event again
response2 = update_with_put(api_client, event_id, data2)
# assert the event remains rescheduled
data2['event_status'] = 'EventRescheduled'
assert_event_data_is_equal(data2, response2.data)
@pytest.mark.django_db
def test__postpone_an_event_with_put(api_client, complex_event_dict, user):
# create an event
api_client.force_authenticate(user=user)
response = create_with_post(api_client, complex_event_dict)
# remove the start_time
data2 = response.data
data2['start_time'] = None
# update the event
event_id = data2.pop('@id')
response2 = update_with_put(api_client, event_id, data2)
# assert backend postponed the event
data2['event_status'] = 'EventPostponed'
assert_event_data_is_equal(data2, response2.data)
# try to cancel marking as postponed
data2 = response2.data
data2['event_status'] = 'EventScheduled'
response3 = api_client.put(event_id, data2, format='json')
# assert the event does not revert back to scheduled
assert response3.status_code == 400
    assert 'event_status' in response3.data
# reschedule and try to cancel marking
new_datetime = (timezone.now() + timedelta(days=3)).isoformat()
data2['start_time'] = new_datetime
data2['end_time'] = new_datetime
data2['event_status'] = 'EventScheduled'
response3 = api_client.put(event_id, data2, format='json')
# assert the event does not revert back to scheduled
assert response3.status_code == 400
assert 'event_status' in response3.data
# reschedule, but do not try to cancel marking
data2 = response2.data
new_datetime = (timezone.now() + timedelta(days=3)).isoformat()
data2['start_time'] = new_datetime
data2['end_time'] = new_datetime
data2.pop('event_status')
event_id = data2.pop('@id')
response2 = update_with_put(api_client, event_id, data2)
# assert the event is marked rescheduled
data2['event_status'] = 'EventRescheduled'
assert_event_data_is_equal(data2, response2.data)
@pytest.mark.django_db
def test__cancel_an_event_with_put(api_client, complex_event_dict, user):
# create an event
api_client.force_authenticate(user=user)
response = create_with_post(api_client, complex_event_dict)
# mark the event cancelled
data2 = response.data
data2['event_status'] = 'EventCancelled'
# update the event
event_id = data2.pop('@id')
response2 = update_with_put(api_client, event_id, data2)
# assert backend cancelled the event
data2['event_status'] = 'EventCancelled'
assert_event_data_is_equal(data2, response2.data)
@pytest.mark.django_db
def test__cancel_a_postponed_event_with_put(api_client, complex_event_dict, user):
# create an event
api_client.force_authenticate(user=user)
response = create_with_post(api_client, complex_event_dict)
# remove the start_time
data2 = response.data
data2['start_time'] = None
# update the event
event_id = data2.pop('@id')
response2 = update_with_put(api_client, event_id, data2)
# assert backend postponed the event
data2['event_status'] = 'EventPostponed'
assert_event_data_is_equal(data2, response2.data)
# mark the event cancelled
data2 = response.data
data2['event_status'] = 'EventCancelled'
# update the event
response2 = update_with_put(api_client, event_id, data2)
# assert backend cancelled the event
data2['event_status'] = 'EventCancelled'
assert_event_data_is_equal(data2, response2.data)
@pytest.mark.django_db
def test__cancel_a_rescheduled_event_with_put(api_client, complex_event_dict, user):
# create an event
api_client.force_authenticate(user=user)
response = create_with_post(api_client, complex_event_dict)
# create a new datetime
new_datetime = (timezone.now() + timedelta(days=3)).isoformat()
data2 = response.data
data2['start_time'] = new_datetime
data2['end_time'] = new_datetime
# update the event
event_id = data2.pop('@id')
response2 = update_with_put(api_client, event_id, data2)
# assert backend rescheduled the event
data2['event_status'] = 'EventRescheduled'
assert_event_data_is_equal(data2, response2.data)
# mark the event cancelled
data2 = response.data
data2['event_status'] = 'EventCancelled'
# update the event
response2 = update_with_put(api_client, event_id, data2)
# assert backend cancelled the event
data2['event_status'] = 'EventCancelled'
assert_event_data_is_equal(data2, response2.data)
@pytest.mark.django_db
def test__reschedule_a_cancelled_event_with_put(api_client, complex_event_dict, user):
# create an event
api_client.force_authenticate(user=user)
response = create_with_post(api_client, complex_event_dict)
# mark the event cancelled
data2 = response.data
data2['event_status'] = 'EventCancelled'
# update the event
event_id = data2.pop('@id')
response2 = update_with_put(api_client, event_id, data2)
# assert backend cancelled the event
data2['event_status'] = 'EventCancelled'
assert_event_data_is_equal(data2, response2.data)
# create a new datetime and remove the cancelled status
new_datetime = (timezone.now() + timedelta(days=3)).isoformat()
data3 = response2.data
data3['start_time'] = new_datetime
data3['end_time'] = new_datetime
data3.pop('event_status')
# update the event
event_id = data3.pop('@id')
response3 = update_with_put(api_client, event_id, data3)
# assert backend rescheduled the event
data3['event_status'] = 'EventRescheduled'
assert_event_data_is_equal(data3, response3.data)
# the following values may not be posted
@pytest.mark.django_db
@pytest.mark.parametrize("non_permitted_input,non_permitted_response", [
({'id': 'not_allowed:1'}, 400), # may not fake id
({'id': settings.SYSTEM_DATA_SOURCE_ID + ':changed'}, 400), # may not change object id
({'data_source': 'theotherdatasourceid'}, 400), # may not fake data source
({'publisher': 'test_organization2'}, 400), # may not fake organization
])
def test__non_editable_fields_at_put(api_client, minimal_event_dict, user,
non_permitted_input, non_permitted_response):
# create the event first
api_client.force_authenticate(user)
response = create_with_post(api_client, minimal_event_dict)
data2 = response.data
event_id = data2.pop('@id')
# try to put non permitted values
data2.update(non_permitted_input)
response2 = api_client.put(event_id, data2, format='json')
assert response2.status_code == non_permitted_response
if non_permitted_response >= 400:
# check that there is an error message for the corresponding field
assert list(non_permitted_input)[0] in response2.data
@pytest.mark.django_db
def test__a_non_admin_cannot_update_an_event(api_client, event, complex_event_dict, user):
event.publisher.admin_users.remove(user)
api_client.force_authenticate(user)
detail_url = reverse('event-detail', kwargs={'pk': event.pk})
response = update_with_put(api_client, detail_url, complex_event_dict)
assert response.status_code == 403
@pytest.mark.django_db
def test__correct_api_key_can_update_an_event(api_client, event, complex_event_dict, data_source, organization):
data_source.owner = organization
data_source.save()
detail_url = reverse('event-detail', kwargs={'pk': event.pk})
response = update_with_put(api_client, detail_url, complex_event_dict,
credentials={'apikey': data_source.api_key})
assert response.status_code == 200
@pytest.mark.django_db
def test__wrong_api_key_cannot_update_an_event(api_client, event, complex_event_dict, data_source, other_data_source,
organization, organization2):
data_source.owner = organization
data_source.save()
other_data_source.owner = organization2
other_data_source.save()
    del complex_event_dict['publisher']
detail_url = reverse('event-detail', kwargs={'pk': event.pk})
response = update_with_put(api_client, detail_url, complex_event_dict,
credentials={'apikey': other_data_source.api_key})
print(response.data)
assert response.status_code == 403
@pytest.mark.django_db
def test__api_key_without_organization_cannot_update_an_event(api_client, event, complex_event_dict, data_source):
detail_url = reverse('event-detail', kwargs={'pk': event.pk})
response = update_with_put(api_client, detail_url, complex_event_dict,
credentials={'apikey': data_source.api_key})
assert response.status_code == 403
@pytest.mark.django_db
def test__unknown_api_key_cannot_update_an_event(api_client, event, complex_event_dict):
detail_url = reverse('event-detail', kwargs={'pk': event.pk})
response = update_with_put(api_client, detail_url, complex_event_dict,
credentials={'apikey': 'unknown'})
assert response.status_code == 401
@pytest.mark.django_db
def test__empty_api_key_cannot_update_an_event(api_client, event, complex_event_dict,):
detail_url = reverse('event-detail', kwargs={'pk': event.pk})
response = update_with_put(api_client, detail_url, complex_event_dict,
credentials={'apikey': ''})
assert response.status_code == 401
```
#### File: events/tests/test_user_get.py
```python
import pytest
from .utils import get, versioned_reverse as reverse, assert_fields_exist
# === util methods ===
def get_list(api_client, version='v1'):
list_url = reverse('user-list', version=version)
return get(api_client, list_url)
def get_detail(api_client, detail_pk, version='v1'):
detail_url = reverse('user-detail', version=version, kwargs={'pk': detail_pk})
return get(api_client, detail_url)
def assert_user_fields_exist(data, version='v1'):
# TODO: incorporate version parameter into version aware
# parts of test code
fields = (
'last_login',
'username',
'email',
'date_joined',
'first_name',
'last_name',
'uuid',
'department_name',
'organization',
'is_staff',
'display_name',
)
assert_fields_exist(data, fields)
# === tests ===
@pytest.mark.django_db
def test__get_user_list(api_client, user, organization):
organization.admin_users.add(user)
api_client.force_authenticate(user=user)
response = get_detail(api_client, user.pk)
print(response.data)
assert_user_fields_exist(response.data)
```
#### File: linkedevents/events/utils.py
```python
import re
import collections.abc
def convert_to_camelcase(s):
return ''.join(word.title() if i else word for i, word in enumerate(
s.split('_')))
def convert_from_camelcase(s):
return re.sub(r'(^|[a-z])([A-Z])',
lambda m: '_'.join([i.lower() for i in m.groups() if i]), s)
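# Round-trip examples:
#   convert_to_camelcase('service_request_id')   == 'serviceRequestId'
#   convert_from_camelcase('serviceRequestId')   == 'service_request_id'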
def get_value_from_tuple_list(list_of_tuples, search_key, value_index):
"""
    Find a value from a list of tuples, using one element of each tuple as the
    search key and the other element as the returned value
:param list_of_tuples: tuples to be searched
:param search_key: search key used to find right tuple
:param value_index: Index telling which side of tuple is
returned and which is used as a key
:return: Value from either side of tuple
"""
for i, v in enumerate(list_of_tuples):
if v[value_index ^ 1] == search_key:
return v[value_index]
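# Example with a Django-style choices list (hypothetical data):
#   STATUSES = [('open', 'Open'), ('closed', 'Closed')]
#   get_value_from_tuple_list(STATUSES, 'Open', 0)   == 'open'     # search by the display side
#   get_value_from_tuple_list(STATUSES, 'closed', 1) == 'Closed'   # search by the code side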
def update(d, u):
"""
Recursively update dict d with
values at all levels of dict u
"""
for k, v in u.items():
        if isinstance(v, collections.abc.Mapping):
r = update(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
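# Example of the recursive merge (hypothetical data):
#   update({'a': {'b': 1}, 'x': 1}, {'a': {'c': 2}, 'x': 2})
#   == {'a': {'b': 1, 'c': 2}, 'x': 2}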
``` |
{
"source": "6aika/o3-6a-kkhprp",
"score": 2
} |
#### File: api/views/services.py
```python
from django.utils import translation
from rest_framework.filters import BaseFilterBackend
from rest_framework.generics import ListAPIView
from issues.api.serializers import ServiceSerializer
from issues.models import Service
class ServiceFilter(BaseFilterBackend):
def filter_queryset(self, request, queryset, view):
jurisdiction_id = request.query_params.get('jurisdiction_id')
if jurisdiction_id:
queryset = queryset.filter(jurisdictions__identifier=jurisdiction_id)
return queryset
class ServiceList(ListAPIView):
item_tag_name = 'service'
root_tag_name = 'services'
serializer_class = ServiceSerializer
queryset = Service.objects.all().order_by('service_code')
filter_backends = (
ServiceFilter,
)
def dispatch(self, request, *args, **kwargs):
locale = (request.GET.get("locale") or translation.get_language())
with translation.override(locale):
return super().dispatch(request, *args, **kwargs)
```
#### File: o3-6a-kkhprp/issues_citysdk/extension.py
```python
from django.core.exceptions import ObjectDoesNotExist
from rest_framework import serializers
from rest_framework.exceptions import ValidationError
from issues.extensions import IssueExtension
class CitySDKExtension(IssueExtension):
identifier = 'citysdk'
related_name = 'citysdk'
def filter_issue_queryset(self, request, queryset, view):
search = request.query_params.get('search')
service_object_id = request.query_params.get('service_object_id')
service_object_type = request.query_params.get('service_object_type')
if bool(service_object_id) ^ bool(service_object_type):
raise ValidationError(
"Both service_object_id and service_object_type or neither of them must be included in a request."
)
if search:
queryset = (
queryset.filter(description__icontains=search) |
queryset.filter(citysdk__title__icontains=search) |
queryset.filter(address__icontains=search) |
queryset.filter(agency_responsible__icontains=search)
)
if service_object_type:
queryset = queryset.filter(citysdk__service_object_type__icontains=service_object_type)
if service_object_id:
queryset = queryset.filter(citysdk__service_object_id=service_object_id)
return queryset
def get_extended_attributes(self, issue, context=None):
try:
cs_ext = issue.citysdk
except ObjectDoesNotExist:
return None
return {
'service_object_type': cs_ext.service_object_type,
'service_object_id': cs_ext.service_object_id,
'detailed_status': cs_ext.detailed_status,
'title': cs_ext.title,
}
def extend_issue_serializer(self, serializer):
serializer.fields['service_object_id'] = serializers.CharField(write_only=True, required=False)
serializer.fields['service_object_type'] = serializers.CharField(write_only=True, required=False)
serializer.fields['title'] = serializers.CharField(write_only=True, required=False)
def validate_issue_data(self, serializer, data):
if bool(data.get('service_object_id')) ^ bool(data.get('service_object_type')):
raise ValidationError('both service_object_id and service_object_type must be set if one is')
return data
def post_create_issue(self, request, issue, data):
from issues_citysdk.models import Issue_CitySDK
service_object_id = data.pop('service_object_id', None)
service_object_type = data.pop('service_object_type', None)
ext_data = {}
if service_object_id and service_object_type:
ext_data.update(
service_object_id=service_object_id,
service_object_type=service_object_type,
)
title = data.pop('title', None)
if title:
ext_data['title'] = title
if ext_data:
Issue_CitySDK.objects.create(issue=issue, **ext_data)
```
#### File: o3-6a-kkhprp/issues_geometry/apps.py
```python
from django.apps import AppConfig
from django.core.exceptions import ImproperlyConfigured
from issues.gis import determine_gissiness
from issues_geometry.extension import GeometryExtension
class IssuesGeometryConfig(AppConfig):
name = 'issues_geometry'
issue_extension = GeometryExtension
verbose_name = 'Issues: Geometry Extensions'
def ready(self):
if not determine_gissiness(): # pragma: no cover
raise ImproperlyConfigured('the geometry extension requires a GIS-enabled database')
```
#### File: issues_geometry/tests/test_geo_api.py
```python
import json
import re
import pytest
import sys
from django.conf import settings
from django.urls import reverse
from issues.tests.conftest import mf_api_client, random_service # noqa
from issues.tests.schemata import LIST_OF_ISSUES_SCHEMA
from issues.tests.utils import ISSUE_LIST_ENDPOINT, get_data_from_response, verify_issue
from issues_geometry.validation import GeoJSONValidator
if 'issues_geometry' not in settings.INSTALLED_APPS:
pytestmark = pytest.mark.skip('issues_geometry app disabled')
AURAJOKIRANTA_GEOJSON = {
"type": "Feature",
"properties": {},
"geometry": {
"type": "Polygon",
"coordinates": [
[
[
22.264137268066406,
60.440030997851935
],
[
22.25804328918457,
60.43943818738373
],
[
22.254438400268555,
60.44155531797
],
[
22.251176834106445,
60.443799325795936
],
[
22.25701332092285,
60.44617018455179
],
[
22.265253067016598,
60.44824454404312
],
[
22.268171310424805,
60.449599156297516
],
[
22.27005958557129,
60.44875253025832
],
[
22.273406982421875,
60.448837193855205
],
[
22.27804183959961,
60.44998013081624
],
[
22.281217575073242,
60.44735554905158
],
[
22.27890014648437,
60.445492813989986
],
[
22.268428802490234,
60.442656171363225
],
[
22.264137268066406,
60.440030997851935
]
]
]
}
}
@pytest.mark.parametrize('geometry_data', [None, AURAJOKIRANTA_GEOJSON, AURAJOKIRANTA_GEOJSON['geometry']])
def test_post_geometry(random_service, mf_api_client, geometry_data):
if sys.version_info[0] == 2 and mf_api_client.format != 'json':
pytest.xfail('unsupported')
from issues_geometry.models import IssueGeometry
post_data = {
'extensions': 'geometry',
'service_code': random_service.service_code,
'description': 'Olut on loppu koko jokirannasta',
'geometry': (
json.dumps(geometry_data)
if geometry_data
else ''
),
}
if not post_data.get('geometry'):
post_data['address'] = 'foo street'
response = mf_api_client.post(ISSUE_LIST_ENDPOINT, data=post_data)
content = get_data_from_response(
response,
status_code=201,
schema=LIST_OF_ISSUES_SCHEMA,
)
issue_data = content[0]
issue = verify_issue(issue_data)
if not geometry_data:
# This exercises the code path where one requests the geometry extension
# but doesn't actually post geometry after all.
return
# No matter the format, we should always have a GeoJSON fragment, whether encoded or indented, in there:
assert re.search(r'\\*"type\\*":\s*\\*"Polygon\\*"', response.content.decode('utf8'))
assert IssueGeometry.objects.filter(issue=issue).exists()
retrieved_issue_data = get_data_from_response(
mf_api_client.get(
reverse('georeport/v2:issue-detail', kwargs={'identifier': issue.identifier}),
{'extensions': 'geometry'},
)
)[0]
for data in (issue_data, retrieved_issue_data):
verify_issue(data)
if mf_api_client.format == 'json':
# We can't access the extended attribute correctly when it has been mangled by the
# test harness, so only test it when doing native JSON.
GeoJSONValidator.validate(data['extended_attributes']['geometry'])
def test_post_invalid_json(random_service, mf_api_client):
response = get_data_from_response(
mf_api_client.post(
ISSUE_LIST_ENDPOINT,
{
'extensions': 'geometry',
'service_code': random_service.service_code,
'description': 'Miten tätä ajetaan?',
'geometry': json.dumps(['oops']),
}
),
status_code=400
)
assert 'JSON' in str(response) # Yeah, it complains about JSON, that's fine
def test_post_invalid_geojson(random_service, mf_api_client):
response = get_data_from_response(
mf_api_client.post(
ISSUE_LIST_ENDPOINT,
{
'extensions': 'geometry',
'service_code': random_service.service_code,
'description': 'Miten tätä ajetaan?',
'geometry': json.dumps({'tepe': 'palygon'}),
},
),
status_code=400
)
assert 'Invalid GeoJSON' in str(response) # Complains about GeoJSON, that's nice
```
#### File: o3-6a-kkhprp/issues_simple_ui/admin.py
```python
from django.contrib import admin
from django.forms.widgets import RadioSelect
from parler.admin import TranslatableAdmin
from issues_simple_ui.enums import CONTENT_IDENTIFIERS, IMAGE_IDENTIFIERS
from issues_simple_ui.models import Content, Image
class ContentAdmin(TranslatableAdmin):
list_display = ('identifier', 'all_languages_column')
def get_form(self, request, obj=None, **kwargs):
form = super().get_form(request, obj, **kwargs)
form.base_fields['identifier'].widget = RadioSelect(choices=CONTENT_IDENTIFIERS)
return form
class ImageAdmin(admin.ModelAdmin):
def get_form(self, request, obj=None, **kwargs):
form = super().get_form(request, obj, **kwargs)
form.base_fields['identifier'].widget = RadioSelect(choices=IMAGE_IDENTIFIERS)
return form
admin.site.register(Content, ContentAdmin)
admin.site.register(Image, ImageAdmin)
```
#### File: issues_simple_ui/templatetags/simple_ui_tags.py
```python
import json
from django import template
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist
from django.middleware.csrf import get_token
from django.utils.safestring import mark_safe
from django.utils.translation import get_language
from issues.excs import InvalidAppError
from issues.models import Application
from issues_simple_ui.models import Content, Image
register = template.Library()
@register.simple_tag
def get_content(identifier):
return Content.retrieve(identifier)
@register.simple_tag
def get_image(identifier):
try:
return Image.objects.get(identifier=identifier).file.url
except ObjectDoesNotExist:
return None
@register.simple_tag(takes_context=True)
def get_config_json(context, **extra):
"""
Get a JSON blob that is used to configure the frontend JavaScript.
:param context: Django rendering context
:param extra: Extra attributes to inject
:return:
"""
request = context['request']
user = request.user
if user and user.is_authenticated:
extra['csrf_token'] = get_token(request)
try:
application = Application.autodetermine()
except InvalidAppError:
application, _ = Application.objects.get_or_create(
identifier='simple_ui',
defaults={'name': 'Simple UI'},
)
return mark_safe(json.dumps(dict({
'language': get_language().split('-')[0],
'api_key': application.key,
'api_root': f'/{settings.GEOREPORT_API_ROOT}',
'map_settings': { # TODO: Make this configurable
'center': [60.1699, 24.9384],
'tileUrl': 'http://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png',
'attribution': '© <a href="https://www.openstreetmap.org/copyright">OpenStreetMap</a>',
'subdomains': ['a', 'b', 'c']
},
}, **extra)))
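# Sketch of the JSON blob this tag renders into the page (values are illustrative only):
#   {
#     "language": "fi",
#     "api_key": "<application key>",
#     "api_root": "/v1/",
#     "map_settings": {"center": [60.1699, 24.9384], "tileUrl": "...", ...},
#     "csrf_token": "<present only for authenticated users>"
#   }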
```
#### File: issues_simple_ui/views/simple_content_view.py
```python
from django.views.generic import TemplateView
from issues_simple_ui.models import Content
class SimpleContentView(TemplateView):
"""
Base class for views that inject `title` and `content` into the context based on Content objects.
"""
content_identifier = None
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update(Content.retrieve(self.content_identifier))
return context
```
#### File: issues/tests/conftest.py
```python
import pytest
from django.utils.crypto import get_random_string
from rest_framework.test import APIClient
from issues.models import Issue, Service
from issues.tests.db_utils import execute_fixture
def pytest_configure():
# During tests, crypt passwords with MD5. This should make things run faster.
from django.conf import settings
settings.PASSWORD_HASHERS = (
'django.contrib.auth.hashers.MD5PasswordHasher',
'django.contrib.auth.hashers.PBKDF2PasswordHasher',
'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher',
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
'django.contrib.auth.hashers.SHA1PasswordHasher',
'django.contrib.auth.hashers.CryptPasswordHasher',
)
@pytest.fixture()
def admin_api_client(admin_user):
api_client = APIClient()
api_client.login(username=admin_user.username, password="password")
api_client.user = admin_user
return api_client
@pytest.fixture()
def api_client():
return APIClient()
class FormatEnforcingAPIClient(APIClient):
format = None # Set by the fixture
def get(self, path, data=None, follow=False, **extra):
if not data:
data = {}
data["format"] = self.format
resp = super().get(path, data, follow, **extra)
self._check_response_format(resp)
return resp
def post(self, path, data=None, format=None, content_type=None, follow=False, **extra):
assert not format
assert not content_type
resp = super().post(self._format_path(path), data=data, follow=follow, **extra)
self._check_response_format(resp)
return resp
def _check_response_format(self, resp):
if resp.status_code < 400:
if self.format == "sjson":
assert "json" in resp["Content-Type"]
else:
assert self.format in resp["Content-Type"]
def _format_path(self, path):
return f"{path}{'&' if '?' in path else '?'}format={self.format}"
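# FormatEnforcingAPIClient simply forces ?format=<fmt> onto every request and then checks
# the response Content-Type, e.g. with format == 'xml' (hypothetical path):
#   _format_path('/v1/requests.json?foo=1') -> '/v1/requests.json?foo=1&format=xml'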
@pytest.fixture(params=['xml', 'json', 'sjson'])
def mf_api_client(request):
# mf_api_client is short for multiformat_api_client, fwiw. :)
feac = FormatEnforcingAPIClient()
feac.format = request.param
return feac
@pytest.fixture()
def random_service(db):
return Service.objects.create(
service_code=get_random_string(12),
service_name="Test"
)
@pytest.fixture()
def testing_issues(db):
execute_fixture('insert_requests')
return Issue.objects.all()
```
#### File: issues/tests/utils.py
```python
import json
import jsonschema
from django.urls import reverse_lazy
from issues.api.transforms import transform_xml_to_json
ISSUE_LIST_ENDPOINT = reverse_lazy('georeport/v2:issue-list')
def get_data_from_response(response, status_code=200, schema=None):
if status_code: # pragma: no branch
assert response.status_code == status_code, (
f"Status code mismatch ({response.status_code} is not the expected {status_code})"
)
if response["Content-Type"].startswith("application/xml"):
response.xml = response.content
response.content = transform_xml_to_json(response.content)
response["Content-Type"] = "application/json"
data = json.loads(response.content.decode('utf-8'))
if schema and response.status_code < 400:
jsonschema.validate(data, schema)
return data
ISSUE_VERIFICATION_FIELDS = [
('service_request_id', 'identifier'),
] + [(n, n) for n in [
"description",
"status",
"status_notes",
"agency_responsible",
"service_notice",
"address",
]]
def verify_issue(data, issue=None):
"""
Verify the given data describes the issue passed in.
If not issue is passed in, it's retrieved from the local database for convenience
Does not do schema validation, though.
:type issue: issues.models.Issue|None
:type data: dict
"""
if issue is None:
from issues.models import Issue
issue = Issue.objects.get(identifier=data['service_request_id'])
for data_field, issue_field in ISSUE_VERIFICATION_FIELDS:
if issue_field is None:
issue_field = data_field
if callable(issue_field):
issue_value = issue_field(issue)
else:
issue_value = getattr(issue, issue_field, None)
if data_field in data or issue_value:
assert data[data_field] == issue_value
if issue.location:
lon, lat = issue.location
assert close_enough(float(data["long"]), lon)
assert close_enough(float(data["lat"]), lat)
return issue # for use in `assert verify_issue()`
def close_enough(a, b, epsilon=0.001):
distance = abs(a - b)
assert distance < epsilon, f"{a} and {b} have distance {distance} (should be < {epsilon})"
return True # for use in `assert close_enough()`
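# close_enough() guards against floating point rounding when comparing coordinates, e.g.:
#   close_enough(60.1699, 60.16991) is True
#   close_enough(60.1699, 60.18) raises AssertionError with the computed distance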
``` |
{
"source": "6ambar1ku/white_workers",
"score": 3
} |
#### File: 6ambar1ku/white_workers/compare_dataset_jsonl.py
```python
import pandas as pd
import ssdeep
from tqdm import tqdm
def comp(ssdeep1):
    # Read the file one line at a time
dic = {}
    # Fill in the path of the JSONL file you want to read below
with open("<your data path>/*.jsonl", "r") as f:
        row = f.readline()  # read the first line
i = 0
pbar = tqdm(total=75000)
while row:
sha256 = "".join(extract(row, "sha256"))
ssdeep2 = "".join(extract(row, "ssdeep"))
compare_value = ssdeep.compare(ssdeep1,ssdeep2)
dic[sha256] = compare_value
            row = f.readline()  # read the next line
i += 1
pbar.update(1)
pbar.close()
dic_sorted = sorted(dic.items(), key=lambda x:x[1], reverse = True)
return dic_sorted
def extract(str_origin, str_find):
index_str = str_origin.find(str_find)
i = index_str
count = 0
while 1:
if str_origin[i] == '"':
count += 1
i += 1
if count == 2:
break
str_return = []
while 1:
str_return.append(str_origin[i])
i += 1
if str_origin[i] == '"':
break
return str_return
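# Usage sketch (hypothetical ssdeep digest): comp() scans every JSONL record, pulls out the
# "sha256" and "ssdeep" string values with extract(), and returns (sha256, similarity) pairs
# sorted by descending ssdeep.compare() score (0-100):
#
#   results = comp('3:AXGBicFlgVNhBGcL6wCrFQEv:AXGHsNhxLsr2C')
#   best_match_sha256, best_score = results[0]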
``` |
{
"source": "6Arin9/wulkanowy-web",
"score": 3
} |
#### File: app/API/timetable.py
```python
import json
import requests
from bs4 import BeautifulSoup
from .generate_cookies import autogenerate_cookies
def get_timetable(register_id, students, oun, s, date):
cookies = autogenerate_cookies(students, s)
with open('app/API/headers.json') as f:
headers = json.load(f)
timetable = requests.post(oun+'/PlanZajec.mvc/Get', headers=headers, cookies=cookies, json={'data': date})
return timetable.json()
```
#### File: wulkanowy-web/app/views.py
```python
from requests import get
from cryptography.fernet import Fernet
from django.contrib.sessions.backends.db import SessionStore
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render
import json
import requests
from rest_framework.decorators import api_view
from django.core import serializers
from django.shortcuts import redirect
from django.contrib.sessions.models import Session
from .login import sender
from .API.grades import get_grades
from .API.exams import get_exams
from .API.timetable import get_timetable
from .API.notes import get_notes
from .API.attendance import get_attendance
from .API.messages import get_received_messages, get_sent_messages, get_deleted_messages, get_recipients, send_message, get_message_content
from .API.homeworks import get_homeworks
from .API.mobile_access import get_registered_devices, register_device
from .API.school_data import get_school_data
from .API.dashboard import get_dashboard
from .API.student_data import get_student_data
from .API.stats import get_partial, get_year
from .decrypt import decrypt_cookies
import datetime
#API
@api_view(['POST'])
def login(request, *args, **kwargs):
data = json.loads(request.body)
loginName = data['loginName']
Password = data['Password']
symbol = data['Symbol']
diary_url = data['diaryUrl']
if diary_url != 'http://cufs.fakelog.tk/':
link = f'{diary_url}{symbol}/Account/LogOn?ReturnUrl=%2F{symbol}%2FFS%2FLS%3Fwa%3Dwsignin1.0%26wtrealm%3Dhttps%253a%252f%252fuonetplus.vulcan.net.pl%252f{symbol}%252fLoginEndpoint.aspx%26wctx%3Dhttps%253a%252f%252fuonetplus.vulcan.net.pl%252f{symbol}%252fLoginEndpoint.aspx'
else:
link = 'http://cufs.fakelog.tk/powiatwulkanowy/FS/LS?wa=wsignin1.0&wtrealm=http://uonetplus.fakelog.localhost:300/powiatwulkanowy/LoginEndpoint.aspx&wctx=http://uonetplus.fakelog.localhost:300/powiatwulkanowy/LoginEndpoint.aspx'
s = requests.Session()
sender_return = sender(link, loginName, Password, ('loginName', 'Password'), 'Zła nazwa użytkownika lub hasło', symbol, diary_url, s)
if sender_return == {'success': False}:
data_response = {
'success': False
}
else:
request.session['is_logged'] = True
request.session[request.session.session_key] = Fernet.generate_key().decode('utf-8')
rkey = Fernet(bytes(request.session[request.session.session_key], 'utf-8'))
sender_return['s'] = json.dumps(sender_return['s'])
sender_return['s'] = sender_return['s'].encode()
sender_return['s'] = rkey.encrypt(sender_return['s'])
sender_return['s'] = sender_return['s'].decode('utf-8')
data_response = {'success': True, 'data': sender_return}
return JsonResponse(data_response)
@api_view(['POST'])
def grades(request, *args, **kwargs):
if request.session.has_key('is_logged'):
data = json.loads(request.body)
register_id = data['data']['register_id']
students = data['data']['students']
school_url = data['data']['school_url']
s = data['data']['s']
key = bytes(request.session[request.session.session_key], 'utf-8')
s = decrypt_cookies(s, key)
grades = get_grades(register_id, students, school_url, s)
return JsonResponse(grades)
else:
return redirect('../')
@api_view(['POST'])
def timetable(request, *args, **kwargs):
if request.session.has_key('is_logged'):
data = json.loads(request.body)
week = data['week']
data = json.loads(data['cookies'])
register_id = data['data']['register_id']
students = data['data']['students']
school_url = data['data']['school_url']
s = data['data']['s']
key = bytes(request.session[request.session.session_key], 'utf-8')
s = decrypt_cookies(s, key)
        # rewind to Monday of the current week, then shift by the requested number of weeks
        now = datetime.datetime.now()
        now = now - datetime.timedelta(days=now.weekday()) + datetime.timedelta(days=week * 7)
day = now.day
month = now.month
year = now.year
date = datetime.date(year, month, day).isoformat()
date = f'{date}T00:00:00'
timetable = get_timetable(register_id, students, school_url, s, date)
return JsonResponse(timetable)
else:
return redirect('../')
@api_view(['POST'])
def exams(request, *args, **kwargs):
if request.session.has_key('is_logged'):
data = json.loads(request.body)
week = data['week']
data = json.loads(data['cookies'])
register_id = data['data']['register_id']
students = data['data']['students']
school_url = data['data']['school_url']
s = data['data']['s']
key = bytes(request.session[request.session.session_key], 'utf-8')
s = decrypt_cookies(s, key)
        # rewind to Monday of the current week, then shift by the requested number of weeks
        now = datetime.datetime.now()
        now = now - datetime.timedelta(days=now.weekday()) + datetime.timedelta(days=week * 7)
day = now.day
month = now.month
year = now.year
date = datetime.date(year, month, day).isoformat()
date = f'{date}T00:00:00'
school_year = data['data']['school_year']
exams = get_exams(register_id, students, school_url, s, date, school_year)
return JsonResponse(exams)
else:
return redirect('../')
@api_view(['POST'])
def homeworks(request, *args, **kwargs):
if request.session.has_key('is_logged'):
data = json.loads(request.body)
week = data['week']
data = json.loads(data['cookies'])
register_id = data['data']['register_id']
students = data['data']['students']
school_url = data['data']['school_url']
s = data['data']['s']
key = bytes(request.session[request.session.session_key], 'utf-8')
s = decrypt_cookies(s, key)
now = datetime.datetime.now()
weekday = now.weekday()
for x in range(7):
if weekday == x:
now = now - datetime.timedelta(days=x)
now = now + datetime.timedelta(days=week*7)
day = now.day
month = now.month
year = now.year
date = datetime.date(year, month, day).isoformat()
date = f'{date}T00:00:00'
school_year = data['data']['school_year']
homeworks = get_homeworks(register_id, students, school_url, s, date, school_year)
return JsonResponse(homeworks)
else:
return redirect('../')
@api_view(['POST'])
def attendance(request, *args, **kwargs):
if request.session.has_key('is_logged'):
data = json.loads(request.body)
week = data['week']
data = json.loads(data['cookies'])
register_id = data['data']['register_id']
students = data['data']['students']
school_url = data['data']['school_url']
s = data['data']['s']
key = bytes(request.session[request.session.session_key], 'utf-8')
s = decrypt_cookies(s, key)
now = datetime.datetime.now()
weekday = now.weekday()
for x in range(7):
if weekday == x:
now = now - datetime.timedelta(days=x)
now = now + datetime.timedelta(days=week*7)
day = now.day
month = now.month
year = now.year
date = datetime.date(year, month, day).isoformat()
date = f'{date}T00:00:00'
attendance = get_attendance(register_id, students, school_url, s, date)
return JsonResponse(attendance, safe=False)
else:
return redirect('../')
@api_view(['POST'])
def notes(request, *args, **kwargs):
if request.session.has_key('is_logged'):
data = json.loads(request.body)
register_id = data['data']['register_id']
students = data['data']['students']
school_url = data['data']['school_url']
s = data['data']['s']
key = bytes(request.session[request.session.session_key], 'utf-8')
s = decrypt_cookies(s, key)
notes = get_notes(register_id, students, school_url, s)
return JsonResponse(notes)
else:
return redirect('../')
@api_view(['POST'])
def registered_devices(request, *args, **kwargs):
if request.session.has_key('is_logged'):
data = json.loads(request.body)
register_id = data['data']['register_id']
students = data['data']['students']
school_url = data['data']['school_url']
s = data['data']['s']
key = bytes(request.session[request.session.session_key], 'utf-8')
s = decrypt_cookies(s, key)
registered = get_registered_devices(register_id, students, school_url, s)
return JsonResponse(registered)
else:
return redirect('../')
@api_view(['POST'])
def register_device_(request, *args, **kwargs):
if request.session.has_key('is_logged'):
data = json.loads(request.body)
register_id = data['data']['register_id']
students = data['data']['students']
school_url = data['data']['school_url']
s = data['data']['s']
key = bytes(request.session[request.session.session_key], 'utf-8')
s = decrypt_cookies(s, key)
register_data = register_device(register_id, students, school_url, s)
return JsonResponse(register_data)
else:
return redirect('../')
@api_view(['POST'])
def received_messages(request, *args, **kwargs):
if request.session.has_key('is_logged'):
data = json.loads(request.body)
register_id = data['data']['register_id']
students = data['data']['students']
school_url = data['data']['school_url']
s = data['data']['s']
key = bytes(request.session[request.session.session_key], 'utf-8')
s = decrypt_cookies(s, key)
date = data['data']['date']
school_year = data['data']['school_year']
symbol = data['data']['symbol']
received_messages = get_received_messages(register_id, students, school_url, s, date, school_year, symbol)
return JsonResponse(received_messages)
else:
return redirect('../')
@api_view(['POST'])
def sent_messages(request, *args, **kwargs):
if request.session.has_key('is_logged'):
data = json.loads(request.body)
register_id = data['data']['register_id']
students = data['data']['students']
school_url = data['data']['school_url']
s = data['data']['s']
key = bytes(request.session[request.session.session_key], 'utf-8')
s = decrypt_cookies(s, key)
date = data['data']['date']
school_year = data['data']['school_year']
symbol = data['data']['symbol']
sent_messages = get_sent_messages(register_id, students, school_url, s, date, school_year, symbol)
return JsonResponse(sent_messages)
else:
return redirect('../')
@api_view(['POST'])
def deleted_messages(request, *args, **kwargs):
if request.session.has_key('is_logged'):
data = json.loads(request.body)
register_id = data['data']['register_id']
students = data['data']['students']
school_url = data['data']['school_url']
s = data['data']['s']
key = bytes(request.session[request.session.session_key], 'utf-8')
s = decrypt_cookies(s, key)
date = data['data']['date']
school_year = data['data']['school_year']
symbol = data['data']['symbol']
deleted_messages = get_deleted_messages(register_id, students, school_url, s, date, school_year, symbol)
return JsonResponse(deleted_messages)
else:
return redirect('../')
@api_view(['POST'])
def recipients(request, *args, **kwargs):
if request.session.has_key('is_logged'):
data = json.loads(request.body)
register_id = data['data']['register_id']
students = data['data']['students']
school_url = data['data']['school_url']
s = data['data']['s']
key = bytes(request.session[request.session.session_key], 'utf-8')
s = decrypt_cookies(s, key)
date = data['data']['date']
school_year = data['data']['school_year']
symbol = data['data']['symbol']
recipients = get_recipients(register_id, students, school_url, s, date, school_year, symbol)
return JsonResponse(recipients)
else:
return redirect('../')
@api_view(['POST'])
def school_data(request, *args, **kwargs):
if request.session.has_key('is_logged'):
data = json.loads(request.body)
register_id = data['data']['register_id']
students = data['data']['students']
school_url = data['data']['school_url']
s = data['data']['s']
key = bytes(request.session[request.session.session_key], 'utf-8')
s = decrypt_cookies(s, key)
school_data = get_school_data(register_id, students, school_url, s)
return JsonResponse(school_data)
else:
return redirect('../')
@api_view(['POST'])
def dashboard(request, *args, **kwargs):
if request.session.has_key('is_logged'):
data = json.loads(request.body)
register_id = data['data']['register_id']
students = data['data']['students']
s = data['data']['s']
key = bytes(request.session[request.session.session_key], 'utf-8')
s = decrypt_cookies(s, key)
diary_url = data['data']['diary_url']
symbol = data['data']['symbol']
dashboard = get_dashboard(register_id, students, s, diary_url, symbol)
return JsonResponse(dashboard)
else:
return redirect('../')
@api_view(['POST'])
def send(request, *args, **kwargs):
if request.session.has_key('is_logged'):
data = json.loads(request.body)
cookies_data = json.loads(data['cookies_data'])
register_id = cookies_data['data']['register_id']
students = cookies_data['data']['students']
school_url = cookies_data['data']['school_url']
s = cookies_data['data']['s']
key = bytes(request.session[request.session.session_key], 'utf-8')
s = decrypt_cookies(s, key)
date = cookies_data['data']['date']
school_year = cookies_data['data']['school_year']
symbol = cookies_data['data']['symbol']
send_data = {'data': data['data'], 'subject': data['subject'], 'content': data['content']}
send = send_message(register_id, students, school_url, s, date, school_year, symbol, send_data)
return JsonResponse(send, safe=False)
else:
return redirect('../')
@api_view(['POST'])
def message_content(request, *args, **kwargs):
if request.session.has_key('is_logged'):
data = json.loads(request.body)
cookies_data = json.loads(data['cookies_data'])
register_id = cookies_data['data']['register_id']
students = cookies_data['data']['students']
school_url = cookies_data['data']['school_url']
s = cookies_data['data']['s']
key = bytes(request.session[request.session.session_key], 'utf-8')
s = decrypt_cookies(s, key)
date = cookies_data['data']['date']
school_year = cookies_data['data']['school_year']
symbol = cookies_data['data']['symbol']
message_id = data['message_id']
content = get_message_content(register_id, students, school_url, s, date, school_year, symbol, message_id)
return JsonResponse(content, safe=False)
else:
return redirect('../')
@api_view(['POST'])
def student_data(request, *args, **kwargs):
if request.session.has_key('is_logged'):
data = json.loads(request.body)
register_id = data['data']['register_id']
students = data['data']['students']
school_url = data['data']['school_url']
s = data['data']['s']
key = bytes(request.session[request.session.session_key], 'utf-8')
s = decrypt_cookies(s, key)
data = get_student_data(register_id, students, school_url, s)
return JsonResponse(data)
else:
return redirect('../')
#STATS
@api_view(['POST'])
def partial(request, *args, **kwargs):
if request.session.has_key('is_logged'):
data = json.loads(request.body)
register_id = data['data']['register_id']
students = data['data']['students']
school_url = data['data']['school_url']
s = data['data']['s']
key = bytes(request.session[request.session.session_key], 'utf-8')
s = decrypt_cookies(s, key)
partial_stats = get_partial(register_id, students, school_url, s)
return JsonResponse(partial_stats)
else:
return redirect('../')
@api_view(['POST'])
def year(request, *args, **kwargs):
if request.session.has_key('is_logged'):
data = json.loads(request.body)
register_id = data['data']['register_id']
students = data['data']['students']
school_url = data['data']['school_url']
s = data['data']['s']
key = bytes(request.session[request.session.session_key], 'utf-8')
s = decrypt_cookies(s, key)
year_stats = get_year(register_id, students, school_url, s)
return JsonResponse(year_stats)
else:
return redirect('../')
@api_view(['GET'])
def log_out(request, *args, **kwargs):
del request.session[request.session.session_key]
del request.session['is_logged']
return JsonResponse({'logOut': True})
```
#### File: wulkanowy-web/tests/test_views.py
```python
from django.test import TestCase, Client
from django.urls import reverse
import json
class TestViews(TestCase):
def setUp(self):
self.client = Client()
self.csrf_token = Client(enforce_csrf_checks=True)
self.list_url = reverse('home')
self.detail_url = reverse('content')
def test_views(self):
#DEFAULT_VIEW
print("\033[94mTesting login view...")
response = self.client.get(self.list_url)
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'frontend/index.html')
if response.status_code == 200:
print("\033[92mPassed!")
else:
print("\033[91mFailed!")
#CONTENT_VIEW
print("\033[94mTesting content view...")
response = self.client.get(self.detail_url)
self.assertEquals(response.status_code, 302)
if response.status_code == 302:
print("\033[92mPassed!")
else:
print("\033[91mFailed!")
#ACCOUNT_MANAGER
print("\033[94mTesting account manager view...")
response = self.client.get(reverse('account_manager'))
self.assertEquals(response.status_code, 302)
if response.status_code == 302:
print("\033[92mPassed!")
else:
print("\033[91mFailed!")
#API
data = {
"loginName": "<EMAIL>",
"Password": "<PASSWORD>",
"Symbol": "powiatwulkanowy",
"diaryUrl": "http://cufs.fakelog.tk/"
}
print("\033[94mTesting login...")
response = self.client.post(reverse('login'), content_type='application/xml', data=json.dumps(data))
cookies_data = response.json()
self.assertEquals(response.status_code, 200)
if response.status_code == 200:
print("\033[92mPassed!")
else:
print("\033[91mFailed!")
students = cookies_data['data']['students']['data']
#JAN
print("\033[94mTesting as <NAME>...")
print("\033[95m============================================================")
jan_data = students[0]
cookies_data['data']['students']['data'] = [jan_data]
get_data_test(self.client, cookies_data, self.assertEquals)
print("\033[96m========================================================================")
#JOANNA
print("\033[94mTesting as <NAME>...")
print("\033[95m============================================================")
joanna_data = students[3]
cookies_data['data']['students']['data'] = [joanna_data]
get_data_test(self.client, cookies_data, self.assertEquals)
log_out_test(self.client, self.assertEquals)
def get_data_test(client, cookies_data, assertEquals):
#GRADES
print("\033[94mTesting grades...")
response = client.post(reverse('grades'), content_type='application/xml', data=json.dumps(cookies_data))
assertEquals(response.status_code, 200)
if response.status_code == 200:
print("\033[92mPassed!")
else:
print("\033[91mFailed!")
#TIMETABLE
print("\033[94mTesting timetable...")
response = client.post(reverse('timetable'), content_type='application/xml', data=json.dumps({'cookies': json.dumps(cookies_data), 'week': 0}))
assertEquals(response.status_code, 200)
if response.status_code == 200:
print("\033[92mPassed!")
else:
print("\033[91mFailed!")
#EXAMS
print("\033[94mTesting exams...")
response = client.post(reverse('exams'), content_type='application/xml', data=json.dumps({'cookies': json.dumps(cookies_data), 'week': 0}))
assertEquals(response.status_code, 200)
if response.status_code == 200:
print("\033[92mPassed!")
else:
print("\033[91mFailed!")
#HOMEWORKS
print("\033[94mTesting homeworks...")
response = client.post(reverse('homeworks'), content_type='application/xml', data=json.dumps({'cookies': json.dumps(cookies_data), 'week': 0}))
assertEquals(response.status_code, 200)
if response.status_code == 200:
print("\033[92mPassed!")
else:
print("\033[91mFailed!")
#ATTENDANCE
print("\033[94mTesting attendance...")
response = client.post(reverse('attendance'), content_type='application/xml', data=json.dumps({'cookies': json.dumps(cookies_data), 'week': 0}))
assertEquals(response.status_code, 200)
if response.status_code == 200:
print("\033[92mPassed!")
else:
print("\033[91mFailed!")
#NOTES
print("\033[94mTesting notes...")
response = client.post(reverse('notes'), content_type='application/xml', data=json.dumps(cookies_data))
assertEquals(response.status_code, 200)
if response.status_code == 200:
print("\033[92mPassed!")
else:
print("\033[91mFailed!")
#SCHOOL DATA
print("\033[94mTesting school data...")
response = client.post(reverse('school_data'), content_type='application/xml', data=json.dumps(cookies_data))
assertEquals(response.status_code, 200)
if response.status_code == 200:
print("\033[92mPassed!")
else:
print("\033[91mFailed!")
#DASHBOARD
print("\033[94mTesting dashboard...")
response = client.post(reverse('dashboard'), content_type='application/xml', data=json.dumps(cookies_data))
assertEquals(response.status_code, 200)
if response.status_code == 200:
print("\033[92mPassed!")
else:
print("\033[91mFailed!")
#MOBILE ACCESS
#REGISTERED DEVICES
print("\033[94mTesting registered devices...")
response = client.post(reverse('registered_devices'), content_type='application/xml', data=json.dumps(cookies_data))
assertEquals(response.status_code, 200)
if response.status_code == 200:
print("\033[92mPassed!")
else:
print("\033[91mFailed!")
#REGISTER DEVICE
print("\033[94mTesting registering device...")
response = client.post(reverse('register_device'), content_type='application/xml', data=json.dumps(cookies_data))
assertEquals(response.status_code, 200)
if response.status_code == 200:
print("\033[92mPassed!")
else:
print("\033[91mFailed!")
#MESSAGES
#RECEIVED MESSAGES
print("\033[94mTesting received messages...")
messages_ids = []
response = client.post(reverse('received_messages'), content_type='application/xml', data=json.dumps(cookies_data))
assertEquals(response.status_code, 200)
messages_ids.append([response.json()['data']])
if response.status_code == 200:
print("\033[92mPassed!")
else:
print("\033[91mFailed!")
#SENT MESSAGES
print("\033[94mTesting sent messages...")
    response = client.post(reverse('sent_messages'), content_type='application/xml', data=json.dumps(cookies_data))
assertEquals(response.status_code, 200)
for id in response.json()['data']:
messages_ids.append(id)
if response.status_code == 200:
print("\033[92mPassed!")
else:
print("\033[91mFailed!")
#DELETED MESSAGES
print("\033[94mTesting deleted messages...")
response = client.post(reverse('deleted_messages'), content_type='application/xml', data=json.dumps(cookies_data))
assertEquals(response.status_code, 200)
messages_ids.append([response.json()['data']])
if response.status_code == 200:
print("\033[92mPassed!")
else:
print("\033[91mFailed!")
#GET RECIPIENTS
print("\033[94mTesting getting recipients...")
response = client.post(reverse('recipients'), content_type='application/xml', data=json.dumps(cookies_data))
assertEquals(response.status_code, 200)
recipients = response.json()['addressee']['data']
if response.status_code == 200:
print("\033[92mPassed!")
else:
print("\033[91mFailed!")
#STUDENT DATA
print("\033[94mTesting student data...")
response = client.post(reverse('student_data'), content_type='application/xml', data=json.dumps(cookies_data))
assertEquals(response.status_code, 200)
if response.status_code == 200:
print("\033[92mPassed!")
else:
print("\033[91mFailed!")
#STATS
#PARTIAL
print("\033[94mTesting partial grades stats...")
response = client.post(reverse('partial'), content_type='application/xml', data=json.dumps(cookies_data))
assertEquals(response.status_code, 200)
if response.status_code == 200:
print("\033[92mPassed!")
else:
print("\033[91mFailed!")
#YEAR
print("\033[94mTesting year grades stats...")
response = client.post(reverse('year'), content_type='application/xml', data=json.dumps(cookies_data))
assertEquals(response.status_code, 200)
if response.status_code == 200:
print("\033[92mPassed!")
else:
print("\033[91mFailed!")
#SEND MESSAGE
print("\033[94mTesting sending message...")
for recipient in recipients:
send_data = {
'cookies_data': json.dumps(cookies_data),
'data': recipient,
'subject': 'Test subject',
'content': 'Test content'
}
response = client.post(reverse('send_message'), content_type='application/xml', data=json.dumps(send_data))
assertEquals(response.status_code, 200)
if response.status_code == 200:
print("\033[92mPassed!")
else:
print("\033[91mFailed!")
#GETTING MESSAGE CONTENT
print("\033[94mTesting getting content of message...")
for id in messages_ids:
send_data = {
'cookies_data': json.dumps(cookies_data),
'message_id': id
}
response = client.post(reverse('message_content'), content_type='application/xml', data=json.dumps(send_data))
assertEquals(response.status_code, 200)
if response.status_code == 200:
print("\033[92mPassed!")
else:
print("\033[91mFailed!")
def log_out_test(client, assertEquals):
#LOG OUT
print("\033[94mTesting logging out...")
response = client.get(reverse('log_out'), content_type='application/xml')
assertEquals(response.status_code, 200)
if response.status_code == 200:
print("\033[92mPassed!")
else:
print("\033[91mFailed!")
``` |
{
"source": "6arms1leg/taskuler",
"score": 3
} |
#### File: taskuler/util/dms-sched-cpu-load.py
```python
import argparse
import pandas as pd
import math
import sys
# WCRT calc. iteration limit after which no convergence is assumed
convTryLim = 50
# Return function handle of argument type function for ArgumentParser checking
# float range: min <= arg <= max
def floatRange(min, max):
# Define the function with default arguments
def floatRangeChecker(arg):
# New type function for argparse - float within predefined range
try:
floatVal = float(arg)
except ValueError:
raise argparse.ArgumentTypeError('Must be a floating point number')
if floatVal < min or floatVal > max:
raise argparse.ArgumentTypeError('Must be in range [' + str(min)
+ '..' + str(max)+']')
return floatVal
# Return function handle to checking function
return floatRangeChecker
# Handle positional and optional arguments
parser = argparse.ArgumentParser(description='Perform schedulability analysis \
for partly preemptive DMS based on CSV input \
file')
parser.add_argument('-t', '--timeTick', type=floatRange(0.0, 10.0),
help='Seconds corresponding to one time tick (improves \
accuracy of the analysis)')
parser.add_argument('-l', '--cpuLoadLim', type=floatRange(0.0, 100.0),
help='CPU load limit in %% (if exceeded, script returns \
non-zero exit code, e.g. for CI purposes)')
parser.add_argument('inputFile', help='CSV input file')
parser.add_argument('outputFileBase', help='MD output file base name')
args = parser.parse_args()
# If time tick argument is not provided ...
if not args.timeTick:
args.timeTick = 0
# Read timing table CSV input file
df = pd.read_csv(args.inputFile, skipinitialspace=True, comment='#')
# Print read input for visual confirmation
print(df.to_string())
# Sort timing table by deadline
# "Mergesort" algorithm is used as it only changes the order if necessary.
# Tasks with equal deadlines stay in the same (maybe intentional) order as in
# the input timing table.
df.sort_values(by=['Deadline in s'], inplace=True, kind='mergesort')
df = df.reset_index(drop=True)
# Add priority column
df.insert(1, 'Prio.', list(range(1, len(df) + 1)))
# Calc. and add period column
df['Period in s'] = 1 / df['Freq. in Hz']
# OBJECTIVE 1 - Calculate total CPU load
# --------------------------------------
#
# The CPU utilization per task is simply its WCET divided by its period.
# The total CPU load is then the sum of all tasks' CPU utilizations.
# See [1].
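# A quick worked example (hypothetical numbers, not taken from any input
# file): a task with a WCET of 2 ms and a period of 10 ms contributes
# 2 / 10 * 100 = 20 % utilization; summing this over all tasks gives the
# total CPU load computed below.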
# Calc. CPU utilization for each task (in %)
df['CPU util. in %'] = df['WCET in s'] / df['Period in s'] * 100
# Calc. total CPU load
cpuLoad = df['CPU util. in %'].sum()
# OBJECTIVE 2 - Calculate WCRT
# ----------------------------
#
# Depending on the scheduling strategy, two different WCRT calculation
# algorithms must be employed:
#
# * ALGORITHM 1 - Taskuler with only cooperative tasks,
# * ALGORITHM 2 - Taskuler augmented with (nested) interrupts.
#
# Algorithm 2 gives an upper bound (worst case), and therefore can also be
# used for systems without interrupt prioritization (no nesting).
# All algorithms assume a timing table that is already sorted by deadline.
# Count
#
# * cooperative tasks, and
# * preemptive tasks (with priority; nested interrupts)
#
# in order to decide which algorithm to employ
schedule = df['Sched.'].value_counts()
# ALGORITHM 1 - Taskuler with only cooperative tasks
#
# The Taskuler is used to schedule all tasks without augmentation by
# interrupts.
# Note, however, that the Taskuler needs a relative system time tick, which is
# normally implemented via an interrupt.
#
# This algorithm consists of STEPs 1--3.
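#
# A small sketch with hypothetical values: for three cooperative tasks with
# WCETs of 1 ms, 2 ms and 4 ms (sorted by deadline), the WCRT of the highest
# priority task is 1 ms + 4 ms - 1 time tick, because in the worst case it
# must wait for the single longest lower priority task to run to completion.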
if 'co' in schedule and 'pe' not in schedule:
print('\nWCRT calc. for cooperative tasks ...\n')
wcrt = []
for idx, wcet in enumerate(df['WCET in s']): # For each task: ...
# ... STEP1 - Sum up all the WCETs of all higher prio. tasks
wcrtTmp = df['WCET in s'].loc[:idx].sum()
# ... If it is not the lowest prio. task:
        #     STEP2 - Find and add the single longest WCET out of all lower prio.
        #             tasks and
        #     STEP3 - Subtract the time of 1 time tick
        if idx != len(df.index)-1:
wcrtTmp += df['WCET in s'].loc[idx+1:].max() - args.timeTick
wcrt.append(wcrtTmp)
df['WCRT in s'] = wcrt
# ALGORITHM 2 - Taskuler augmented with (nested) interrupts
#
# The Taskuler is used to schedule most tasks but augmented by (nested)
# interrupts for the scheduling of some high priority tasks.
#
# This algorithm consists of STEPs 1--4.3.
elif 'co' in schedule and 'pe' in schedule:
print('\nWCRT calc. for cooperative and preemptive tasks (with priority;')
print(' nested interrupts) ...\n')
    # STEP 1 - Divide the timing table into two separate tables, one with all
    #          preemptive and one with all cooperative tasks
co = df[df['Sched.'] == 'co']
pe = df[df['Sched.'] == 'pe']
co = co.reset_index(drop=True)
pe = pe.reset_index(drop=True)
# STEP 2 - Calculate the WCRT for each task in the preemptive timing table
# as per [2], eq. (1)
# The WCRT of the highest priority preemptive task is its WCET
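    # (For reference, [2] eq. (1) is the standard response-time recurrence,
    #  iterated to a fixed point:
    #      R_i^(k+1) = C_i + sum over higher-prio tasks j of
    #                  ceil(R_i^(k) / T_j) * C_j
    #  with C the WCET and T the period; the loop below implements exactly
    #  this, guarded by the convergence iteration limit.)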
wcrtPn = [pe['WCET in s'].loc[0]]
for idx_1, elem in enumerate(pe['WCET in s'].loc[1:]): # For each task
# except the first:
# ...
wcrtPrev = 0
wcrtNow = elem
convTryCnt = 0
while wcrtNow != wcrtPrev:
wcrtPrev = wcrtNow
wcrtNow = elem
for idx_2 in range(idx_1+1):
wcrtNow += math.ceil(wcrtPrev / pe['Period in s'].loc[idx_2]) \
* pe['WCET in s'].loc[idx_2]
if convTryCnt >= convTryLim:
print('\nNo convergence in PE WCRT calc. for '
+ pe['Task'].loc[idx_1] + ' after ' + str(convTryLim)
+ ' iterations.')
sys.exit(1)
else:
convTryCnt += 1
wcrtPn.append(wcrtNow)
# STEP 3 - Calculate WCRT' for each task in the cooperative timing table.
#
# WCRT' is the resulting WCRT of a cooperative task caused by pre-empting
# tasks, with the effect of other cooperative tasks not yet included.
#
    # This is done by appending each cooperative task separately (as the lowest
    # priority task) to the preemptive timing table and calculating its WCRT' as
    # per [2], eq. (1).
wcrtCo = []
for idx_1, elem_1 in enumerate(co['WCET in s']):
wcrtPrev = 0
wcrtNow = elem_1
convTryCnt = 0
while wcrtNow != wcrtPrev:
wcrtPrev = wcrtNow
wcrtNow = elem_1
for idx_2, elem_2 in enumerate(pe['WCET in s']):
wcrtNow += math.ceil(wcrtPrev / pe['Period in s'].loc[idx_2]) \
* elem_2
if convTryCnt >= convTryLim:
print('\nNo convergence in CO WCRT calc. for '
+ co['Task'].loc[idx_1] + ' after ' + str(convTryLim)
+ ' iterations.')
sys.exit(1)
else:
convTryCnt += 1
wcrtCo.append(wcrtNow)
# STEP 4 - Calculate the final WCRT for each cooperative task by performing
# the following steps for the WCRT' for each task in the cooperative timing
# table:
wcrt = []
for idx, elem in enumerate(wcrtCo):
        # STEP4.1 - Add the sum of all higher priority cooperative tasks' WCRT'.
wcrtTmp = elem + sum(wcrtCo[:idx])
# ... If it is not the lowest prio. task:
        #     STEP4.2 - Find and add the single longest WCRT' out of all lower
        #               priority cooperative tasks and
        #     STEP4.3 - Subtract the time of 1 time tick
        if idx != len(wcrtCo)-1:
wcrtTmp += max(wcrtCo[idx+1:]) - args.timeTick
wcrt.append(wcrtTmp)
wcrtCo = wcrt
pe['WCRT in s'] = wcrtPn
co['WCRT in s'] = wcrtCo
df = pd.concat([pe, co])
# Sort by deadline
df.sort_values(by=['Prio.'], inplace=True)
df = df.reset_index(drop=True)
else:
    print('Invalid mix of co/pe')
    sys.exit(1)
# Add deadline overrun/violation column and count all violations (`True`s)
df['Deadline overrun?'] = df['Deadline in s'] < df['WCRT in s']
deadlineOverrunCnt = df['Deadline overrun?'].sum()
# Print final timing table for visual confirmation
print(df.to_string())
# Create table with the result summary
res = pd.DataFrame({'Objective': ['Deadline overruns', 'Total CPU load'],
'Value': [str(deadlineOverrunCnt), str(cpuLoad) + ' %']})
# Print schedulability result (table with the result summary)
print('\n' + res.to_string(index=False))
# Write final Markdown input timing table
df.loc[:,:'WCET in s'].to_markdown(buf=args.outputFileBase + '-in.md',
index=False)
# Write final Markdown output timing table
pd.concat([df['Task'], df.loc[:,'Period in s':]],
axis=1).to_markdown(buf=args.outputFileBase + '-out.md', index=False)
# Write final Markdown result timing table
res.to_markdown(buf=args.outputFileBase + '-res.md', index=False)
# If CPU load limit argument is provided ...
if args.cpuLoadLim:
# Use non-zero exit code if schedulability is unfeasible.
# This allows for easy employment in continuous integration systems.
if deadlineOverrunCnt == 0 and cpuLoad <= args.cpuLoadLim:
print('\n=> Schedulability seems feasible')
sys.exit(0)
else:
print('\n=> Schedulability unfeasible')
sys.exit(1)
sys.exit(0)
``` |
{
"source": "6ba/bbgo",
"score": 2
} |
#### File: bbgo/accounts/forms.py
```python
from __future__ import unicode_literals
from accounts.models import Profile
from django import forms
from django.conf import settings
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.contrib.auth.models import User
from django.utils import formats, timezone
from django.utils.translation import ugettext as _
class RegistrationForm(UserCreationForm):
"""Edit form for sign up"""
email = forms.EmailField(label='email', required=True)
code = forms.CharField(label='code', required=False)
class Meta:
"""Meta for RegistrationForm"""
model = User
fields = {"username", "email", "code"}
if settings.ENABLE_NICKNAME:
fields.add("first_name")
def __init__(self, *args, **kwargs):
"""Override maxlength"""
super(RegistrationForm, self).__init__(*args, **kwargs)
self.fields['username'].widget.attrs['maxlength'] = settings.ID_MAX_LENGTH
if settings.ENABLE_NICKNAME:
self.fields['first_name'].widget.attrs['maxlength'] = settings.ID_MAX_LENGTH
class SettingForm(forms.ModelForm):
"""Edit form for setting"""
class Meta:
"""Meta for SettingForm"""
model = Profile
fields = {
"alarm_interval", "alarm_board", "alarm_reply", "alarm_paper",
"alarm_team", "alarm_full", "sense_client", "sense_slot"
}
def __init__(self, *args, **kwargs):
"""Init"""
super(SettingForm, self).__init__(*args, **kwargs)
self.fields['alarm_reply'].widget.attrs['checked'] = 'checked'
self.fields['alarm_reply'].widget.attrs['disabled'] = True
self.fields['alarm_full'].widget.attrs['checked'] = 'checked'
self.fields['alarm_full'].widget.attrs['disabled'] = True
class UserInfoForm(forms.ModelForm):
"""Edit form for user info"""
email = forms.EmailField(label='email', required=True)
code = forms.CharField(label='code', required=False)
first_name = forms.CharField(max_length=12, required=False)
class Meta:
"""Meta for UserInfoForm"""
model = Profile
fields = {
"portrait", "email", "code", "id1", "id2", "id3", "signature"
}
if settings.ENABLE_NICKNAME:
fields.add("first_name")
def __init__(self, *args, **kwargs):
"""Init"""
super(UserInfoForm, self).__init__(*args, **kwargs)
self.fields['email'].initial = self.instance.user.email
if settings.ENABLE_NICKNAME:
self.fields['first_name'].initial = self.instance.user.first_name
self.fields['first_name'].widget.attrs['maxlength'] = settings.ID_MAX_LENGTH
class LoginForm(AuthenticationForm):
"""Custom login form for suspension"""
def confirm_login_allowed(self, user):
"""Override confirm_login_allowed"""
if user.is_active:
pass
elif not user.is_active:
now = timezone.now()
if now > user.profile.suspension_till:
user.is_active = True
user.save()
else:
formatted_date = formats.date_format(timezone.localtime(
user.profile.suspension_till), "Y-m-d H:i:s")
error = _('You have been suspended until %(date)s.') % {
'date': formatted_date
}
raise forms.ValidationError(error, code='suspended')
```
#### File: bbgo/accounts/views.py
```python
from __future__ import unicode_literals
from math import ceil
import re
from smtplib import SMTPException
from boards.models import Board, Reply
from boards.table import BoardTable
from core.utils import error_page
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib.auth.models import User
from django.core.mail import send_mail
from django.core.signing import TimestampSigner
from django.db.models import Q
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse_lazy
from django.utils import timezone
from django.utils.translation import ugettext as _
from teams.table import TeamTable
from .forms import RegistrationForm, SettingForm, UserInfoForm
from .models import Profile, UserSession
@login_required
def setting(request):
"""Account setting"""
if request.method == "POST":
settingform = SettingForm(request.POST)
if settingform.is_valid():
setting = settingform.save(commit=False)
request.user.profile.sense_client = setting.sense_client
request.user.profile.sense_slot = setting.sense_slot
request.user.profile.alarm_board = setting.alarm_board
request.user.profile.alarm_reply = True
request.user.profile.alarm_paper = setting.alarm_paper
request.user.profile.alarm_team = setting.alarm_team
request.user.profile.alarm_full = True
if setting.alarm_interval < settings.MIN_ALARM_INTERVAL:
request.user.profile.alarm_interval \
= settings.MIN_ALARM_INTERVAL
elif setting.alarm_interval > settings.MAX_ALARM_INTERVAL:
request.user.profile.alarm_interval \
= settings.MAX_ALARM_INTERVAL
else:
request.user.profile.alarm_interval = setting.alarm_interval
request.user.profile.save()
msg = _('Saved successfully.')
else:
msg = _('Form validation Failure')
elif request.method == "GET":
if request.user.is_authenticated:
msg = ""
settingform = SettingForm(instance=request.user.profile)
else:
return redirect('/')
return render(
request,
"accounts/setting.html",
{
'settingform': settingform,
'msg': msg,
}
)
@login_required
def edit_user_info(request):
"""Edit user information"""
profile = get_object_or_404(Profile, pk=request.user.profile.id)
if request.method == "POST":
infoform = UserInfoForm(request.POST, request.FILES, instance=profile)
if infoform.is_valid():
error = False
if settings.ENABLE_NICKNAME:
nick = infoform.cleaned_data['first_name']
if nick != request.user.first_name:
if nick == '':
request.user.first_name = ''
else:
q = Q(username__iexact=nick) \
| Q(first_name__iexact=nick)
if User.objects.filter(q).exists() or \
len(nick) < settings.NICKNAME_MIN_LENGTH or \
len(nick) > settings.NICKNAME_MAX_LENGTH:
msg = _('Please check nickname.')
error = True
else:
request.user.first_name = nick
email = infoform.cleaned_data['email']
if not error and email != request.user.email:
code = infoform.cleaned_data['code']
signer = TimestampSigner()
try:
value = signer.unsign(
code, max_age=settings.VERIFICATION_CODE_VALID)
code_check = value == email
if code_check:
request.user.email = email
else:
msg = _('Verification failure. Please check verification code again.')
error = True
except:
msg = _('Verification failure. Please check verification code again.')
error = True
if not error:
msg = _('Saved successfully.')
request.user.save()
infoform.save()
else:
msg = _('Form validation Failure')
elif request.method == "GET":
if request.user.is_authenticated:
msg = ""
infoform = UserInfoForm(instance=profile)
else:
return redirect('/')
return render(
request,
"accounts/edit_user_info.html",
{
'infoform': infoform,
'username': request.user.username,
'date_joined': request.user.date_joined,
'point': profile.point,
'portrait': profile.portrait,
'msg': msg,
}
)
@login_required
def user_info(request, user):
"""Show user info"""
userinfo = User.objects.filter(username__iexact=user).get()
article_no = Board.objects.filter(user__username__iexact=user).count()
reply_no = Reply.objects.filter(user__username__iexact=user).count()
return render(
request,
"accounts/user_info.html",
{
'userinfo': userinfo,
'article_no': article_no,
'reply_no': reply_no,
}
)
@login_required
def scrap_list(request, page=0):
"""Show scrap list"""
if int(page) < 1:
return redirect('accounts:scrap_list', page=1)
board_table = BoardTable()
my_scrap = []
name_list = board_table.get_table_list()
list_count = board_table.get_list_count()
current_page = int(page) - 1
start_at = current_page * list_count
end_at = start_at + list_count
q = Q(status__iexact='1normal') | Q(status__iexact='4warning') \
| Q(status__iexact='3notice')
scrap = request.user.profile.scrap.split(',')
total = len(scrap)
if request.user.profile.scrap != '':
for index, s in enumerate(scrap[start_at:end_at]):
app, id = s.split(':')
if app == 'boards':
item = Board.objects.filter(id__iexact=id).filter(q)
if item.count():
my_scrap.append([item[0]])
else:
continue
index_total = int(ceil(float(total) / list_count))
index_begin = int(current_page / 10) * 10 + 1
index_end = mindex_end = index_total
if index_end - index_begin >= 10:
index_end = index_begin + 9
mindex_begin = int(current_page / 5) * 5 + 1
if mindex_end - mindex_begin >= 5:
mindex_end = mindex_begin + 4
return render(
request,
"accounts/scrap.html",
{
'my_scrap': my_scrap,
'total': total,
'page': current_page + 1,
'index_begin': index_begin,
'index_end': index_end + 1,
'mindex_begin': mindex_begin,
'mindex_end': mindex_end + 1,
'index_total': index_total,
'name_list': name_list,
}
)
@login_required
def delete_scrap(request, id):
"""Delete selected scrap"""
profile = request.user.profile
app_id = 'boards:' + id
regstr = re.escape(app_id) + r"\b(,|)"
profile.scrap = re.sub(regstr, '', profile.scrap)
if profile.scrap and profile.scrap[-1] == ',':
profile.scrap = profile.scrap[:-1]
request.user.profile.save()
return redirect('accounts:scrap_list_0')
@login_required
def edit_bookmarks(request):
"""Edit bookmarks"""
my_bookmark = []
if request.user.profile.bookmarks:
bookmarks = request.user.profile.bookmarks.split(',')
for bm in bookmarks:
app, id = bm.split('-')
if app == 'boards':
app_table = BoardTable()
elif app == 'teams':
app_table = TeamTable()
else:
continue
my_bookmark.append(
[bm, app_table.get_table_name(id)]
)
return render(
request,
"accounts/edit_bookmarks.html",
{
'my_bookmark': my_bookmark,
}
)
def sign_up(request):
"""Sign up"""
if request.method == "POST":
userform = RegistrationForm(request.POST)
if userform.is_valid():
userform.save(commit=False)
username = userform.cleaned_data['username']
q = Q(username__iexact=username) | Q(first_name__iexact=username)
if User.objects.filter(q).exists() or \
len(username) < settings.ID_MIN_LENGTH or \
len(username) > settings.ID_MAX_LENGTH:
errormsg = _('Please check username.')
return error_page(request, errormsg)
if settings.ENABLE_NICKNAME:
nick = userform.cleaned_data['first_name']
if nick:
q = Q(username__iexact=nick) | Q(first_name__iexact=nick)
if User.objects.filter(q).exists() or \
len(nick) < settings.NICKNAME_MIN_LENGTH or \
len(nick) > settings.NICKNAME_MAX_LENGTH:
errormsg = _('Please check nickname.')
return error_page(request, errormsg)
code = userform.cleaned_data['code']
email = userform.cleaned_data['email']
signer = TimestampSigner()
try:
value = signer.unsign(
code, max_age=settings.VERIFICATION_CODE_VALID)
code_check = value == email
if code_check:
userform.save()
return render(
request,
"accounts/join.html",
)
else:
errormsg = _('Verification failure. Please check verification code again.')
except:
errormsg = _('Verification failure. Please check verification code again.')
else:
errormsg = _('Sorry. Please try again later.')
return error_page(request, errormsg)
elif request.method == "GET":
userform = RegistrationForm()
return render(
request,
"accounts/signup.html",
{
'userform': userform,
}
)
@login_required
def show_deactivate_account(request):
"""Show deactivate account page"""
return render(
request,
"accounts/deactivate_account.html"
)
@login_required
def deactivate_account(request):
"""Deactivate account"""
if request.user.is_authenticated:
request.user.is_active = False
if request.user.is_staff:
request.user.is_staff = False
request.user.save()
return redirect(reverse_lazy('accounts:logout'))
@user_passes_test(lambda u: u.is_superuser)
def send_email(request):
"""Send email to user for testing purpose"""
id_email = request.user.email
signer = TimestampSigner()
value = signer.sign(id_email)
subject = u'Test email.'
body = u'keyCode: %s' % value
try:
send_mail(subject, body, settings.EMAIL_HOST_USER, [id_email], fail_silently=False)
return error_page(request, "Email sent", status=201)
except SMTPException:
return error_page(request, "Error!")
@staff_member_required
def dashboard_user(request, search_word='', condition='recent', page=1):
"""Dashboard user"""
list_count = settings.DASHBOARD_LIST_COUNT
if int(page) < 1:
return redirect('accounts:dashboard_user', condition, 1)
if condition == 'recent':
order = '-id'
elif condition == 'point':
order = '-profile__point'
elif condition == 'login':
order = '-last_login'
elif condition == 'suspension':
order = '-profile__suspension_till'
elif condition != 'default':
return error_page(request)
current_page = int(page) - 1
start_at = current_page * list_count
end_at = start_at + list_count
if search_word == '':
q = Q()
else:
        q = (Q(username__icontains=search_word)
             | Q(first_name__icontains=search_word)
             | Q(email__icontains=search_word)
             | Q(profile__ip_list__icontains=search_word))
total = User.objects.filter(q).count()
if condition == 'default':
users = User.objects.filter(q).order_by(
'-is_superuser', '-is_staff', '-is_active', 'username')[
start_at:end_at]
elif condition == 'suspension':
users = User.objects.filter(q).filter(is_active=False).order_by(
order)[start_at:end_at]
else:
users = User.objects.filter(q).order_by(order)[start_at:end_at]
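    # Build the pagination windows: up to ten page links (index_begin..index_end)
    # and a shorter five-link window (mindex_*, presumably for the mobile view).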
index_total = int(ceil(float(total) / list_count))
index_begin = int(current_page / 10) * 10 + 1
index_end = mindex_end = index_total
if index_end - index_begin >= 10:
index_end = index_begin + 9
mindex_begin = int(current_page / 5) * 5 + 1
if mindex_end - mindex_begin >= 5:
mindex_end = mindex_begin + 4
return render(
request,
"accounts/dashboard_user.html",
{
'users': users,
'total': total,
'page': current_page + 1,
'index_begin': index_begin,
'index_end': index_end + 1,
'mindex_begin': mindex_begin,
'mindex_end': mindex_end + 1,
'index_total': index_total,
'search_word': search_word,
'condition': condition,
}
)
@staff_member_required
def suspension(request, user, days):
"""Suspend user account for days"""
sus_days = int(days)
userinfo = User.objects.filter(username__iexact=user).get()
if sus_days == 0 and not userinfo.is_active:
userinfo.profile.suspension_till = timezone.now()
userinfo.is_active = True
userinfo.save()
elif sus_days > 0:
sus_until = timezone.now() + timezone.timedelta(days=sus_days)
userinfo.profile.suspension_till = sus_until
userinfo.is_active = False
userinfo.save()
sessions = UserSession.objects.filter(user=userinfo)
for session in sessions:
session.session.delete()
return redirect('accounts:user_info', user)
```
#### File: bbgo/boards/forms.py
```python
from __future__ import unicode_literals
from django import forms
from django.utils.translation import ugettext as _
from django_summernote.widgets import SummernoteWidget
from .models import Board, Reply
class BoardEditForm(forms.ModelForm):
"""Form for board"""
class Meta:
"""Meta for ModelForm"""
CATEGORY = (
)
model = Board
exclude = (
'table', 'user', 'created_at', 'modified_at', 'ip',
'view_count', 'like_count', 'dislike_count', 'reply_count',
'like_users', 'dislike_users'
)
widgets = {
'subject': forms.TextInput(
attrs={'placeholder': _('Enter title here.')}
),
'reference': forms.TextInput(
attrs={'placeholder': _('Add a reference URL.')}
),
'category': forms.Select(choices=CATEGORY),
'content': SummernoteWidget(),
}
def __init__(self, *args, **kwargs):
"""Init"""
self.user = kwargs.pop('user', None)
super(BoardEditForm, self).__init__(*args, **kwargs)
class ReplyEditForm(forms.ModelForm):
"""Form for reply"""
class Meta:
"""Meta for ModelForm"""
model = Reply
exclude = (
'reply_id', 'reply_to', 'status', 'user',
'created_at', 'modified_at', 'ip',
'like_count', 'dislike_count', 'like_users', 'dislike_users'
)
def __init__(self, *args, **kwargs):
"""Init"""
self.user = kwargs.pop('user', None)
super(ReplyEditForm, self).__init__(*args, **kwargs)
```
#### File: bbgo/boards/models.py
```python
from __future__ import unicode_literals
from django.conf import settings
from django.contrib.auth.models import User
from django.db import models
from django.urls import reverse_lazy
from django.utils.translation import ugettext as _
class Board(models.Model):
"""Board of boards"""
BOARD_STATUS = {
('1normal', _('status_normal')),
('2temp', _('status_temp')),
('3notice', _('status_notice')),
('4warning', _('status_warning')),
('5hidden', _('status_hidden')),
('6deleted', _('status_deleted')),
}
table = models.IntegerField(default=0)
status = models.CharField(max_length=10, choices=BOARD_STATUS, default='1normal')
user = models.ForeignKey(
settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now_add=True)
ip = models.GenericIPAddressField()
category = models.CharField(max_length=23, blank=True)
subject = models.CharField(max_length=41)
content = models.TextField()
view_count = models.IntegerField(default=0)
reply_count = models.IntegerField(default=0)
like_count = models.IntegerField(default=0)
dislike_count = models.IntegerField(default=0)
like_users = models.ManyToManyField(
User, related_name="board_like_users", default='', blank=True)
dislike_users = models.ManyToManyField(
User, related_name="board_dislike_users", default='', blank=True)
reference = models.CharField(max_length=1855, default='', blank=True)
has_image = models.BooleanField(default=False)
has_video = models.BooleanField(default=False)
def get_absolute_url(self):
"""Back to list"""
return reverse_lazy('boards:show_list', args=[self.table, 1])
def get_article_url(self):
"""Back to article"""
return reverse_lazy('boards:show_article', args=[self.id])
def get_edit_url(self):
"""Stay editing"""
return reverse_lazy('boards:edit_article', args=[self.id])
def get_status_text(self):
"""Get status text"""
if self.status == '1normal':
return _('status_normal')
elif self.status == '2temp':
return _('status_temp')
elif self.status == '3notice':
return _('status_notice')
elif self.status == '4warning':
return _('status_warning')
elif self.status == '5hidden':
return _('status_hidden')
elif self.status == '6deleted':
return _('status_deleted')
def get_image_text(self):
"""Get image text"""
return '<img src="/upload/django-summernote/'
def get_video_text(self):
"""Get video text"""
return '<iframe frameborder="0" src="//www.youtube.com/'
class Reply(models.Model):
"""Reply of boards"""
REPLY_STATUS = {
('1normal', _('status_normal')),
('5hidden', _('status_hidden')),
('6deleted', _('status_deleted')),
}
article_id = models.IntegerField(default=0)
reply_id = models.IntegerField(default=0)
reply_to = models.CharField(max_length=150, default='', blank=True)
status = models.CharField(
max_length=10, choices=REPLY_STATUS, default='1normal')
user = models.ForeignKey(
settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
modified_at = models.DateTimeField(auto_now_add=True)
ip = models.GenericIPAddressField()
content = models.TextField(max_length=settings.REPLY_TEXT_MAX)
image = models.ImageField(upload_to="reply-images/%Y-%m-%d/", blank=True)
like_count = models.IntegerField(default=0)
dislike_count = models.IntegerField(default=0)
like_users = models.ManyToManyField(
User, related_name="reply_like_users", default='', blank=True)
dislike_users = models.ManyToManyField(
User, related_name="reply_dislike_users", default='', blank=True)
```
#### File: bbgo/boards/table.py
```python
from __future__ import unicode_literals
from django.urls import reverse_lazy
class BoardTable():
"""Table for all board settings"""
    BOARD_LIST_COUNT = 20  # number of posts shown per page
    BEST_THRESHOLD = 20  # number of upvotes needed to reach the Best board
    VETO_THRESHOLD = 10  # number of downvotes that veto a post from reaching Best
    SAMPLE_LIMIT = 10  # number of sample list items shown on the portal page
    SAMPLE_LIMIT_MOBILE = 5  # sample list size for the mobile portal page
    SAMPLE_NOTICE = 3  # number of sample notices shown on the portal page
    # CATEGORY = [
    #     None,
    #     ['Chat', 'Questions', 'Promotion'],  # 1
    #     ['Info', 'Tutorials', 'Tips'],  # 2
    # ]
    #
    # BOARD_TABLES = [
    #     # ('board title', 'board description', 'category')
    #     # 0: no category
    #     # > 0: use a category defined in CATEGORY
    #     # Tables 0-9: only admins (superuser, staff) can write
    #     ['Recent posts', 'Collects the latest posts from every board.', 0],  # 0: read-only
    #     ['Site notices', '', 0],  # 1
    #     ['Announcements', '', 0],  # 2
    #     ['Help', 'bbgo docs and guides', 0],  # 3
    #     ['', '', 0],  # 4
    #     ['', '', 0],  # 5
    #     ['', '', 0],  # 6
    #     ['', '', 0],  # 7
    #     ['Report board', 'Posts on the report board are visible only to the author and admins.', 0],  # 8: private posts
    #     ['Best', 'Posts with enough upvotes are added here automatically.', 0],  # 9
    #     # Members can write from here on
    #     ['Site feedback', 'Suggestions, questions and bug reports about running the site. For reports, please use the report board, which supports private posts.', 0],  # 10
    #     ['Info board', 'Share all kinds of information, tutorials and tips.', 2],  # 11
    #     ['Board', 'Feel free to post bbgo-related requests or questions.', 0],  # 12
    # ]
CATEGORY = [
None,
["聊天", "问题", "宣传"],
["信息", "课程", "提示"]
]
BOARD_TABLES = [
        # ('board title', 'board description', 'category')
        # 0: no category
        # > 0: use a category defined in CATEGORY
        # Tables 0-9: only admins (superuser, staff) can write
        ['最近的帖子', '收集所有主板的所有帖子', 0],  # 0: read-only
        ['所有公告', '', 0],  # 1
        ['公告', '', 0],  # 2
        ['帮助', 'bbgo docs and guides', 0],  # 3
        ['', '', 0],  # 4
        ['', '', 0],  # 5
        ['', '', 0],  # 6
        ['', '', 0],  # 7
        ['报告板', '报告板中的所有帖子仅对作者和管理员可见', 0],  # 8: private posts
        ['最佳', '最推荐的帖子将自动注册', 0],  # 9
        # Members can write from here on
        ['运营/咨询/建议', '建议,查询和网站管理的错误报告。请使用支持秘密', 0],  # 10
        ['信息板', '分享各种信息,讲座和提示', 2],  # 11
        ['公告牌', '请随意表达您对bbgo的问题或疑问', 0],  # 12
]
def get_list_count(self):
"""Get list count"""
return self.BOARD_LIST_COUNT
def get_sample_limit(self):
"""Get sample limit"""
return self.SAMPLE_LIMIT, self.SAMPLE_LIMIT_MOBILE
def get_sample_notice(self):
"""Get sample notice"""
return self.SAMPLE_NOTICE
def get_best_threshold(self):
"""Get best threshold"""
return self.BEST_THRESHOLD
def get_veto_threshold(self):
"""Get veto threshold"""
return self.VETO_THRESHOLD
def get_table_len(self):
"""Get number of tables"""
return len(self.BOARD_TABLES)
def get_table_name(self, table):
"""Get name of the table"""
return self.BOARD_TABLES[int(table)][0]
def get_table_url(self, table):
"""Get URL of the table"""
return reverse_lazy('boards:show_list', args=[table, 1])
def get_table_desc(self, table):
"""Get description of the table"""
return self.BOARD_TABLES[int(table)][1]
def get_table_category(self, table):
"""Get category of the table"""
return self.BOARD_TABLES[int(table)][2]
def get_category(self, table):
"""Get pre-defined category for the table"""
return self.CATEGORY[(self.BOARD_TABLES[int(table)][2])]
def get_table_list(self):
"""Get BOARD_TABLES"""
return self.BOARD_TABLES
def writable(self, request, table):
"""Writable for table"""
if request.user.is_authenticated:
writable = True
if int(table) == 0 or int(table) == 9:
writable = False
elif int(table) == 8:
writable = True
elif int(table) < 8 and not request.user.is_staff:
writable = False
else:
writable = False
return writable
```
#### File: bbgo/core/api.py
```python
from __future__ import unicode_literals
import re
from smtplib import SMTPException
import threading
from blogs.forms import CommentEditForm
from blogs.models import Blog, Comment
from boards.forms import ReplyEditForm
from boards.models import Board, Reply
from boards.table import BoardTable
from core.utils import error_page, error_to_response, get_ipaddress
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.core.mail import send_mail
from django.core.signing import TimestampSigner
from django.db.models import Case, IntegerField, Q, When
from django.http import JsonResponse
from django.shortcuts import get_object_or_404, render_to_response
from django.utils import timezone
from django.utils.translation import ugettext as _
from msgs.models import Msg
from papers.models import Paper, Person
from papers.views import send_email_with_paper
from spams.views import check_spam
from teams.forms import TeamReplyEditForm
from teams.models import Team, TeamReply
def check_duplication(request):
"""API check_duplication"""
check_type = request.POST.get('check_type')
name = request.POST.get('username')
if check_type == 'id':
min_limit = settings.ID_MIN_LENGTH
max_limit = settings.ID_MAX_LENGTH
else:
min_limit = settings.NICKNAME_MIN_LENGTH
max_limit = settings.NICKNAME_MAX_LENGTH
q = Q(username__iexact=name) | Q(first_name__iexact=name)
idcheck = User.objects.filter(q).exists()
length = len(name)
if length < min_limit or length > max_limit:
return JsonResponse({'status': 'false'}, status=400)
if request.user.is_authenticated and idcheck:
if name == request.user.username or name == request.user.first_name:
idcheck = False
if idcheck:
msg = _('Already exist.')
else:
msg = _('Available')
data = {
'idcheck': idcheck,
'msg': msg,
}
return JsonResponse(data)
def get_verification_code(request):
"""API get_verification_code"""
email = request.POST.get('email')
if User.objects.filter(email__iexact=email).exists():
msg = _('E-mail exists. Why don\'t you try to find your password?')
data = {
'result': False,
'msg': msg,
}
return JsonResponse(data, status=201)
signer = TimestampSigner()
value = signer.sign(email)
subject = _('[%(site_name)s] Verification code for signing in') % {
'site_name': settings.SITE_NAME
}
body = value
try:
send_mail(subject, body, settings.EMAIL_HOST_USER, [email], fail_silently=False)
msg = _('Verification code sent. Please check your E-mail.')
data = {
'result': True,
'msg': msg,
}
return JsonResponse(data, status=201)
except SMTPException:
return JsonResponse({'status': 'false'}, status=400)
def check_validation(request):
"""API check_validation"""
code = request.POST.get('code')
email = request.POST.get('email')
signer = TimestampSigner()
try:
value = signer.unsign(code, max_age=settings.VERIFICATION_CODE_VALID)
code_check = value == email
if code_check:
return JsonResponse({'status': 'true'}, status=201)
except:
pass
return JsonResponse({'status': 'false'}, status=400)
def like_article(request, liketype):
"""API like_article"""
if request.method == 'POST':
if not request.user.is_authenticated:
msg = _("Require login")
return JsonResponse([0, msg], safe=False, status=201)
id = request.POST['id']
user = request.user
article = get_object_or_404(Board, pk=id)
if article.user == user:
msg = _("You like your own post?")
return JsonResponse([0, msg], safe=False, status=201)
like_users = article.like_users.all()
dislike_users = article.dislike_users.all()
if user not in like_users and user not in dislike_users:
if liketype == 'like':
article.like_users.add(user)
article.like_count += 1
article.save()
msg = _("You've liked this article")
return JsonResponse(
[article.like_count, msg], safe=False, status=201)
elif liketype == 'dislike':
article.dislike_users.add(user)
article.dislike_count += 1
article.save()
msg = _("You've disliked this article")
return JsonResponse(
[article.dislike_count, msg], safe=False, status=201)
else:
return JsonResponse({'status': 'false'}, status=400)
else:
if user in like_users:
msg = _("You've already liked")
else:
msg = _("You've already disliked")
return JsonResponse([0, msg], safe=False, status=201)
else:
return error_page(request)
def like_users(request, liketype):
"""API like_users"""
if request.method == 'POST':
id = request.POST['id']
article = get_object_or_404(Board, pk=id)
if liketype == 'like':
like_users = article.like_users.all()
elif liketype == 'dislike':
like_users = article.dislike_users.all()
else:
return JsonResponse({'status': 'false'}, status=400)
return render_to_response(
'boards/like_users.html',
{
'user': request.user,
'like_users': like_users,
}
)
else:
return error_page(request)
def like_reply(request, liketype):
"""API like_reply"""
if request.method == 'POST':
if not request.user.is_authenticated:
msg = _("Require login")
return JsonResponse([0, msg], safe=False, status=201)
id = request.POST['id']
user = request.user
reply = get_object_or_404(Reply, pk=id)
if reply.user == user:
msg = _("You like your own post?")
return JsonResponse([0, msg], safe=False, status=201)
like_users = reply.like_users.all()
dislike_users = reply.dislike_users.all()
if user not in like_users and user not in dislike_users:
if liketype == 'like':
reply.like_users.add(user)
reply.like_count += 1
reply.save()
return JsonResponse([reply.like_count], safe=False, status=201)
elif liketype == 'dislike':
reply.dislike_users.add(user)
reply.dislike_count += 1
reply.save()
return JsonResponse(
[reply.dislike_count], safe=False, status=201)
else:
return JsonResponse({'status': 'false'}, status=400)
else:
if user in like_users:
msg = _("You've already liked")
else:
msg = _("You've already disliked")
return JsonResponse([0, msg], safe=False, status=201)
else:
return error_page(request)
def write_reply(request):
"""API write_reply"""
if not request.user.is_authenticated:
return JsonResponse({'status': 'false'}, status=401)
if request.method == 'POST':
id = request.POST['article_id']
reply_id = r_id = int(request.POST['reply_id'])
reply_to = ''
form = ReplyEditForm(request.POST, request.FILES)
if form.is_valid():
article = get_object_or_404(Board, pk=id)
if article.status != '1normal' and article.status != '3notice' \
and not request.user.is_staff:
return JsonResponse({'status': 'false'}, status=402)
reply = form.save(commit=False)
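            # Walk up the reply chain to find the root reply (reply_id == 0)
            # and the author being answered, so nested replies stay grouped
            # under their root.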
parent_id = reply_id
while parent_id != 0:
parent = get_object_or_404(Reply, pk=parent_id)
if parent:
if parent_id == reply_id and request.user != parent.user:
reply_to = parent.user.username
parent_id = parent.reply_id
if parent_id == 0:
reply_id = parent.id
else:
return JsonResponse({'status': 'false'}, status=400)
reply.reply_id = reply_id
reply.reply_to = reply_to
reply.status = '1normal'
reply.user = request.user
reply.ip = get_ipaddress(request)
reply.save()
article.reply_count += 1
article.save()
if article.user != request.user and reply_id == 0:
if article.user.profile.alarm_board:
if article.user.profile.alarm_list != '':
article.user.profile.alarm_list += ','
alarm_text = 'b:%d' % article.id
article.user.profile.alarm_list += alarm_text
article.user.profile.alarm = True
article.user.profile.save()
elif reply_to != request.user.username and reply_id > 0:
user = User.objects.filter(username=reply_to)
if user:
if user[0].profile.alarm_reply:
if user[0].profile.alarm_list != '':
user[0].profile.alarm_list += ','
alarm_text = 'r:%d' % r_id
user[0].profile.alarm_list += alarm_text
user[0].profile.alarm = True
user[0].save()
request.user.profile.last_reply_at = timezone.now()
request.user.profile.point += settings.POINT_REPLY
request.user.profile.save()
replies = Reply.objects.filter(article_id=id).annotate(
custom_order=Case(
When(reply_id=0, then='id'),
default='reply_id',
output_field=IntegerField(),
)
).order_by('custom_order', 'id')
return render_to_response(
'boards/show_reply.html',
{
'user': request.user,
'article_user': article.user,
'replies': replies,
'count': replies.count()
}
)
return JsonResponse({'status': 'false'}, status=400)
else:
return error_to_response(request)
def reload_reply(request):
"""API reload_reply"""
if request.method == 'POST':
id = request.POST['id']
replies = Reply.objects.filter(article_id=id).annotate(
custom_order=Case(
When(reply_id=0, then='id'),
default='reply_id',
output_field=IntegerField(),
)
).order_by('custom_order', 'id')
article = get_object_or_404(Board, pk=id)
return render_to_response(
'boards/show_reply.html',
{
'user': request.user,
'article_user': article.user,
'replies': replies,
'count': replies.count()
}
)
else:
return error_to_response(request)
def delete_reply(request, early_return=False):
"""API delete_reply"""
if request.method == 'POST':
id = request.POST['id']
reply = get_object_or_404(Reply, pk=id)
if request.user == reply.user:
reply.status = '6deleted'
elif request.user.is_staff:
reply.status = '5hidden'
else:
return error_to_response(request)
reply.save()
if early_return:
return JsonResponse({'status': 'true'}, status=201)
article_id = reply.article_id
replies = Reply.objects.filter(article_id=article_id).annotate(
custom_order=Case(
When(reply_id=0, then='id'),
default='reply_id',
output_field=IntegerField(),
)
).order_by('custom_order', 'id')
article = get_object_or_404(Board, pk=article_id)
return render_to_response(
'boards/show_reply.html',
{
'user': request.user,
'article_user': article.user,
'replies': replies,
'count': replies.count()
}
)
return error_to_response(request)
def restore_reply(request):
"""API restore_reply"""
if request.method == 'POST':
id = request.POST['id']
reply = get_object_or_404(Reply, pk=id)
if request.user.is_staff:
reply.status = '1normal'
else:
return error_to_response(request)
reply.save()
article_id = reply.article_id
replies = Reply.objects.filter(article_id=article_id).annotate(
custom_order=Case(
When(reply_id=0, then='id'),
default='reply_id',
output_field=IntegerField(),
)
).order_by('custom_order', 'id')
article = get_object_or_404(Board, pk=article_id)
return render_to_response(
'boards/show_reply.html',
{
'user': request.user,
'article_user': article.user,
'replies': replies,
'count': replies.count()
}
)
return error_to_response(request)
def reply_count(request):
"""API reply_count"""
if request.method == 'POST':
id = request.POST['id']
article = get_object_or_404(Board, pk=id)
count = article.reply_count
return JsonResponse([count], safe=False, status=201)
return error_page(request)
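# Note (descriptive comment added for clarity): bookmarks are stored on the user's
# profile as one comma-separated string of "<app>-<id>" entries (the exact app names
# come from the client request), capped by settings.MAX_BOOKMARKS; toggle_bookmark
# appends or strips a single entry per call.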
def toggle_bookmark(request):
"""API toggle_bookmark"""
if request.method == 'POST':
app = request.POST['app']
id = request.POST['id']
app_id = app + '-' + id
profile = request.user.profile
bookmarks = profile.bookmarks.split(',')
if app_id not in bookmarks:
if len(bookmarks) > settings.MAX_BOOKMARKS:
return JsonResponse({'status': 'false'}, status=400)
if profile.bookmarks != '':
profile.bookmarks += ","
profile.bookmarks += app_id
data = static('icons/stared28.png')
else:
regstr = re.escape(app_id) + r"\b(,|)"
profile.bookmarks = re.sub(regstr, '', profile.bookmarks)
if profile.bookmarks and profile.bookmarks[-1] == ',':
profile.bookmarks = profile.bookmarks[:-1]
data = static('icons/star28.png')
request.user.profile.save()
return JsonResponse([data], safe=False, status=201)
return error_page(request)
def edit_bookmarks(request):
"""API edit_bookmarks"""
if request.method == 'POST':
bookmarks = dict(request.POST.iterlists())['bookmarks[]']
my_bookmarks = ''
for bm in bookmarks:
if my_bookmarks != '':
my_bookmarks += ","
my_bookmarks += bm
request.user.profile.bookmarks = my_bookmarks
request.user.profile.save()
return JsonResponse({'status': 'true'}, status=201)
return error_page(request)
def scrap(request):
"""API scrap"""
if request.method == 'POST':
app = request.POST['app']
id = request.POST['id']
app_id = app + ':' + id
profile = request.user.profile
scrap = profile.scrap.split(',')
if app_id not in scrap:
if profile.scrap != '':
profile.scrap += ","
profile.scrap += app_id
request.user.profile.save()
return JsonResponse({'status': 'true'}, status=201)
else:
return JsonResponse({'status': 'false'}, status=400)
return error_page(request)
def alarm_status(request):
"""API alarm_status"""
if request.user.is_authenticated:
return JsonResponse([request.user.profile.alarm], safe=False, status=201)
return JsonResponse({'status': 'false'}, status=400)
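# Note (descriptive comment added for clarity): profile.alarm_list is a
# comma-separated string of "<code>:<id>" entries; the codes handled below map to
# models as follows: 'b' -> Board, 'r' -> Reply, 'l' -> Blog, 'c' -> Comment,
# 'pa'/'pc'/'pr' -> Paper, 't'/'tf'/'tc'/'tl'/'tk'/'bt' -> Team, 'rt' -> TeamReply.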
def alarm_list(request):
"""API alarm_list"""
if not request.user.is_authenticated:
return JsonResponse({'status': 'false'}, status=401)
if request.method == 'POST':
type = request.POST['type']
alarms = request.user.profile.alarm_list.split(',')
my_alarms = []
board_table = BoardTable()
name_list = board_table.get_table_list()
if request.user.profile.alarm_list != '':
total = len(alarms)
for alarm in reversed(alarms):
app, id = alarm.split(':')
if app == 'b':
item = Board.objects.filter(id__iexact=id)
elif app == 'r':
item = Reply.objects.filter(id__iexact=id)
elif app == 'l':
item = Blog.objects.filter(id__iexact=id)
elif app == 'c':
item = Comment.objects.filter(id__iexact=id)
elif app == 'pa' or app == 'pc' or app == 'pr':
item = Paper.objects.filter(id__iexact=id)
elif app == 't' or app == 'tf' or app == 'tc' or app == 'tl' \
or app == 'tk' or app == 'bt':
item = Team.objects.filter(id__iexact=id)
elif app == 'rt':
item = TeamReply.objects.filter(id__iexact=id)
else:
continue
if item.count():
my_alarms.append([app, item[0]])
if request.user.profile.alarm:
request.user.profile.alarm = False
request.user.profile.save()
else:
total = 0
return render_to_response(
'accounts/alarm_list.html',
{
'user': request.user,
'alarms': my_alarms,
'total': total,
'max': settings.ALARM_INBOX_MAX,
'type': type,
'name_list': name_list,
}
)
else:
return error_to_response(request)
return JsonResponse({'status': 'false'}, status=400)
def clear_alarm(request):
"""API clear_alarm"""
if not request.user.is_authenticated:
return JsonResponse({'status': 'false'}, status=401)
if request.method == 'POST':
request.user.profile.alarm_list = ''
request.user.profile.save()
return JsonResponse({'status': 'true'}, status=201)
else:
return JsonResponse({'status': 'false'}, status=400)
def delete_message(request):
"""API delete_message"""
if request.method == 'POST':
id = request.POST['id']
msg = get_object_or_404(Msg, pk=id)
if msg.sender == request.user:
msg.sender_status = '6deleted'
msg.save()
elif msg.recipient == request.user:
msg.recipient_status = '6deleted'
msg.save()
else:
return JsonResponse({'status': 'false'}, status=400)
return JsonResponse({'status': 'true'}, status=201)
return error_page(request)
def write_team_reply(request):
"""API write_team_reply"""
if not request.user.is_authenticated:
return JsonResponse({'status': 'false'}, status=401)
if request.method == 'POST':
id = request.POST['article_id']
reply_id = rt_id = int(request.POST['reply_id'])
reply_to = ''
form = TeamReplyEditForm(request.POST)
if form.is_valid():
article = get_object_or_404(Team, pk=id)
if (article.status == '5hidden' or article.status == '6deleted') \
and not request.user.is_staff:
return JsonResponse({'status': 'false'}, status=402)
reply = form.save(commit=False)
parent_id = reply_id
while parent_id != 0:
parent = get_object_or_404(TeamReply, pk=parent_id)
if parent:
if parent_id == reply_id and request.user != parent.user:
reply_to = parent.user.username
parent_id = parent.reply_id
if parent_id == 0:
reply_id = parent.id
else:
return JsonResponse({'status': 'false'}, status=400)
reply.reply_id = reply_id
reply.reply_to = reply_to
reply.status = '1normal'
reply.user = request.user
reply.ip = get_ipaddress(request)
reply.save()
article.reply_count += 1
article.save()
if article.user != request.user and reply_id == 0:
if article.user.profile.alarm_board:
if article.user.profile.alarm_list != '':
article.user.profile.alarm_list += ','
alarm_text = 'bt:%d' % article.id
article.user.profile.alarm_list += alarm_text
article.user.profile.alarm = True
article.user.profile.save()
elif reply_to != request.user.username and reply_id > 0:
user = User.objects.filter(username=reply_to)
if user:
if user[0].profile.alarm_reply:
if user[0].profile.alarm_list != '':
user[0].profile.alarm_list += ','
alarm_text = 'rt:%d' % rt_id
user[0].profile.alarm_list += alarm_text
user[0].profile.alarm = True
user[0].save()
request.user.profile.last_reply_at = timezone.now()
request.user.profile.point += settings.POINT_REPLY
request.user.profile.save()
replies = TeamReply.objects.filter(article_id=id).annotate(
custom_order=Case(
When(reply_id=0, then='id'),
default='reply_id',
output_field=IntegerField(),
)
).order_by('custom_order', 'id')
return render_to_response(
'teams/show_team_reply.html',
{
'user': request.user,
'article_user': article.user,
'replies': replies,
'count': replies.count()
}
)
return JsonResponse({'status': 'false'}, status=400)
else:
return error_to_response(request)
def reload_team_reply(request):
"""API reload_team_reply"""
if request.method == 'POST':
id = request.POST['id']
replies = TeamReply.objects.filter(article_id=id).annotate(
custom_order=Case(
When(reply_id=0, then='id'),
default='reply_id',
output_field=IntegerField(),
)
).order_by('custom_order', 'id')
article = get_object_or_404(Team, pk=id)
return render_to_response(
'teams/show_team_reply.html',
{
'user': request.user,
'article_user': article.user,
'replies': replies,
'count': replies.count()
}
)
else:
return error_to_response(request)
def delete_team_reply(request):
"""API delete_team_reply"""
if request.method == 'POST':
id = request.POST['id']
reply = get_object_or_404(TeamReply, pk=id)
if request.user == reply.user:
reply.status = '6deleted'
elif request.user.is_staff:
reply.status = '5hidden'
else:
return error_to_response(request)
reply.save()
article_id = reply.article_id
replies = TeamReply.objects.filter(article_id=article_id).annotate(
custom_order=Case(
When(reply_id=0, then='id'),
default='reply_id',
output_field=IntegerField(),
)
).order_by('custom_order', 'id')
article = get_object_or_404(Team, pk=article_id)
return render_to_response(
'teams/show_team_reply.html',
{
'user': request.user,
'article_user': article.user,
'replies': replies,
'count': replies.count()
}
)
return error_to_response(request)
def restore_team_reply(request):
"""API restore_team_reply"""
if request.method == 'POST':
id = request.POST['id']
reply = get_object_or_404(TeamReply, pk=id)
if request.user.is_staff:
reply.status = '1normal'
else:
return error_to_response(request)
reply.save()
article_id = reply.article_id
replies = TeamReply.objects.filter(article_id=article_id).annotate(
custom_order=Case(
When(reply_id=0, then='id'),
default='reply_id',
output_field=IntegerField(),
)
).order_by('custom_order', 'id')
article = get_object_or_404(Team, pk=article_id)
return render_to_response(
'teams/show_team_reply.html',
{
'user': request.user,
'article_user': article.user,
'replies': replies,
'count': replies.count()
}
)
return error_to_response(request)
def team_reply_count(request):
"""API team_reply_count"""
if request.method == 'POST':
id = request.POST['id']
article = get_object_or_404(Team, pk=id)
count = article.reply_count
slot = article.slot
return JsonResponse([count, slot], safe=False, status=201)
return error_page(request)
def join_team(request):
"""API join_team"""
if request.method == 'POST':
if not request.user.is_authenticated:
return JsonResponse({'status': 'false'}, status=401)
id = request.POST['id']
user = request.user
article = get_object_or_404(Team, pk=id)
if article.table == 1:
if not user.profile.id1:
return JsonResponse({'status': 'false'}, status=412)
elif article.table == 2:
if not user.profile.id2:
return JsonResponse({'status': 'false'}, status=412)
elif article.table == 3:
if not user.profile.id3:
return JsonResponse({'status': 'false'}, status=412)
if article.user == user:
return JsonResponse({'status': 'false'}, status=405)
if article.slot >= article.slot_total:
return JsonResponse({'status': 'false'}, status=406)
if article.status == '1normal':
slots = article.slot_users.all()
if user not in slots:
article.slot_users.add(user)
article.slot += 1
if article.slot == article.slot_total:
article.status = '8full'
article.save()
slot_users = article.slot_users.all()
if article.user.profile.alarm_team or (
article.slot == article.slot_total and
article.user.profile.alarm_full
):
if article.user.profile.alarm_list != '':
article.user.profile.alarm_list += ','
if article.user.profile.alarm_full and \
article.slot == article.slot_total:
alarm_text = 'f:%d' % article.id
else:
alarm_text = 't:%d' % article.id
article.user.profile.alarm_list += alarm_text
article.user.profile.alarm = True
article.user.save()
if article.slot == article.slot_total:
for slot_user in slot_users:
if slot_user.profile.alarm_full:
if slot_user.profile.alarm_list != '':
slot_user.profile.alarm_list += ','
alarm_text = 'f:%d' % article.id
slot_user.profile.alarm_list += alarm_text
slot_user.profile.alarm = True
slot_user.save()
return render_to_response(
'teams/show_team.html',
{
'user': user,
'table': article.table,
'article_id': article.id,
'article_user': article.user,
'slot_in': article.slot,
'empty_slots': article.slot_total - article.slot,
'slot_users': slot_users,
}
)
else:
return JsonResponse({'status': 'false'}, status=405)
elif article.status == '7canceled':
return JsonResponse({'status': 'false'}, status=410)
elif article.status == '8full':
return JsonResponse({'status': 'false'}, status=406)
elif article.status == '5hidden' or article.status == '6deleted':
return JsonResponse({'status': 'false'}, status=404)
else:
return JsonResponse({'status': 'false'}, status=400)
else:
return error_page(request)
def leave_team(request):
"""API leave_team"""
if request.method == 'POST':
if not request.user.is_authenticated:
return JsonResponse({'status': 'false'}, status=401)
id = request.POST['id']
user = request.user
article = get_object_or_404(Team, pk=id)
if article.user == user:
return JsonResponse({'status': 'false'}, status=403)
slots = article.slot_users.all()
if user in slots:
article.slot_users.remove(user)
if article.slot > 1:
article.slot -= 1
if article.status == '8full':
article.status = '1normal'
article.save()
slot_users = article.slot_users.all()
if article.user.profile.alarm_team:
if article.user.profile.alarm_list != '':
article.user.profile.alarm_list += ','
alarm_text = 'l:%d' % article.id
article.user.profile.alarm_list += alarm_text
article.user.profile.alarm = True
article.user.save()
return render_to_response(
'teams/show_team.html',
{
'user': user,
'table': article.table,
'article_id': article.id,
'article_user': article.user,
'slot_in': article.slot,
'empty_slots': article.slot_total - article.slot,
'slot_users': slot_users,
}
)
else:
return JsonResponse({'status': 'false'}, status=404)
else:
return error_page(request)
def kick_player(request):
"""API reload_team"""
if request.method == 'POST':
if not request.user.is_authenticated:
return JsonResponse({'status': 'false'}, status=401)
id = request.POST['id']
kick = request.POST['kick_user']
kick_user = User.objects.filter(username__iexact=kick).get()
user = request.user
article = get_object_or_404(Team, pk=id)
if article.user != user and not user.is_staff:
return JsonResponse({'status': 'false'}, status=403)
slots = article.slot_users.all()
if kick_user in slots:
article.slot_users.remove(kick_user)
if article.slot > 1:
article.slot -= 1
if article.status == '8full':
article.status = '1normal'
article.save()
slot_users = article.slot_users.all()
if article.user.profile.alarm_team:
if article.user.profile.alarm_list != '':
article.user.profile.alarm_list += ','
alarm_text = 'l:%d' % article.id
article.user.profile.alarm_list += alarm_text
article.user.profile.alarm = True
article.user.save()
if kick_user.profile.alarm_list != '':
kick_user.profile.alarm_list += ','
alarm_text = 'k:%d' % article.id
kick_user.profile.alarm_list += alarm_text
kick_user.profile.alarm = True
kick_user.save()
return render_to_response(
'teams/show_team.html',
{
'user': user,
'table': article.table,
'article_id': article.id,
'article_user': article.user,
'slot_in': article.slot,
'empty_slots': article.slot_total - article.slot,
'slot_users': slot_users,
}
)
else:
return JsonResponse({'status': 'false'}, status=404)
else:
return error_page(request)
def reload_team(request):
"""API reload_team"""
if request.method == 'POST':
id = request.POST['id']
article = get_object_or_404(Team, pk=id)
slot_users = article.slot_users.all()
return render_to_response(
'teams/show_team.html',
{
'user': request.user,
'table': article.table,
'article_id': article.id,
'article_user': article.user,
'slot_in': article.slot,
'empty_slots': article.slot_total - article.slot,
'slot_users': slot_users,
}
)
else:
return error_to_response(request)
def like_post(request):
"""API like_post"""
if request.method == 'POST':
id = request.POST['id']
ip = get_ipaddress(request)
user = request.user
post = get_object_or_404(Blog, pk=id)
like_users = post.like_users.split(',')
if post.user == user or ip == post.ip:
msg = _("You like your own post?")
return JsonResponse([0, msg], safe=False, status=201)
if ip not in like_users:
if post.like_users != '':
post.like_users += ","
post.like_users += ip
post.like_count += 1
post.save()
msg = _("You've liked this article")
return JsonResponse(
[post.like_count, msg], safe=False, status=201)
else:
msg = _("You've already liked")
return JsonResponse([0, msg], safe=False, status=201)
else:
return error_page(request)
def write_comment(request):
"""API write_comment"""
if request.method == 'POST':
id = request.POST['post_id']
comment_id = r_id = int(request.POST['comment_id'])
username = request.POST['username']
form = CommentEditForm(request.POST)
if form.is_valid():
post = get_object_or_404(Blog, pk=id)
if post.status != '1normal' and not request.user.is_staff:
return JsonResponse({'status': 'false'}, status=402)
comment = form.save(commit=False)
parent_id = comment_id
while parent_id != 0:
parent = get_object_or_404(Comment, pk=parent_id)
if parent:
parent_id = parent.comment_id
if parent_id == 0:
comment_id = parent.id
else:
return JsonResponse({'status': 'false'}, status=400)
comment.comment_id = comment_id
comment.ip = get_ipaddress(request)
comment.status = '1normal'
if request.user.is_authenticated:
comment.userid = request.user.username
else:
comment.username = username
if check_spam(request, comment):
comment.status = '7spam'
comment.save()
post.comment_count += 1
post.save()
if comment.status != '7spam':
if post.user != request.user and comment_id == 0:
if post.user.profile.alarm_list != '':
post.user.profile.alarm_list += ','
alarm_text = 'l:%d' % post.id
post.user.profile.alarm_list += alarm_text
post.user.profile.alarm = True
post.user.profile.save()
elif comment_id > 0:
comment_to = get_object_or_404(Comment, pk=comment_id)
if comment_to and comment_to.userid:
user = User.objects.filter(username=comment_to.userid)
if user and user[0] is not request.user:
if user[0].profile.alarm_reply:
if user[0].profile.alarm_list != '':
user[0].profile.alarm_list += ','
alarm_text = 'c:%d' % r_id
user[0].profile.alarm_list += alarm_text
user[0].profile.alarm = True
user[0].save()
q = Q(status='1normal')
comments = Comment.objects.filter(post_id=id).filter(q).annotate(
custom_order=Case(
When(comment_id=0, then='id'),
default='comment_id',
output_field=IntegerField(),
)
).order_by('custom_order', 'id')
return render_to_response(
'blogs/show_comment.html',
{
'user': request.user,
'post_user': post.user.username,
'comments': comments,
'count': comments.count()
}
)
return JsonResponse({'status': 'false'}, status=400)
else:
return error_to_response(request)
def delete_comment(request):
"""API delete_comment"""
if request.method == 'POST':
id = request.POST['id']
comment = get_object_or_404(Comment, pk=id)
if request.user.username == comment.userid or request.user.is_staff:
comment.status = '6deleted'
else:
return error_to_response(request)
comment.save()
post_id = comment.post_id
post = get_object_or_404(Blog, pk=post_id)
post.comment_count -= 1
post.save()
q = Q(status='1normal')
comments = Comment.objects.filter(post_id=post_id).filter(q).annotate(
custom_order=Case(
                When(comment_id=0, then='id'),
                default='comment_id',
output_field=IntegerField(),
)
).order_by('custom_order', 'id')
return render_to_response(
'blogs/show_comment.html',
{
'user': request.user,
'post_user': post.user.username,
'comments': comments,
'count': comments.count()
}
)
return error_to_response(request)
def reload_comment(request):
"""API reload_comment"""
if request.method == 'POST':
id = request.POST['id']
post = get_object_or_404(Blog, pk=id)
q = Q(status='1normal')
comments = Comment.objects.filter(post_id=id).filter(q).annotate(
custom_order=Case(
                When(comment_id=0, then='id'),
                default='comment_id',
output_field=IntegerField(),
)
).order_by('custom_order', 'id')
return render_to_response(
'blogs/show_comment.html',
{
'user': request.user,
'post_user': post.user.username,
'comments': comments,
'count': comments.count()
}
)
else:
return error_to_response(request)
def user_by_name(request):
"""API user_by_name"""
if not request.user.is_authenticated:
return JsonResponse({'status': 'false'}, status=401)
if request.method == 'POST':
approval_type = request.POST['type']
name = request.POST['name']
blacklist = request.POST.getlist('blacklist[]')
q = Q(username__icontains=name) | Q(
first_name__icontains=name) | Q(last_name__icontains=name)
names = User.objects.filter(is_active=True).filter(q).exclude(
username__in=blacklist)
total = names.count()
if total == 1:
data = {
'type': approval_type,
'id': names[0].id,
'username': names[0].username,
'name': names[0].last_name,
'email': names[0].email,
'status': 'only',
}
return JsonResponse(data)
return render_to_response(
'papers/user_list.html',
{
'user': request.user,
'type': approval_type,
'names': names,
'total': total,
}
)
else:
return error_to_response(request)
def approve_paper(request):
"""API approve_paper"""
if not request.user.is_authenticated:
return JsonResponse({'status': 'false'}, status=401)
if request.method == 'POST':
id = request.POST['id']
comment = request.POST['comment']
paper = get_object_or_404(Paper, pk=id)
if paper.completed or request.user != paper.cc.last().user:
return JsonResponse({'status': 'false'}, status=403)
if paper.approved and request.user == paper.approver:
return JsonResponse({'status': 'false'}, status=403)
paper.updated_at = timezone.now()
if request.user == paper.approver:
paper.comment = comment
paper.approved = True
paper.approved_at = paper.updated_at
if paper.supporters.all():
paper.status = '2progress'
first = paper.supporters.first()
person = Person.objects.create(
order=first.order, user=first.user)
paper.cc.add(person)
else:
paper.status = '5completed'
paper.completed = True
for notifier in paper.notifiers.all():
paper.cc.add(notifier)
else:
index = paper.cc.last().order - 3
supporter = paper.supporters.all()[index]
if index < 0 or supporter.user != request.user:
return JsonResponse({'status': 'false'}, status=403)
else:
supporter.comment = comment
supporter.approved = True
supporter.approved_at = paper.updated_at
supporter.save()
if paper.supporters.last().user == request.user:
paper.status = '5completed'
paper.completed = True
for notifier in paper.notifiers.all():
paper.cc.add(notifier)
else:
next_supporter = paper.supporters.all()[index + 1]
person = Person.objects.create(
order=next_supporter.order, user=next_supporter.user)
paper.cc.add(person)
paper.save()
if paper.status == '5completed':
if paper.user.profile.alarm_paper:
if paper.user.profile.alarm_list != '':
paper.user.profile.alarm_list += ','
alarm_text = 'pc:%d' % paper.id
paper.user.profile.alarm_list += alarm_text
paper.user.profile.alarm = True
paper.user.profile.save()
if settings.PAPER_EMAIL_UPDATE:
thread = threading.Thread(
target=send_email_with_paper, args=(request, id))
thread.start()
else:
target = paper.cc.last().user
if target.profile.alarm_paper:
if target.profile.alarm_list != '':
target.profile.alarm_list += ','
alarm_text = 'pa:%d' % paper.id
target.profile.alarm_list += alarm_text
target.profile.alarm = True
target.profile.save()
        return JsonResponse({'status': 'true'}, status=201)
    return error_page(request)
def reject_paper(request):
"""API reject_paper"""
if not request.user.is_authenticated:
return JsonResponse({'status': 'false'}, status=401)
if request.method == 'POST':
id = request.POST['id']
comment = request.POST['comment']
paper = get_object_or_404(Paper, pk=id)
if not paper.completed and request.user == paper.user:
paper.updated_at = timezone.now()
paper.cancelmsg = comment
paper.status = '4canceled'
paper.completed = True
paper.save()
return JsonResponse({'status': 'true'}, status=201)
if paper.completed or request.user != paper.cc.last().user:
return JsonResponse({'status': 'false'}, status=403)
if paper.approved and request.user == paper.approver:
return JsonResponse({'status': 'false'}, status=403)
paper.updated_at = timezone.now()
if request.user == paper.approver:
paper.comment = comment
paper.rejected = True
paper.approved_at = paper.updated_at
else:
index = paper.cc.last().order - 3
supporter = paper.supporters.all()[index]
if index < 0 or supporter.user != request.user:
return JsonResponse({'status': 'false'}, status=403)
else:
supporter.comment = comment
supporter.rejected = True
supporter.approved_at = paper.updated_at
supporter.save()
paper.status = '3rejected'
paper.completed = True
paper.save()
if paper.user.profile.alarm_paper:
if paper.user.profile.alarm_list != '':
paper.user.profile.alarm_list += ','
alarm_text = 'pr:%d' % paper.id
paper.user.profile.alarm_list += alarm_text
paper.user.profile.alarm = True
paper.user.profile.save()
if settings.PAPER_EMAIL_UPDATE:
thread = threading.Thread(
target=send_email_with_paper, args=(request, id))
thread.start()
        return JsonResponse({'status': 'true'}, status=201)
    return error_page(request)
```
#### File: core/templatetags/context.py
```python
from django import template
register = template.Library()
@register.simple_tag(takes_context=True)
def request_path(context):
"""Return request.path"""
return context['request'].path
```
#### File: core/templatetags/status.py
```python
from django import template
from django.utils.translation import ugettext as _
register = template.Library()
@register.filter(name='status_to_text')
def _status_to_text(status):
if status == '1normal':
return _('status_published')
elif status == '2temp':
return "<font color=#0073aa>%s</font>" % _('status_draft')
elif status == '3notice':
return "<b>%s</b>" % _('status_notice')
elif status == '4warning':
return "<font color=#FF574F>%s</font>" % _('status_warning')
elif status == '5hidden':
return "<font color=#FF574F>%s</font>" % _('status_pending')
elif status == '6deleted':
return "<font color=#e54f44>%s</font>" % _('status_deleted')
elif status == '7spam':
return "<font color=#FF574F>%s</font>" % _('status_spam')
elif status == '1proposed':
return _('status_proposed')
elif status == '2progress':
return _('status_progress')
elif status == '3rejected':
return "<font color=#e54f44>%s</font>" % _('status_rejected')
elif status == '4canceled':
return "<font color=#e54f44>%s</font>" % _('status_canceled')
elif status == '5completed':
return "<font color=#0073aa>%s</font>" % _('status_completed')
@register.filter(name='status_to_text_vanilla')
def _status_to_text_vanilla(status):
if status == '1proposed':
return _('status_proposed')
elif status == '2progress':
return _('status_progress')
elif status == '3rejected':
return _('status_rejected')
elif status == '4canceled':
return _('status_canceled')
elif status == '5completed':
return _('status_completed')
```
#### File: core/templatetags/tags.py
```python
from django import template
register = template.Library()
@register.inclusion_tag('blogs/show_tags.html', takes_context=True)
def show_tags(context, tags):
"""Show tags"""
splits = tags.split(',')
for index, tag in enumerate(splits):
splits[index] = tag.lstrip()
return {
'tags': splits,
}
```
#### File: bbgo/msgs/models.py
```python
from __future__ import unicode_literals
from django.conf import settings
from django.db import models
from django.urls import reverse_lazy
from django.utils.translation import ugettext as _
class Msg(models.Model):
"""Message of msgs"""
MSG_STATUS = {
('1normal', _('status_normal')),
('2read', _('status_read')),
('5hidden', _('status_hidden')),
('6deleted', _('status_deleted')),
}
sender = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='msg_sender', on_delete=models.CASCADE)
recipient = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='msg_recipient', on_delete=models.CASCADE)
sender_status = models.CharField(
max_length=10, choices=MSG_STATUS, default='1normal')
recipient_status = models.CharField(
max_length=10, choices=MSG_STATUS, default='1normal')
text = models.TextField(max_length=settings.MSG_TEXT_MAX)
created_at = models.DateTimeField(auto_now_add=True)
ip = models.GenericIPAddressField()
def get_absolute_url(self):
"""Back to list"""
return reverse_lazy('msgs:inbox', kwargs={'page': 1})
```
#### File: bbgo/teams/table.py
```python
from __future__ import unicode_literals
from django.urls import reverse_lazy
class TeamTable():
"""Table for all team settings"""
    TEAM_LIST_COUNT = 20  # number of posts displayed per page
CATEGORY = [
None,
['레이드', '나이트폴', '스트라이크', '퀘스트', '크루시블', '기타'], # 0
]
TEAM_TABLES = [
        # (board title, board description, category)
['', '', 0], # 0
['파티찾기 PS4', '회원정보에 PSN 아이디를 지정해두면 파티 참여시 아이디가 자동으로 입력됩니다.', 1], # 1
['파티찾기 Xbox', '회원정보에 Live 아이디를 지정해두면 파티 참여시 아이디가 자동으로 입력됩니다.', 1], # 2
['파티찾기 PC', '회원정보에 배틀태그를 지정해두면 파티 참여시 아이디가 자동으로 입력됩니다.', 1], # 3
]
def get_list_count(self):
"""Get list count"""
return self.TEAM_LIST_COUNT
def get_table_len(self):
"""Get number of tables"""
return len(self.TEAM_TABLES)
def get_table_name(self, table):
"""Get name of the table"""
return self.TEAM_TABLES[int(table)][0]
def get_table_url(self, table):
"""Get URL of the table"""
return reverse_lazy('teams:recruitment', args=[table, 1])
def get_table_desc(self, table):
"""Get description of the table"""
return self.TEAM_TABLES[int(table)][1]
def get_table_category(self, table):
"""Get category of the table"""
return self.TEAM_TABLES[int(table)][2]
def get_category(self, table):
"""Get pre-defined category for the table"""
return self.CATEGORY[(self.TEAM_TABLES[int(table)][2])]
def get_table_list(self):
"""Get TEAM_TABLES"""
return self.TEAM_TABLES
```
#### File: bbgo/teams/views.py
```python
from __future__ import unicode_literals
from math import ceil
from core.utils import error_page, get_ipaddress
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.db.models import Q
from django.shortcuts import get_object_or_404, redirect, render
from django.utils import timezone
from django.utils.translation import ugettext as _
from .forms import TeamEditForm
from .models import Team
from .table import TeamTable
def recruitment(request, table=0, page=0):
"""Recruitment"""
team_table = TeamTable()
if int(table) >= team_table.get_table_len():
return error_page(request)
table_name = team_table.get_table_name(table)
if table_name == '':
return error_page(request)
if int(page) < 1:
return redirect('teams:recruitment', table=table, page=1)
table_desc = team_table.get_table_desc(table)
list_count = team_table.get_list_count()
current_page = int(page) - 1
start_at = current_page * list_count
end_at = start_at + list_count
q = Q(status='1normal') | Q(status='7canceled') | Q(status='8full') | Q(status='5hidden')
total = Team.objects.filter(table=table).filter(q).count()
lists = Team.objects.filter(table=table).filter(q).order_by('-id')[start_at:end_at]
index_total = int(ceil(float(total) / list_count))
index_begin = int(current_page / 10) * 10 + 1
index_end = mindex_end = index_total
if index_end - index_begin >= 10:
index_end = index_begin + 9
mindex_begin = int(current_page / 5) * 5 + 1
if mindex_end - mindex_begin >= 5:
mindex_end = mindex_begin + 4
if request.user.is_authenticated:
writable = True
else:
writable = False
return render(
request,
"teams/recruitment.html",
{
'lists': lists,
'total': total,
'table': table,
'table_name': table_name,
'table_desc': table_desc,
'page': current_page + 1,
'index_begin': index_begin,
'index_end': index_end + 1,
'mindex_begin': mindex_begin,
'mindex_end': mindex_end + 1,
'index_total': index_total,
'writable': writable,
}
)
def show_recruitment(request, id):
"""Show recruitment"""
article = get_object_or_404(Team, pk=id)
article.view_count += 1
article.save()
table = article.table
team_table = TeamTable()
table_name = team_table.get_table_name(table)
table_desc = team_table.get_table_desc(table)
if article.status != '1normal':
status_text = article.get_status_text()
else:
status_text = ''
return render(
request,
"teams/show_recruitment.html",
{
'article': article,
'table': table,
'table_name': table_name,
'table_desc': table_desc,
'status_text': status_text,
}
)
@login_required
def new_recruitment(request, table=0):
"""New recruitment"""
if request.method == "POST":
editform = TeamEditForm(request.POST)
if editform.is_valid():
article = editform.save(commit=False)
if article.status != '1normal':
if not request.user.is_staff:
errormsg = _("Wrong status from user.")
return error_page(request, errormsg)
article.user = request.user
article.ip = get_ipaddress(request)
article.table = table
article.save()
request.user.profile.last_article_at = timezone.now()
request.user.profile.point += settings.POINT_ARTICLE
request.user.profile.save()
return redirect(article.get_article_url())
elif request.method == "GET":
editform = TeamEditForm()
team_table = TeamTable()
if int(table) >= team_table.get_table_len():
return error_page(request)
table_name = team_table.get_table_name(table)
if table_name == '':
return error_page(request)
table_desc = team_table.get_table_desc(table)
category_choices = team_table.get_category(table)
return render(
request,
'teams/edit_recruitment.html',
{
'form': editform,
'edit_type': 'new',
'table_name': table_name,
'table_desc': table_desc,
'category_choices': category_choices,
}
)
@login_required
def edit_recruitment(request, id):
"""Edit recruitment"""
article = get_object_or_404(Team, pk=id)
if request.method == "POST":
editform = TeamEditForm(request.POST, instance=article)
if editform.is_valid():
article = editform.save(commit=False)
article.modified_at = timezone.now()
article.save()
request.user.profile.last_article_at = timezone.now()
request.user.profile.save()
return redirect(article.get_article_url())
elif request.method == "GET":
team_table = TeamTable()
if article.table >= team_table.get_table_len():
return error_page(request)
table_name = team_table.get_table_name(article.table)
if table_name == '':
return error_page(request)
table_desc = team_table.get_table_desc(article.table)
category_choices = team_table.get_category(article.table)
editform = TeamEditForm(instance=article)
return render(
request,
'teams/edit_recruitment.html',
{
'form': editform,
'edit_type': 'edit',
'table_name': table_name,
'table_desc': table_desc,
'category_choices': category_choices,
'category': article.category,
}
)
@login_required
def change_status(request, id, status):
"""Change status"""
article = get_object_or_404(Team, pk=id)
if request.user == article.user or request.user.is_staff:
if article.status != status:
if status == '1normal':
article.status = status
article.save()
elif status == '7canceled' or status == '8full':
article.status = status
article.save()
slot_users = article.slot_users.all()
for slot_user in slot_users:
if slot_user.profile.alarm_full:
if slot_user.profile.alarm_list != '':
slot_user.profile.alarm_list += ','
if status == '8full':
alarm_text = 'f:%d' % article.id
else:
alarm_text = 'c:%d' % article.id
slot_user.profile.alarm_list += alarm_text
slot_user.profile.alarm = True
slot_user.save()
return redirect(article.get_article_url())
else:
return error_page(request)
```
#### File: bbgo/vaults/views.py
```python
from __future__ import unicode_literals
from core.utils import error_page, is_mobile
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.auth.hashers import check_password, make_password
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
from django.http import JsonResponse
from django.shortcuts import get_object_or_404, redirect, render
from django.utils import timezone
from django.utils.translation import ugettext as _
from .forms import CheckKeyForm, KeyEditForm, NewKeyForm, VaultEditForm
from .models import Key, Vault
@login_required
def check_seal(request):
"""Check seal"""
if settings.ENABLE_MASTERKEY:
opened = Key.objects.filter(user=request.user).exists()
return opened
else:
return True
@login_required
def key_expired(request):
"""Check key expiry"""
if settings.ENABLE_MASTERKEY:
key = Key.objects.filter(user=request.user).latest('created_at')
now = timezone.now()
if now > key.expiry:
return True, 0
else:
return False, int((key.expiry - now).total_seconds())
else:
return False, 0
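# Note (descriptive comment added for clarity): when ENABLE_MASTERKEY is on, the
# vault views below gate access on a per-user Key; each successful master-key check
# (or an explicit extend_expiry call) pushes Key.expiry forward by
# MASTERKEY_SESSION_TIME minutes.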
@login_required
def check_key(request):
"""Check masterkey"""
if not settings.ENABLE_MASTERKEY:
return redirect('vaults:open_vault')
if not check_seal(request):
return redirect('vaults:new_key')
msg = ''
if request.method == "POST":
checkform = CheckKeyForm(request.POST)
if checkform.is_valid():
inputkey = checkform.save(commit=False)
key = Key.objects.filter(user=request.user).latest('created_at')
if check_password(inputkey.masterkey, key.masterkey):
key.expiry = timezone.now() + timezone.timedelta(
minutes=settings.MASTERKEY_SESSION_TIME)
key.save()
return redirect(key.get_absolute_url())
else:
msg = _('Wrong master key.')
elif request.method == "GET":
checkform = CheckKeyForm()
return render(
request,
"vaults/check_key.html",
{
'form': checkform,
'msg': msg,
}
)
@login_required
def new_key(request):
"""New key"""
if not settings.ENABLE_MASTERKEY:
return redirect('vaults:open_vault')
if check_seal(request):
return redirect('vaults:open_vault')
if request.method == "POST":
editform = NewKeyForm(request.POST)
if editform.is_valid():
key = editform.save(commit=False)
key.user = request.user
key.masterkey = make_password(key.masterkey)
key.save()
return redirect(key.get_absolute_url())
elif request.method == "GET":
editform = NewKeyForm()
return render(
request,
"vaults/new_key.html",
{
'form': editform,
}
)
@login_required
def edit_key(request):
"""Edit key"""
if not settings.ENABLE_MASTERKEY:
return redirect('vaults:open_vault')
if not check_seal(request):
return redirect('vaults:new_key')
msg = ''
key = Key.objects.filter(user=request.user).latest('created_at')
if request.method == "POST":
editform = KeyEditForm(request.POST)
if editform.is_valid():
newkey = editform.save(commit=False)
current_key = editform.cleaned_data['current_key']
if check_password(current_key, key.masterkey):
key.masterkey = make_password(newkey.masterkey)
key.created_at = timezone.now()
key.save()
return redirect(key.get_absolute_url())
else:
msg = _('Wrong master key.')
elif request.method == "GET":
editform = KeyEditForm()
return render(
request,
"vaults/edit_key.html",
{
'form': editform,
'msg': msg,
}
)
@login_required
def open_vault(request, category='all'):
"""Open vault"""
if not check_seal(request):
return redirect('vaults:new_key')
expired, expiry = key_expired(request)
if expired:
return check_key(request)
if category == 'all':
q = Q(user=request.user)
else:
q = Q(user=request.user) & Q(category__iexact=category)
vaults = Vault.objects.filter(q).order_by('category', 'order')
mobile = is_mobile(request)
return render(
request,
"vaults/show_vault.html",
{
'vaults': vaults,
'category': category,
'expiry': expiry,
'mobile': mobile,
}
)
@login_required
def new_vault(request):
"""New vault"""
if not check_seal(request):
return redirect('vaults:new_key')
expired, expiry = key_expired(request)
if expired:
return check_key(request)
if request.method == "POST":
editform = VaultEditForm(request.POST, request.FILES)
if editform.is_valid():
vault = editform.save(commit=False)
vault.user = request.user
try:
latest = Vault.objects.latest('id')
vault.order = latest.id + 1
except ObjectDoesNotExist:
vault.order = 1
vault.save()
return redirect(vault.get_absolute_url())
elif request.method == "GET":
editform = VaultEditForm()
return render(
request,
"vaults/edit_vault.html",
{
'form': editform,
'edit_type': 'new',
'expiry': expiry,
}
)
@login_required
def edit_vault(request, id):
"""Edit vault"""
if not check_seal(request):
return redirect('vaults:new_key')
expired, expiry = key_expired(request)
if expired:
return check_key(request)
vault = get_object_or_404(Vault, pk=id)
if vault.user != request.user:
return error_page(request)
if request.method == "POST":
editform = VaultEditForm(request.POST, request.FILES, instance=vault)
if editform.is_valid():
vault = editform.save(commit=False)
vault.save()
return redirect(vault.get_absolute_url())
elif request.method == "GET":
editform = VaultEditForm(instance=vault)
return render(
request,
"vaults/edit_vault.html",
{
'form': editform,
'edit_type': 'edit',
'vault': vault,
'expiry': expiry,
}
)
@login_required
def delete_vault(request, id):
"""Delete vault"""
vault = get_object_or_404(Vault, pk=id)
if vault.user != request.user:
return error_page(request)
vault.delete()
return redirect(vault.get_absolute_url())
@login_required
def save_order(request):
"""API save_order"""
if request.method == 'POST':
orders = dict(request.POST.iterlists())['order[]']
for index, order in enumerate(orders):
vault = get_object_or_404(Vault, pk=order)
vault.order = index + 1
vault.save()
return JsonResponse({'status': 'true'}, status=201)
return error_page(request)
@login_required
def extend_expiry(request):
"""API extend_expiry"""
if request.method == "POST":
if not settings.ENABLE_MASTERKEY:
return JsonResponse({'status': 'false'}, status=400)
expired, expiry = key_expired(request)
if expired:
return JsonResponse({'status': 'false'}, status=400)
else:
key = Key.objects.filter(user=request.user).latest('created_at')
key.expiry = timezone.now() + timezone.timedelta(
minutes=settings.MASTERKEY_SESSION_TIME)
key.save()
expiry_sec = settings.MASTERKEY_SESSION_TIME * 60
return JsonResponse([expiry_sec], safe=False, status=201)
return error_page(request)
``` |
{
"source": "6-Billionaires/gail",
"score": 2
} |
#### File: gail/network_models/policy_net.py
```python
import tensorflow as tf
import numpy as np
class Policy_net:
def __init__(self, name, env, action_space):
"""
:param name: string
:param env: gym env
"""
obs = env.init_observation()
act_space = action_space # hold, buy
with tf.variable_scope(name):
self.obs = tf.placeholder(dtype=tf.float32, shape=[None] + list(obs.shape), name='obs')
with tf.variable_scope('policy_net'):
layer_1 = tf.layers.dense(inputs=self.obs, units=20, activation=tf.tanh)
layer_2 = tf.layers.dense(inputs=layer_1, units=20, activation=tf.tanh)
layer_3 = tf.layers.dense(inputs=layer_2, units=len(act_space), activation=tf.tanh)
self.act_probs = tf.layers.dense(inputs=layer_3, units=len(act_space), activation=tf.nn.softmax)
with tf.variable_scope('value_net'):
layer_1 = tf.layers.dense(inputs=self.obs, units=20, activation=tf.tanh)
layer_2 = tf.layers.dense(inputs=layer_1, units=20, activation=tf.tanh)
self.v_preds = tf.layers.dense(inputs=layer_2, units=1, activation=None)
self.act_stochastic = tf.multinomial(tf.log(self.act_probs), num_samples=1)
self.act_stochastic = tf.reshape(self.act_stochastic, shape=[-1])
print('act_sto: ', self.act_stochastic)
self.act_deterministic = tf.argmax(self.act_probs, axis=1)
self.scope = tf.get_variable_scope().name
def act(self, obs, stochastic=True):
if stochastic:
return tf.get_default_session().run([self.act_stochastic, self.v_preds], feed_dict={self.obs: obs})
else:
return tf.get_default_session().run([self.act_deterministic, self.v_preds], feed_dict={self.obs: obs})
def get_action_prob(self, obs):
return tf.get_default_session().run(self.act_probs, feed_dict={self.obs: obs})
def get_variables(self):
return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self.scope)
def get_trainable_variables(self):
return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.scope)
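# Illustrative usage sketch (not part of the original module; assumes a TF1 session
# and an `env` object exposing init_observation(), as used in __init__ above):
#
#   with tf.Session() as sess:
#       pi = Policy_net('policy', env, action_space=[0, 1])
#       sess.run(tf.global_variables_initializer())
#       action, v_pred = pi.act(obs=[env.init_observation()], stochastic=True)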
``` |
{
"source": "6C1/Bhreus",
"score": 3
} |
#### File: Bhreus/bhreus/dom.py
```python
DEFAULT_TEXT_VALUE = "The cake is both a group and a smooth manifold!"
class Node(object):
"""
Superclass for all DOM nodes. Handles nodetypes, child types and children.
Also provides basic prettyprinting.
"""
def __init__(self, node_type="text", ctypes=[], children=[]):
self.node_type = node_type
self.children = []
self.ctypes = ctypes # valid types of children
for child in children:
self.add_child(child)
# Add a child node to this node
def add_child(self, n):
# Only add a child node if it's the right nodetype
if n.node_type in self.ctypes:
self.children.append(n)
return True
return False
def get_children(self):
return self.children
def pp(self, prefix=" "):
"""
        Basic prettyprinting of a DOM tree.
"""
print prefix+str(self)
[child.pp(prefix + " ") for child in self.get_children()]
if isinstance(self, ElementNode):
print prefix+self.close()
##############
# Node types #
##############
class TextNode(Node):
def __init__(self, text=DEFAULT_TEXT_VALUE, children=[]):
Node.__init__(self, "text", [], children)
self.text = text
def __str__(self):
return self.text
class ElementNode(Node):
def __init__(self, element_data, children=[]):
self.element_data = element_data
Node.__init__(self, "element", ["text", "element"], children)
def __str__(self):
s = "<"+str(self.element_data.tag_name)
for attribute, value in self.element_data.attributes.items():
            s = "".join([s, " ", attribute, "=\"", value, "\""])
return s+">"
def close(self):
return "</"+self.element_data.tag_name+">"
# Element Data
class ElementData(object):
tag_name = ""
attributes = {}
def __init__(self, tag_name='p', attributes={}):
self.tag_name = tag_name
self.attributes = attributes
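# Illustrative entry point (added here because the original module calls main()
# below without defining it): builds a tiny DOM tree by hand and prettyprints it.
def main():
    paragraph = ElementNode(ElementData("p", {"id": "demo"}),
                            [TextNode("Hello, DOM!")])
    paragraph.pp()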
if __name__ == "__main__":
main()
```
#### File: Bhreus/bhreus/parse.py
```python
import string
from bhreus import dom
class Parser(object):
"""
Parser object.
"""
whitespace = string.whitespace
alphanum = string.letters+string.digits
def __init__(self, html=""):
self.set_html(html)
def set_html(self, html):
self.pos = 0
# Filter characters to ascii
self.html = "".join(filter(lambda c: c in string.printable, html))
###################
# PARSING METHODS #
###################
# Parse the entire source file
def parse(self):
nodes = self.parse_nodes()
# If there's a root node, return that.
if len(nodes) == 1:
return nodes[0]
# Otherwise, make one.
return dom.ElementNode(
            dom.ElementData("html", {}),
nodes)
# Parse one node
def parse_node(self):
if self.next_char() == "<":
return self.parse_element()
return self.parse_text()
# Parse a text node
def parse_text(self):
return dom.TextNode(self.consume_while(lambda c: c != "<"))
# Parse an element node
def parse_element(self):
element = ""
# Element tag
assert self.consume_char() == "<"
tag_name = self.consume_alphanum()
attributes = self.parse_attributes()
assert self.consume_char() == ">"
# Element content
children = self.parse_nodes()
# Element closing tag
assert self.consume_char() == "<"
assert self.consume_char() == "/"
closing_tag_name = self.consume_alphanum()
assert closing_tag_name == tag_name
assert self.consume_char() == ">"
return dom.ElementNode(dom.ElementData(tag_name, attributes), children)
# Parse a tag attribute
def parse_attribute(self):
name = self.consume_alphanum()
assert self.consume_char() == "="
value = self.parse_attribute_value()
return (name, value)
# Parse an attribute value
def parse_attribute_value(self):
openquote = self.consume_char()
assert openquote in ("'", "\"")
value = self.consume_while(lambda c: c != openquote)
assert self.consume_char() == openquote
return value
# Parse all attributes of a tag
def parse_attributes(self):
attributes = {}
while True:
self.consume_whitespace()
if self.next_char() != ">":
name, value = self.parse_attribute()
attributes[name] = value
else:
break
return attributes
# Parse all child nodes
def parse_nodes(self):
nodes = []
while True:
self.consume_whitespace()
if self.eof() or self.startswith("</"):
break
nodes.append(self.parse_node())
return nodes
#####################
# CHARACTER METHODS #
#####################
# Return next character without consuming
def next_char(self):
return self.html[self.pos]
# Consume and return one character
def consume_char(self):
cur_char = self.html[self.pos]
self.pos += 1
return cur_char
# Consume characters as long as they pass test.
def consume_while(self, test=lambda x: False):
result = ""
while not self.eof() and test(self.next_char()):
result += self.consume_char()
return result
# Consume characters as long as they're whitespace
def consume_whitespace(self):
return self.consume_while(lambda a: a in self.whitespace)
# Consume characters as long as they're alphanumeric
def consume_alphanum(self):
return self.consume_while(lambda a: a in self.alphanum)
########################
# STATUS CHECK METHODS #
########################
# Check if the remaining html starts with a given string
def startswith(self, s):
return self.html[self.pos:].startswith(s)
# Check if we're at the end of the file
def eof(self):
return not self.pos < len(self.html)
def main():
"""
Example.
"""
example_html = (
"<html><body>"
"<p id='first-line'>Thunderbolts and lightning</p>"
"<p id='second-line'>Very very frightening</p>"
"</body></html>"
)
example_parser = Parser(example_html)
example_dom = example_parser.parse()
example_dom.pp()
if __name__ == "__main__":
main()
``` |
{
"source": "6C1/BloomPy",
"score": 3
} |
#### File: BloomPy/bloompy/bloom.py
```python
from mmh3 import hash as h
from sys import maxint
from bitarray import bitarray
class BloomFilter(object):
def __init__(self,m=maxint,k=7):
# Create underlying array
self._ar = bitarray(m)
self._ar.setall(False)
# Generate k salted hash functions, using murmur
self._hashes = map(lambda i: # for i from 0 to k-1
(lambda j: # make an anonymous hash function
h(str(j)+str(i*len(str(j))%(i+1))) % m), # seeded based on i
xrange(k))
# Set up counter
self.len = 0
# Store arbitrary data
def store(self, x):
for f in self._hashes: self._ar[f(x)] = True
self.len += 1
# Zero all data
def wipe(self):
self._ar.setall(False)
self.len = 0
# Approximate number of collisions
def collisions(self):
return self.len - self._ar.count()
# Implementing membership test operations
def __contains__(self, item):
return filter(lambda i: not self._ar[i], [ f(item) for f in self._hashes])==[]
# Implementing builtin function len()
def __len__(self): return self.len
```
#### File: BloomPy/tests/bloompy_test.py
```python
from nose import *
from bloompy.bloom import *
from random import choice as rc
from string import printable as ascii
from string import digits
def basic_test():
k = 16180
b = BloomFilter()
for i in xrange(k):
s = "".join([rc(ascii) for j in xrange(64)])
b.store(s)
assert s in b
def int_test():
k = 16180
b = BloomFilter()
for i in xrange(k):
s = int("".join([rc(digits) for j in xrange(64)]))
b.store(s)
assert s in b
``` |
{
"source": "6C1/minimail",
"score": 3
} |
#### File: minimail/minimail/minimail.py
```python
import argparse
import ConfigParser
from email.mime.text import MIMEText
import getpass
from os.path import isfile, expanduser
import smtplib
import sys
CONFIG_FILE = "{}/.minimail".format(expanduser('~'))
DEFAULT_STRING = ("The sun is a miasma / "
"Of incandescent plasma / "
"The sun's not simply made out of gas / "
"(No, no, no) / "
"The sun is a quagmire / "
"It's not made of fire / "
"Forget what you've been told in the past"
)
def main():
# Get arguments
args = parse_args()
# Get SMTP configuration
email, host, port = configure(args["c"])
# Process arguments
args = process_args(args)
# Get email password
pw = getpass.getpass("Email password\n>> ")
# Send email
send(html_body(args['body']),
args['subject'],
args['recipient'],
email,
host,
port,
pw)
def configure(config_flag):
'''
Handles SMTP configuration.
If either config_flag is set or the config file
does not exist, prompt for one-time initial config.
'''
if not isfile(CONFIG_FILE) or config_flag:
init_config()
config = ConfigParser.RawConfigParser()
config.read(CONFIG_FILE)
return (config.get("SMTP", "email"),
config.get("SMTP", "host"),
config.getint("SMTP", "port"))
def init_config():
print "\nMinimail Configuration. (You won't need to do this again.)\n"
config = ConfigParser.RawConfigParser()
config.add_section('SMTP')
# Get config settings
config.set('SMTP', 'host', raw_input("SMTP host address\n>> "))
config.set('SMTP', 'port', raw_input("Port number\n>> "))
config.set('SMTP', 'email', raw_input("Email address\n>> "))
# Write the configuration
with open(CONFIG_FILE, "wb") as config_file:
config.write(config_file)
print "\nConfiguration complete!\n"
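# Example of the generated ~/.minimail file (illustrative placeholder values):
#
#   [SMTP]
#   host = smtp.example.com
#   port = 587
#   email = alice@example.com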
def html_body(body):
'''
Sew up the body into html paragraphs.
'''
return "".join(["<p>{}</p>".format(line) for line in body])
def process_args(args):
'''
Processes each argument, returns dictionary.
'''
return {arg.lower(): process_arg(arg, args) for arg in args}
def process_arg(arg, args):
'''
Processes an individual argument. Prompts for input if no
argument was given. Formats body argument as list if only
one body line was given.
'''
result = get_arg(arg) if args[arg] == DEFAULT_STRING else args[arg]
return result if arg != "Body" or isinstance(result, list) else [result]
def get_arg(arg):
'''
Gets an argument from the user.
'''
return raw_input("Enter {}:\n>> ".format(arg.lower()))
def send(body, subj, to_addr, from_addr, host, port, pw):
'''
Sends an email.
'''
msg = MIMEText(body, 'html')
msg['subject'] = subj
msg['from'] = from_addr
msg['to'] = to_addr
try:
s = smtplib.SMTP(host, port)
s.login(from_addr, pw)
s.sendmail(from_addr, to_addr, msg.as_string())
s.quit()
print "Message delivered"
except Exception as e:
print e
exit()
def parse_args():
'''
Parse cli arguments.
'''
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("Recipient",
help="The address you're emailing",
nargs="?",
default=DEFAULT_STRING)
parser.add_argument("Subject",
help="What you're emailing about",
nargs="?",
default=DEFAULT_STRING)
parser.add_argument("Body",
help="What your email says",
nargs="*",
default=DEFAULT_STRING)
parser.add_argument("-c",
help="Configure SMTP settings",
action='store_true')
return vars(parser.parse_args())
if __name__ == "__main__":
main()
``` |
{
"source": "6cloud/6cloud_cmdb",
"score": 2
} |
#### File: cmdb/source/models.py
```python
import uuid
from django.db import models
from system.models import User
class BaseModel(models.Model):
id = models.UUIDField(default=uuid.uuid4, primary_key=True, verbose_name='ID')
is_deleted = models.BooleanField(default=False, verbose_name='是否被删除')
desc = models.TextField(blank=True, null=True, verbose_name='描述')
create_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
update_time = models.DateTimeField(auto_now=True, verbose_name='更新时间')
class Meta:
abstract = True
class Business(BaseModel):
name = models.CharField(max_length=32, unique=True, verbose_name='业务名')
leader = models.ForeignKey(User, verbose_name='负责人')
class Meta:
verbose_name = '业务'
verbose_name_plural = verbose_name
permissions = (
('list_business', ('获取业务列表')),
('get_business', ('获取业务信息')),
('add_business', ('添加业务')),
('change_business', ('修改业务信息')),
('delete_business', ('删除业务')),
)
default_permissions = ()
def __str__(self):
return self.name
class Application(BaseModel):
name = models.CharField(max_length=32, unique=True, verbose_name='应用')
business = models.ForeignKey(Business, related_name='application_business', verbose_name='所属业务')
ops = models.ForeignKey(User, related_name='application_ops', verbose_name='运维负责人')
dev = models.ForeignKey(User, related_name='application_dev', blank=True, null=True, verbose_name='研发负责人')
qa = models.ForeignKey(User, related_name='application_qa', blank=True, null=True, verbose_name='测试负责人')
cs = models.ForeignKey(User, related_name='application_cs', blank=True, null=True, verbose_name='安全负责人')
class Meta:
verbose_name = '应用'
verbose_name_plural = verbose_name
permissions = (
('list_application', ('获取应用列表')),
('get_application', ('获取应用信息')),
('add_application', ('添加应用')),
('change_application', ('修改应用信息')),
('delete_application', ('删除应用')),
)
default_permissions = ()
def __str__(self):
return self.name
class SystemUser(BaseModel):
name = models.CharField(max_length=128, verbose_name='名称')
username = models.CharField(max_length=32, blank=True, null=True, verbose_name='用户名')
password = models.CharField(max_length=256, blank=True, null=True, verbose_name='密码')
private_key = models.TextField(max_length=4096, blank=True, null=True, verbose_name='私钥')
public_key = models.TextField(max_length=4096, blank=True, null=True, verbose_name='公钥')
class Meta:
verbose_name = '系统用户'
verbose_name_plural = verbose_name
permissions = (
('list_systemuser', ('获取系统用户列表')),
('get_systemuser', ('获取系统用户信息')),
('add_systemuser', ('添加系统用户')),
('change_systemuser', ('修改系统用户信息')),
('delete_systemuser', ('删除系统用户')),
)
default_permissions = ()
def __str__(self):
return self.name
class Colony(BaseModel):
"""
Colony
"""
name = models.CharField(max_length=64, unique=True, verbose_name='集群名')
application = models.ForeignKey(Application, related_name='colony_application', verbose_name='所属应用')
class Meta:
verbose_name = '集群'
verbose_name_plural = verbose_name
permissions = (
('list_hostgroup', ('获取集群列表')),
('get_hostgroup', ('获取集群信息')),
('add_hostgroup', ('添加集群')),
('change_hostgroup', ('修改集群信息')),
('delete_hostgroup', ('删除集群')),
)
default_permissions = ()
def __str__(self):
return self.name
class IDC(BaseModel):
"""
IDC model
"""
name = models.CharField(max_length=64, verbose_name='机房名')
address = models.CharField(max_length=128, verbose_name='所在地址')
phone = models.CharField(max_length=32, blank=True, null=True, verbose_name='联系电话')
manage_user = models.ForeignKey(User, related_name='idc_manager', verbose_name='联系人')
class Meta:
verbose_name = '机房'
verbose_name_plural = verbose_name
permissions = (
('list_idc', ('获取机房列表')),
('get_idc', ('获取机房信息')),
('add_idc', ('添加机房')),
('change_idc', ('修改机房信息')),
('delete_idc', ('删除机房')),
)
default_permissions = ()
def __str__(self):
return self.name
class Cabinet(BaseModel):
"""
Cabinet model
"""
idc = models.ForeignKey(IDC, related_name='cabinet_idc', verbose_name='所属机房')
address = models.CharField(max_length=128, unique=True, verbose_name='所在机房位置')
unum = models.IntegerField(default=0, verbose_name='机柜U个数')
hosts = models.ManyToManyField('Host', related_name='cabinet_hosts', blank=True, verbose_name='主机')
status = models.IntegerField(default=0, verbose_name='状态')
class Meta:
verbose_name = '机柜'
verbose_name_plural = verbose_name
permissions = (
('list_cabinet', ('获取机柜列表')),
('get_cabinet', ('获取机柜信息')),
('add_cabinet', ('添加机柜')),
('change_cabinet', ('修改机柜信息')),
('delete_cabinet', ('删除机柜')),
)
default_permissions = ()
def __str__(self):
return self.address
class Mem(BaseModel):
"""
Mem model
"""
pass
class Cpu(BaseModel):
"""
Cpu model
"""
pass
class Disk(BaseModel):
"""
Disk model
"""
pass
class NIC(BaseModel):
"""
NIC model
"""
class Host(BaseModel):
"""
Host model
"""
HOST_STATUS = (
(1, '使用中'),
(2, '空闲中'),
(3, '故障中'),
(4, '测试机'),
(5, '开发机'),
(6, '维修中')
)
hostname = models.CharField(max_length=64, unique=True, verbose_name='主机名')
intranet_ipaddress = models.GenericIPAddressField(unique=True, verbose_name='内网IP')
network_ipaddress = models.GenericIPAddressField(unique=True, blank=True, null=True, verbose_name='外网IP')
host_type = models.CharField(max_length=32, default='server', verbose_name='主机类型')
macaddress = models.CharField(max_length=32, blank=True, null=True, verbose_name='Mac地址')
sn = models.CharField(max_length=128, blank=True, null=True, verbose_name='SN号')
manufacturer = models.CharField(max_length=64, blank=True, null=True, verbose_name='厂商')
port = models.SmallIntegerField(default=22, verbose_name='端口')
os_type = models.CharField(max_length=32, blank=True, null=True, verbose_name='系统类型')
os_version = models.CharField(max_length=64, blank=True, null=True, verbose_name='系统版本')
mem = models.ForeignKey(Mem, blank=True, null=True, verbose_name='内存')
cpu = models.ForeignKey(Cpu, blank=True, null=True, verbose_name='CPU')
disk = models.ForeignKey(Disk, blank=True, null=True, verbose_name='磁盘')
nic = models.ForeignKey(NIC, blank=True, null=True, verbose_name='网卡')
systemuser = models.ForeignKey(SystemUser, related_name='host_systemuser', blank=True, null=True, verbose_name='系统用户')
colony = models.ManyToManyField(Colony, related_name='host_colony', blank=True, verbose_name='所属集群')
application = models.ForeignKey(Application, related_name='host_application', blank=True, null=True, verbose_name='所属应用')
status = models.IntegerField(default=1, choices=HOST_STATUS, verbose_name='状态')
description = models.TextField(max_length=512, blank=True, null=True, verbose_name='描述')
class Meta:
verbose_name = '主机'
verbose_name_plural = verbose_name
permissions = (
('list_host', ('获取主机列表')),
('get_host', ('获取主机信息')),
('add_host', ('添加主机')),
('change_host', ('修改主机信息')),
('delete_host', ('删除主机')),
)
default_permissions = ()
def __str__(self):
return self.hostname
```
#### File: cmdb/system/views.py
```python
from rest_framework.views import APIView
from rest_framework import viewsets
from rest_framework.authentication import authenticate
from rest_framework import authtoken
from rest_framework.response import Response
from rest_framework.status import HTTP_400_BAD_REQUEST, HTTP_200_OK
from system.serializers import UserSerializer, RoleSerializer
from system.models import User, Role
class UserLoginApi(APIView):
permission_classes = ()
authentication_classes = ()
def post(self, request):
        data = request.data
        username = data.get('username')
        password = data.get('password')
        authenticater = authenticate(username=username, password=password)
        if authenticater:
            # Return the serialized user on successful authentication.
            return Response(UserSerializer(authenticater).data, status=HTTP_200_OK)
return Response({'msg': '认证失败!'}, status=HTTP_400_BAD_REQUEST)
class UserActionApi(viewsets.ModelViewSet):
queryset = User.objects.all()
serializer_class = UserSerializer
permission_classes = ()
authentication_classes = ()
# def list(self, request, *args, **kwargs):
# pass
# def retrieve(self, request, *args, **kwargs):
# pass
#
# def create(self, request, *args, **kwargs):
# pass
#
# def update(self, request, *args, **kwargs):
# pass
#
# def destroy(self, request, *args, **kwargs):
# pass
class RoleActionApi(viewsets.ModelViewSet):
queryset = Role.objects.all()
serializer_class = RoleSerializer
permission_classes = ()
authentication_classes = ()
``` |
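The viewsets above still need URL routing before they are reachable. Below is a minimal, hedged sketch of a `urls.py` that could expose them; the route prefixes and the Django 2+ `path()` style are assumptions, not taken from this repository:
```python
# Hypothetical urls.py wiring for the viewsets defined above.
from django.urls import path, include
from rest_framework import routers
from system.views import UserLoginApi, UserActionApi, RoleActionApi

router = routers.DefaultRouter()
router.register(r'users', UserActionApi)   # /users/ CRUD via the ModelViewSet
router.register(r'roles', RoleActionApi)   # /roles/ CRUD via the ModelViewSet

urlpatterns = [
    path('login/', UserLoginApi.as_view()),
    path('', include(router.urls)),
]
```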
{
"source": "6DammK9/FastMOT",
"score": 3
} |
#### File: fastmot/plugins/get_compute.py
```python
import sys
import ctypes
import logging
logger = logging.getLogger(__name__)
CUDA_SUCCESS = 0
def main():
libnames = ('libcuda.so', 'libcuda.dylib', 'cuda.dll')
for libname in libnames:
try:
cuda = ctypes.CDLL(libname)
except OSError:
continue
else:
break
    else:
        logger.error('Could not load any of: ' + ' '.join(libnames))
        return 1
gpu_archs = set()
n_gpus = ctypes.c_int()
cc_major = ctypes.c_int()
cc_minor = ctypes.c_int()
result = ctypes.c_int()
device = ctypes.c_int()
error_str = ctypes.c_char_p()
result = cuda.cuInit(0)
if result != CUDA_SUCCESS:
cuda.cuGetErrorString(result, ctypes.byref(error_str))
logger.error('cuInit failed with error code %d: %s' % (result, error_str.value.decode()))
return 1
result = cuda.cuDeviceGetCount(ctypes.byref(n_gpus))
if result != CUDA_SUCCESS:
cuda.cuGetErrorString(result, ctypes.byref(error_str))
logger.error('cuDeviceGetCount failed with error code %d: %s' % (result, error_str.value.decode()))
return 1
    for i in range(n_gpus.value):
        # Resolve the device handle for ordinal i before querying its compute capability.
        if cuda.cuDeviceGet(ctypes.byref(device), i) != CUDA_SUCCESS:
            continue
        if cuda.cuDeviceComputeCapability(ctypes.byref(cc_major), ctypes.byref(cc_minor), device) == CUDA_SUCCESS:
            gpu_archs.add(str(cc_major.value) + str(cc_minor.value))
logger.info(' '.join(gpu_archs))
return 0
if __name__ == '__main__':
sys.exit(main())
```
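A hedged sketch of consuming this script from a build step, turning the logged compute capabilities into `nvcc` gencode flags; whether the arch list ends up on stdout or stderr depends on how logging is configured, so both are scanned:
```python
# Hypothetical helper: run get_compute.py and map "75 86" style output to nvcc flags.
import re
import subprocess

def nvcc_gencode_flags(script="get_compute.py"):
    proc = subprocess.run(["python3", script], capture_output=True, text=True)
    # The arch list may land on stdout or stderr depending on logging config.
    archs = re.findall(r"\b\d{2}\b", proc.stdout + proc.stderr)
    return " ".join(f"-gencode arch=compute_{cc},code=sm_{cc}" for cc in archs)

if __name__ == "__main__":
    print(nvcc_gencode_flags())
```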
#### File: FastMOT/feathersjssio/sio_client.py
```python
import socketio
import random
import threading
import calendar
import time
import os
import logging
from urllib.parse import urlparse
from fastmot.utils import NpEncoder
import json
from sys import stdout, stderr, exc_info
logger = logging.getLogger(__name__)
class SIOClient(threading.Thread):
def __init__(self,
output_uri = None,
device_id = "unknown",
FEATHERSJS_SOCKET = {}
):
# Create as a new thread
random.seed()
threading.Thread.__init__(self)
self.output_uri = output_uri
io_server_url, socketio_path, transports, primary_chanel = self.parseURL(self.output_uri)
self.io_server_url = io_server_url
self.socketio_path = socketio_path
self.transports = transports
self.primary_chanel = primary_chanel
self.device_id = device_id
#self.cond = threading.Condition()
# Keep logger=true otherwise it is impossible to debug
# #logger=FEATHERSJS_SOCKET['sio_logger'], engineio_logger=FEATHERSJS_SOCKET['engineio_logger']
self.sio = socketio.Client(**vars(FEATHERSJS_SOCKET)) #logger=True, engineio_logger=True
logger.info("FeathersJS socket.io Connection initialize")
@self.sio.event
def message(data):
            logger.info('on_message: %s', data)
@self.sio.on('connect') #, namespace=primary_chanel
def connect(): #on_
logger.info('on_connect')
@self.sio.event
def connect_error(data):
logger.info('on_connection_error')
@self.sio.event
def disconnect():
logger.info('on_disconnect')
#Inherted from threading.Thread
#def start(self):
# print("Thread start")
# logger.info("start")
# pass
def run(self):
#print("Thread run")
logger.info("run")
#https://gitmemory.cn/repo/miguelgrinberg/python-socketio/issues/821
#https://stackoverflow.com/questions/31851514/how-does-thread-init-self-in-a-class-work
#Note: Upon timeout, this client suddenly BLOCK IO!
#print("url", self.io_server_url)
self.sio.connect(url=self.io_server_url, socketio_path=self.socketio_path,
transports=self.transports, wait_timeout=30
)
# engineio.__version__ = '4.2.1dev'
#print('my sid is', self.sio.sid)
logger.info("Connected as SID: ", self.sio.sid)
#self.resetTimer('start')
#Send message immediately?
if False:
test_payload = {
'deviceId': 'fastmot',
'foo': 'bar'
}
print("Sending test payload...")
self.sio.emit("create", (self.primary_chanel, test_payload))
def stop(self):
self.sio.disconnect()
# self.deviceconnection.stop()
#logger.info("stop program in 5 seconds...")
#time.sleep(5)
#os._exit(0)
#logger.info("restarting...")
#self.start()
def put_msg(self, sensor_data):
# https://github.com/feathersjs/feathers/issues/1471
# https://github.com/miguelgrinberg/python-socketio/issues/321
#sensor_data = {
# 'deviceId': 'fastmot',
# 'foo': 'bar'
#}
#print('put_msg: ', self.primary_chanel)
#print(sensor_data)
#sensor_data = json.dumps(sensor_data, cls=NpEncoder) #NumpyEncoder
#https://socket.io/docs/v4/namespaces/
payload = {
'deviceId': self.device_id,
'sensor_data': sensor_data
}
try:
if self.sio.connected:
self.sio.emit("create", (self.primary_chanel, payload))
except:
logger.error(str(exc_info()[0]) + " " + str(exc_info()[1]))
def on_trackevt(self, sensor_data):
#logger.info('on_trackevt()')
#logger.info('raw message: ' + str(sensor_data))
self.put_msg(sensor_data)
def parseURL(self, ws_url):
sep = '/'
up = urlparse(ws_url)
ps = up.path.split(sep) #['taihang', 'api', 'live', 'fastmot']
ws_scheme = up.scheme
#print("old ws_scheme", ws_scheme)
if ws_scheme == 'https':
ws_scheme = 'wss'
elif ws_scheme == 'http':
ws_scheme = 'ws'
#print("new ws_scheme", ws_scheme)
#"ws://192.168.2.114:3465/taihang/api/live/fastmot"
#"wss://webtest.etag-hk.com/taihang/api/live/fastmot"
# "WS_URL": "http://localhost:3465",
# "WS_PATH": "/taihang/api/socketio/",
#https://github.com/miguelgrinberg/python-socketio/blob/main/src/socketio/client.py
#https://github.com/miguelgrinberg/python-engineio/blob/main/src/engineio/client.py
#Why skip all the sub-directory?
io_server_url = '%s://%s' % (ws_scheme, up.netloc)
transports = "websocket" #polling
#print(ps)
if len(ps) == 5:
socketio_path = '%s/socketio' % (sep.join(ps[1:3]))
primary_chanel = sep.join(ps[3:])
else:
print("Parse URL failed. Fallback to default value...")
print("e.g. http://localhost/appname/api/live/fastmot")
socketio_path = 'socket.io' #default
primary_chanel = 'my message' #as in official guideline
#https://stackoverflow.com/questions/66441954/socketio-packet-queue-is-empty-aborting-error-when-sending-message-from-serve
#io_server_url = 'ws://192.168.2.114:3465' #'/taihang/api/live/fastmot'
#socketio_path = "taihang/api/socketio"
#primary_chanel = "live/fastmot"
return io_server_url, socketio_path, transports, primary_chanel
```
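A short, hedged illustration of what `parseURL` produces for the URL shape described in the comments above; the hostname is made up and `SimpleNamespace` stands in for the real socket.io config object:
```python
# Hypothetical check of SIOClient.parseURL based on the splitting logic above.
from types import SimpleNamespace

client = SIOClient(
    output_uri="https://webtest.example.com/taihang/api/live/fastmot",
    device_id="demo",
    FEATHERSJS_SOCKET=SimpleNamespace(),  # vars() expects an object with __dict__
)
print(client.io_server_url)    # -> "wss://webtest.example.com"
print(client.socketio_path)    # -> "taihang/api/socketio"
print(client.primary_chanel)   # -> "live/fastmot"
```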
#### File: FastMOT/mqtt/abstract_server.py
```python
from abc import ABC, abstractmethod
from queue import Queue
import signal
import os
import json
import time
import calendar
import threading
#import client_config
#import c_log
import logging
logger = logging.getLogger(__name__)
class abstractServer(ABC):
def __init__(self, timer_lapse=200):
super().__init__()
self.queue = Queue()
self.timer_lapse = timer_lapse
self.time_thread = threading.Timer(self.timer_lapse, self.on_timeout)
#signal.signal(signal.SIGINT, self.handler)
def put_msg(self, msg):
# Multiple reading in single message
#for m in msg:
# self.devices[m["nodeId"]].update()
#try:
# n = json.dumps(msg)
#except Exception as e:
# c_log.log_error()
n = msg
self.queue.put(n)
#def handler(self, signum, frame):
# self.stop()
def resetTimer(self, action):
if (action == 'reset'):
self.time_thread.cancel()
self.time_thread = threading.Timer(self.timer_lapse, self.on_timeout)
self.time_thread.start()
def prepare_timeout(self):
self.resetTimer('reset')
@abstractmethod
def on_timeout(self):
pass
def start(self):
pass
def stop(self):
pass
``` |
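A minimal sketch of a concrete subclass, assuming only the interface shown above; draining the queue inside `on_timeout` is an illustrative choice, not behaviour taken from the repository:
```python
# Hypothetical subclass that flushes queued messages every time the timer fires.
class PrintingServer(abstractServer):
    def on_timeout(self):
        while not self.queue.empty():
            print("flushing:", self.queue.get())
        self.prepare_timeout()  # re-arm the timer for the next window

server = PrintingServer(timer_lapse=5)
server.put_msg({"nodeId": "demo", "value": 1})
server.prepare_timeout()  # start the 5 second timer
```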
{
"source": "6DammK9/question_generation",
"score": 2
} |
#### File: 6DammK9/question_generation/pipelines.py
```python
import itertools
import logging
import re
from typing import Optional, Dict, Union
import nltk
# Git PR #35
def _nltk_downloader():
    # Download the punkt tokenizer only if it is not already available locally.
    try:
        nltk.data.find('tokenizers/punkt')
    except LookupError:
        nltk.download('punkt', quiet=True)
_nltk_downloader()
import torch
from transformers import(
AutoModelForSeq2SeqLM,
AutoTokenizer,
PreTrainedModel,
PreTrainedTokenizer,
)
logger = logging.getLogger(__name__)
class QGPipeline:
"""Poor man's QG pipeline"""
def __init__(
self,
model: PreTrainedModel,
tokenizer: PreTrainedTokenizer,
ans_model: PreTrainedModel,
ans_tokenizer: PreTrainedTokenizer,
qg_format: str,
use_cuda: bool
):
self.model = model
self.tokenizer = tokenizer
self.ans_model = ans_model
self.ans_tokenizer = ans_tokenizer
self.qg_format = qg_format
self.device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu"
self.model.to(self.device)
if self.ans_model is not self.model:
self.ans_model.to(self.device)
assert self.model.__class__.__name__ in ["T5ForConditionalGeneration", "BartForConditionalGeneration"]
if "T5ForConditionalGeneration" in self.model.__class__.__name__:
self.model_type = "t5"
else:
self.model_type = "bart"
#qa model is not same as qg model!
if "T5ForConditionalGeneration" in self.ans_model.__class__.__name__:
self.ans_model_type = "t5"
else:
self.ans_model_type = "bart"
def __call__(self, inputs: str):
inputs = " ".join(inputs.split())
#Found some other models using different highlight token
hl_token = "<hl>" if self.ans_model_type == "t5" else "[HL]"
if hl_token in inputs:
sents, answers = self._extract_answers_with_hl(inputs, hl_token)
else:
sents, answers = self._extract_answers(inputs)
flat_answers = list(itertools.chain(*answers))
if len(flat_answers) == 0:
return []
if self.qg_format == "prepend":
qg_examples = self._prepare_inputs_for_qg_from_answers_prepend(inputs, answers)
else:
qg_examples = self._prepare_inputs_for_qg_from_answers_hl(sents, answers)
#Length is 0 for some reason
#print("qg_examples", qg_examples)
if len(qg_examples) == 0:
return []
qg_inputs = [example['source_text'] for example in qg_examples]
questions = self._generate_questions(qg_inputs)
output = [{'answer': example['answer'], 'question': que} for example, que in zip(qg_examples, questions)]
return output
def _generate_questions(self, inputs):
inputs = self._tokenize(inputs, padding=True, truncation=True)
outs = self.model.generate(
input_ids=inputs['input_ids'].to(self.device),
attention_mask=inputs['attention_mask'].to(self.device),
max_length=32,
num_beams=4,
)
questions = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in outs]
return questions
def _extract_answers_with_hl(self, context, hl_token):
sents = nltk.sent_tokenize(context)
#print("nltk.text", context)
#print("nltk.sents", sents)
#<hl> answer <hl>
#[HL]answer[HL]
#answers [['ans1'], ['ans2']]
#sents ['ctx1', 'ctx2']
answers = []
for sent in sents:
answer_segment = []
answer = sent.split(hl_token)
for i in range(0, len(answer)):
if i % 2 == 1:
answer_segment.append(answer[i].strip())
answers.append(answer_segment)
sents = [sent.replace(hl_token, "") for sent in sents]
#print ("answers", answers)
#print ("sents", sents)
return sents, answers
def _extract_answers(self, context):
sents, inputs = self._prepare_inputs_for_ans_extraction(context)
inputs = self._tokenize(inputs, padding=True, truncation=True)
#CUDA error: CUBLAS_STATUS_NOT_INITIALIZED when calling `cublasCreate(handle)`
#when self.model != self.ans_model:
outs = self.ans_model.generate(
input_ids=inputs['input_ids'].to(self.device),
attention_mask=inputs['attention_mask'].to(self.device),
max_length=32,
)
#print("inputs", inputs)
#print("outs", outs)
#print("ans_tokenizer", self.ans_tokenizer)
target_token = '<sep>' if self.ans_model_type == "t5" else self.ans_tokenizer.sep_token #</s>
#print("target_tokenizer", target_token)
#Git Issue 90 solution (nope)
dec = [self.ans_tokenizer.decode(ids, skip_special_tokens=True if self.ans_model_type == "t5" else False) for ids in outs]
#print("dec", dec)
answers = [item.split(target_token) for item in dec]
answers = [i[:-1] for i in answers] if self.ans_model_type == "t5" else [i[1:-1] for i in answers]
#print("answers", answers)
#print("sents", sents)
return sents, answers
def _tokenize(self,
inputs,
padding=True,
truncation=True,
add_special_tokens=True,
max_length=512
):
inputs = self.tokenizer.batch_encode_plus(
inputs,
max_length=max_length,
add_special_tokens=add_special_tokens,
truncation=truncation,
padding="max_length" if padding else False,
pad_to_max_length=padding,
return_tensors="pt"
)
return inputs
def _prepare_inputs_for_ans_extraction(self, text):
sents = nltk.sent_tokenize(text)
#print("nltk.text", text)
#print("nltk.sents", sents)
#Found some other models using different highlight token
hl_token = "<hl>" if self.ans_model_type == "t5" else "[HL]"
inputs = []
for i in range(len(sents)):
source_text = "extract answers:"
for j, sent in enumerate(sents):
if i == j:
sent = "%s %s %s" % (hl_token, sent, hl_token)
source_text = "%s %s" % (source_text, sent)
source_text = source_text.strip()
if self.ans_model_type == "t5":
source_text = source_text + " </s>"
inputs.append(source_text)
#print("inputs", inputs)
return sents, inputs
def _prepare_inputs_for_qg_from_answers_hl(self, sents, answers):
#Found some other models using different highlight token
hl_token = "<hl>" if self.model_type == "t5" else "[HL]"
inputs = []
for i, answer in enumerate(answers):
if len(answer) == 0: continue
for answer_text in answer:
sent = sents[i].lower()
sents_copy = sents[:]
#<pad> <unk>икола <unk>есла
#print("answer_text", answer_text)
#Git PR 65
answer_text = re.sub("<pad> | <pad>", "", answer_text)
answer_text = answer_text.strip().lower()
#print("answer_text", answer_text)
#print("sent", sent)
#Git Issue 90 solution
if answer_text not in sent:
#print("Not found: ", answer_text, sent)
answer_text = answer_text.split(" ")[0]
if answer_text not in sent:
#print("Still not found: ", answer_text, sent)
continue
#Git PR 65
#Git Issue 90 error
try:
ans_start_idx = sent.index(answer_text)
except:
continue
sent = f"{sent[:ans_start_idx]} {hl_token} {answer_text} {hl_token} {sent[ans_start_idx + len(answer_text): ]}"
sents_copy[i] = sent
source_text = " ".join(sents_copy)
#The BART we use is not multitask!
if self.model_type == "t5":
source_text = f"generate question: {source_text}"
source_text = source_text + " </s>"
inputs.append({"answer": answer_text, "source_text": source_text})
return inputs
def _prepare_inputs_for_qg_from_answers_prepend(self, context, answers):
flat_answers = list(itertools.chain(*answers))
examples = []
for answer in flat_answers:
source_text = f"answer: {answer} context: {context}"
if self.model_type == "t5":
source_text = source_text + " </s>"
examples.append({"answer": answer, "source_text": source_text})
return examples
class MultiTaskQAQGPipeline(QGPipeline):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def __call__(self, inputs: Union[Dict, str]):
if type(inputs) is str:
# do qg
return super().__call__(inputs)
else:
# do qa
return self._extract_answer(inputs["question"], inputs["context"])
def _prepare_inputs_for_qa(self, question, context):
source_text = f"question: {question} context: {context}"
if self.model_type == "t5":
source_text = source_text + " </s>"
return source_text
def _extract_answer(self, question, context):
source_text = self._prepare_inputs_for_qa(question, context)
inputs = self._tokenize([source_text], padding=False)
outs = self.model.generate(
input_ids=inputs['input_ids'].to(self.device),
attention_mask=inputs['attention_mask'].to(self.device),
max_length=16,
)
answer = self.tokenizer.decode(outs[0], skip_special_tokens=True)
return answer
class E2EQGPipeline:
def __init__(
self,
model: PreTrainedModel,
tokenizer: PreTrainedTokenizer,
use_cuda: bool
) :
self.model = model
self.tokenizer = tokenizer
self.device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu"
self.model.to(self.device)
assert self.model.__class__.__name__ in ["T5ForConditionalGeneration", "BartForConditionalGeneration"]
if "T5ForConditionalGeneration" in self.model.__class__.__name__:
self.model_type = "t5"
else:
self.model_type = "bart"
self.default_generate_kwargs = {
"max_length": 256,
"num_beams": 4,
"length_penalty": 1.5,
"no_repeat_ngram_size": 3,
"early_stopping": True,
}
def __call__(self, context: str, **generate_kwargs):
inputs = self._prepare_inputs_for_e2e_qg(context)
# TODO: when overrding default_generate_kwargs all other arguments need to be passsed
# find a better way to do this
if not generate_kwargs:
generate_kwargs = self.default_generate_kwargs
input_length = inputs["input_ids"].shape[-1]
# max_length = generate_kwargs.get("max_length", 256)
# if input_length < max_length:
# logger.warning(
# "Your max_length is set to {}, but you input_length is only {}. You might consider decreasing max_length manually, e.g. summarizer('...', max_length=50)".format(
# max_length, input_length
# )
# )
outs = self.model.generate(
input_ids=inputs['input_ids'].to(self.device),
attention_mask=inputs['attention_mask'].to(self.device),
**generate_kwargs
)
#print(self.tokenizer)
prediction = self.tokenizer.decode(outs[0], skip_special_tokens=True)
questions = prediction.split("<sep>")
questions = [question.strip() for question in questions[:-1]]
return questions
def _prepare_inputs_for_e2e_qg(self, context):
source_text = f"generate questions: {context}"
if self.model_type == "t5":
source_text = source_text + " </s>"
inputs = self._tokenize([source_text], padding=False)
return inputs
def _tokenize(
self,
inputs,
padding=True,
truncation=True,
add_special_tokens=True,
max_length=512
):
inputs = self.tokenizer.batch_encode_plus(
inputs,
max_length=max_length,
add_special_tokens=add_special_tokens,
truncation=truncation,
padding="max_length" if padding else False,
pad_to_max_length=padding,
return_tensors="pt"
)
return inputs
SUPPORTED_TASKS = {
"question-generation": {
"impl": QGPipeline,
"default": {
"model": "valhalla/t5-small-qg-hl",
"ans_model": "valhalla/t5-small-qa-qg-hl",
}
},
"multitask-qa-qg": {
"impl": MultiTaskQAQGPipeline,
"default": {
"model": "valhalla/t5-small-qa-qg-hl",
}
},
"e2e-qg": {
"impl": E2EQGPipeline,
"default": {
"model": "valhalla/t5-small-e2e-qg",
}
}
}
def pipeline(
task: str,
model: Optional = None,
tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None,
qg_format: Optional[str] = "highlight",
ans_model: Optional = None,
ans_tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None,
use_cuda: Optional[bool] = True,
**kwargs,
):
# Retrieve the task
if task not in SUPPORTED_TASKS:
raise KeyError("Unknown task {}, available tasks are {}".format(task, list(SUPPORTED_TASKS.keys())))
targeted_task = SUPPORTED_TASKS[task]
task_class = targeted_task["impl"]
# Use default model/config/tokenizer for the task if no model is provided
if model is None:
model = targeted_task["default"]["model"]
# Try to infer tokenizer from model or config name (if provided as str)
if tokenizer is None:
if isinstance(model, str):
tokenizer = model
else:
# Impossible to guest what is the right tokenizer here
raise Exception(
"Impossible to guess which tokenizer to use. "
"Please provided a PretrainedTokenizer class or a path/identifier to a pretrained tokenizer."
)
# Instantiate tokenizer if needed
if isinstance(tokenizer, (str, tuple)):
if isinstance(tokenizer, tuple):
# For tuple we have (tokenizer name, {kwargs})
tokenizer = AutoTokenizer.from_pretrained(tokenizer[0], **tokenizer[1])
else:
tokenizer = AutoTokenizer.from_pretrained(tokenizer)
# Instantiate model if needed
if isinstance(model, str):
model = AutoModelForSeq2SeqLM.from_pretrained(model)
if task == "question-generation":
if ans_model is None:
# load default ans model
ans_model = targeted_task["default"]["ans_model"]
ans_tokenizer = AutoTokenizer.from_pretrained(ans_model)
ans_model = AutoModelForSeq2SeqLM.from_pretrained(ans_model)
else:
# Try to infer tokenizer from model or config name (if provided as str)
if ans_tokenizer is None:
if isinstance(ans_model, str):
ans_tokenizer = ans_model
else:
# Impossible to guest what is the right tokenizer here
raise Exception(
"Impossible to guess which tokenizer to use. "
"Please provided a PretrainedTokenizer class or a path/identifier to a pretrained tokenizer."
)
# Instantiate tokenizer if needed
if isinstance(ans_tokenizer, (str, tuple)):
if isinstance(ans_tokenizer, tuple):
# For tuple we have (tokenizer name, {kwargs})
ans_tokenizer = AutoTokenizer.from_pretrained(ans_tokenizer[0], **ans_tokenizer[1])
else:
ans_tokenizer = AutoTokenizer.from_pretrained(ans_tokenizer)
if isinstance(ans_model, str):
ans_model = AutoModelForSeq2SeqLM.from_pretrained(ans_model)
if task == "e2e-qg":
return task_class(model=model, tokenizer=tokenizer, use_cuda=use_cuda)
elif task == "question-generation":
return task_class(model=model, tokenizer=tokenizer, ans_model=ans_model, ans_tokenizer=ans_tokenizer, qg_format=qg_format, use_cuda=use_cuda)
else:
return task_class(model=model, tokenizer=tokenizer, ans_model=model, ans_tokenizer=tokenizer, qg_format=qg_format, use_cuda=use_cuda)
``` |
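A hedged usage sketch for the `pipeline` factory above, using the default checkpoints listed in `SUPPORTED_TASKS`; downloading the models from the Hugging Face hub is assumed to succeed:
```python
# Answer-aware question generation with the default T5 checkpoints.
nlp = pipeline("question-generation")
print(nlp("42 is the answer to life, the universe and everything."))
# e.g. [{'answer': '42', 'question': 'What is the answer to life, the universe and everything?'}]

# End-to-end question generation (no separate answer-extraction step).
e2e = pipeline("e2e-qg")
print(e2e("Python is a programming language created by Guido van Rossum."))
```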
{
"source": "6days9weeks/Akio",
"score": 3
} |
#### File: Akio/cogs/actions.py
```python
import random
from typing import Optional
import aiohttp
import discord
import nekos
from discord.ext import commands
from utils.lists import compliments
default = " " # empty string for when no one is passed to hug
class Actions(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.command()
@commands.guild_only()
async def compliment(self, ctx: commands.Context, person: Optional[discord.Member] = None):
person = person or ctx.author
await ctx.send(
embed=discord.Embed(
description=f"{person.mention} {random.choice(compliments)}",
color=ctx.author.color,
).set_footer(text=f"Compliment from {ctx.author}")
)
@commands.command()
@commands.guild_only()
async def hug(self, ctx: commands.Context, *, person=default):
"""Make someones day with a hug"""
async with aiohttp.ClientSession() as session:
async with session.get("https://api.waifu.pics/sfw/hug") as resp:
await ctx.send(
embed=discord.Embed(
color=ctx.author.color, description=f"{ctx.author.mention} hugs {person}"
).set_image(url=(await resp.json())["url"])
)
@commands.command()
@commands.guild_only()
async def pat(self, ctx: commands.Context, *, person=default):
"""Pat someone. Pats are very gud"""
async with aiohttp.ClientSession() as session:
async with session.get("https://api.waifu.pics/sfw/pat") as resp:
await ctx.send(
embed=discord.Embed(
color=ctx.author.color, description=f"{ctx.author.mention} pats {person}"
).set_image(url=(await resp.json())["url"])
)
@commands.command()
@commands.guild_only()
async def kiss(self, ctx: commands.Context, *, person=default):
"""Give that special someone a lil smooch smooch"""
async with aiohttp.ClientSession() as session:
async with session.get("https://api.waifu.pics/sfw/kiss") as resp:
await ctx.send(
embed=discord.Embed(
color=ctx.author.color, description=f"{ctx.author.mention} kisses {person}"
)
.set_image(url=(await resp.json())["url"])
.set_footer(text="ooooooooo 💘")
)
@commands.command()
@commands.guild_only()
async def lick(self, ctx: commands.Context, *, person=default):
"""Ever wanted to lick someone? Here you go."""
async with aiohttp.ClientSession() as session:
async with session.get("https://api.waifu.pics/sfw/lick") as resp:
await ctx.send(
embed=discord.Embed(
color=ctx.author.color, description=f"{ctx.author.mention} licks {person}"
).set_image(url=(await resp.json())["url"])
)
@commands.command()
@commands.guild_only()
async def bully(self, ctx: commands.Context, *, person=default):
"""You a big bad bulli"""
async with aiohttp.ClientSession() as session:
async with session.get("https://api.waifu.pics/sfw/bully") as resp:
await ctx.send(
embed=discord.Embed(
color=ctx.author.color,
description=f"{ctx.author.mention} bullies {person}",
)
.set_image(url=(await resp.json())["url"])
.set_footer(text="oof")
)
@commands.command()
@commands.guild_only()
async def poke(self, ctx: commands.Context, *, person=default):
"""Annoy someone with a lil poke"""
async with aiohttp.ClientSession() as session:
async with session.get("https://api.waifu.pics/sfw/poke") as resp:
await ctx.send(
embed=discord.Embed(
color=ctx.author.color, description=f"{ctx.author.mention} pokes {person}"
).set_image(url=(await resp.json())["url"])
)
@commands.command()
@commands.guild_only()
async def slap(self, ctx: commands.Context, *, person=default):
"""Express your anger with a slap"""
async with aiohttp.ClientSession() as session:
async with session.get("https://api.waifu.pics/sfw/slap") as resp:
await ctx.send(
embed=discord.Embed(
color=ctx.author.color, description=f"{ctx.author.mention} slaps {person}"
)
.set_image(url=(await resp.json())["url"])
.set_footer(text="Ouchie")
)
@commands.command()
@commands.guild_only()
async def smug(self, ctx: commands.Context):
"""You feeling smug? :face_with_raised_eyebrow:"""
async with aiohttp.ClientSession() as session:
async with session.get("https://api.waifu.pics/sfw/smug") as resp:
await ctx.send(
embed=discord.Embed(
color=ctx.author.color,
description=f"{ctx.author.mention} has a smug look on their face",
).set_image(url=(await resp.json())["url"])
)
@commands.command()
@commands.guild_only()
async def tickle(self, ctx, *, person=default):
"""Tickle Tickle Tickle :)"""
tickle = nekos.img("tickle")
embed = discord.Embed(color=0xFFB6C1)
embed.description = f"{ctx.author.mention} tickles {person}"
embed.set_image(url=tickle)
await ctx.send(embed=embed)
@commands.command()
@commands.guild_only()
async def baka(self, ctx, *, person=default):
"""Onii-san anata BAKA!"""
baka = nekos.img("baka")
embed = discord.Embed(color=0xFFB6C1)
embed.description = f"{ctx.author.mention} calls {person} a baka"
embed.set_image(url=baka)
await ctx.send(embed=embed)
@commands.command()
@commands.guild_only()
async def feed(self, ctx, *, person=default):
"""Give our lil friend sum to eat"""
feed = nekos.img("feed")
embed = discord.Embed(color=0xFFB6C1)
embed.description = f"{ctx.author.mention} feeds {person}"
embed.set_image(url=feed)
embed.set_footer(text="Eat Up!")
await ctx.send(embed=embed)
def setup(bot: commands.Bot):
bot.add_cog(Actions(bot))
``` |
{
"source": "6days9weeks/Kurisu",
"score": 3
} |
#### File: Kurisu/cogs/facts.py
```python
import discord
import aiohttp
from discord.ext import commands
class Facts(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def catfact(self, ctx):
"""Get a random cat fact"""
async with aiohttp.ClientSession() as cs:
async with cs.get("https://some-random-api.ml/facts/cat") as r:
await ctx.send((await r.json())["fact"])
@commands.command()
async def dogfact(self, ctx):
"""Get a random dog fact"""
async with aiohttp.ClientSession() as cs:
async with cs.get("https://some-random-api.ml/facts/dog") as r:
await ctx.send((await r.json())["fact"])
@commands.command()
async def pandafact(self, ctx):
"""Get a random panda fact"""
async with aiohttp.ClientSession() as cs:
async with cs.get("https://some-random-api.ml/facts/panda") as r:
await ctx.send((await r.json())["fact"])
@commands.command()
async def birdfact(self, ctx):
"""Get a random bird fact"""
async with aiohttp.ClientSession() as cs:
async with cs.get("https://some-random-api.ml/facts/bird") as r:
await ctx.send((await r.json())["fact"])
@commands.command()
    async def koalafact(self, ctx):
        """Get a random koala fact"""
        async with aiohttp.ClientSession() as cs:
            async with cs.get("https://some-random-api.ml/facts/koala") as r:
                await ctx.send((await r.json())["fact"])
def setup(bot):
bot.add_cog(Facts(bot))
```
#### File: Kurisu/cogs/moderation.py
```python
import discord
import typing
from utils.misc import check_hierachy
from discord.ext import commands
class Moderation(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
@commands.cooldown(1, 3, commands.BucketType.user)
@commands.guild_only()
@commands.has_permissions(kick_members=True)
    async def kick(self, ctx, member: discord.Member, *, reason=None):
"""Kicks a user"""
if await check_hierachy(ctx, member):
return
try:
await member.kick(reason=f"{reason} - {ctx.author.name}")
await ctx.send(f"⚠️{member.name} was kicked for {reason}")
await member.send(f"⚠️You were kicked from {ctx.guild} for {reason}")
except Exception as e:
await ctx.send(e)
@commands.command()
@commands.cooldown(1, 3, commands.BucketType.user)
@commands.guild_only()
@commands.has_permissions(ban_members=True)
async def ban(self, ctx, member: discord.Member, *, reason=None):
"""Bans a user"""
if await check_hierachy(ctx, member):
return
try:
await member.ban(reason=f"{reason} - {ctx.author.name}")
await ctx.send(f"🔴{member.name} was banned for {reason}")
await member.send(f"🔴You were banned from {ctx.guild} for {reason}")
except Exception as e:
await ctx.send(e)
@commands.command()
@commands.cooldown(1, 3, commands.BucketType.user)
@commands.guild_only()
@commands.has_permissions(manage_nicknames=True)
async def nickname(self, ctx, member: discord.Member, *, nickname=None):
"""Nicknames a user"""
        if await check_hierachy(ctx, member):
return
try:
if nickname is None:
await member.edit(nick=member.name)
await ctx.send(f"{ctx.author.name} your nickname was reset")
else:
await member.edit(nick=nickname)
await ctx.send(f"{member.name}'s nickname was changed to {nickname}")
except Exception as e:
await ctx.send(e)
@commands.command()
@commands.cooldown(1, 3, commands.BucketType.user)
@commands.guild_only()
@commands.has_permissions(ban_members=True)
async def unban(self, ctx, id: int = None):
if id is None:
await ctx.send("Please pass in a ID")
else:
try:
user = await self.bot.fetch_user(id)
await ctx.guild.unban(user)
await ctx.send(f"🟢Successfully unbanned `{user}`")
except Exception as e:
await ctx.send(e)
    @commands.command()
    @commands.guild_only()
    @commands.has_permissions(manage_messages=True)
    async def purge(self, ctx, limit=0):
        """Purges the given number of messages"""
if limit == 0:
await ctx.send("Please pass in a valid amount to purge.")
else:
await ctx.channel.purge(limit=limit + 1)
await ctx.send(f"Done. {limit} messages deleted", delete_after=5)
def setup(bot):
bot.add_cog(Moderation(bot))
```
#### File: cogs/wss/menu.py
```python
import datetime
from io import BytesIO
import discord
import matplotlib.pyplot as plt
from discord.ext import menus
def box(text: str, lang: str = "") -> str:
"""Get the given text in a code block.
Parameters
----------
text : str
The text to be marked up.
lang : `str`, optional
The syntax highlighting language for the codeblock.
Returns
-------
str
The marked up text.
"""
ret = "```{}\n{}\n```".format(lang, text)
return ret
class WSStatsMenu(menus.MenuPages, inherit_buttons=False):
def __init__(
self,
source: menus.PageSource,
header: str,
timeout: int = 30,
image: BytesIO = None,
):
super().__init__(
source,
timeout=timeout,
clear_reactions_after=True,
delete_message_after=True,
)
self.header = header
self.image = image
def should_add_reactions(self):
return True
def not_paginating(self):
return not self._source.is_paginating()
async def send_initial_message(self, ctx, channel):
page = await self._source.get_page(0)
kwargs = await self._get_kwargs_from_page(page)
msg = await channel.send(
**kwargs,
file=discord.File(self.image, filename="chart.png") if self.image else None,
)
if self.image:
self.image.close()
return msg
async def finalize(self, timed_out):
"""|coro|
A coroutine that is called when the menu loop has completed
its run. This is useful if some asynchronous clean-up is
required after the fact.
Parameters
--------------
timed_out: :class:`bool`
Whether the menu completed due to timing out.
"""
if timed_out and self.delete_message_after:
self.delete_message_after = False
async def go_to_first_page(self, payload):
"""go to the first page"""
await self.show_page(0)
@menus.button(
"\N{BLACK LEFT-POINTING TRIANGLE}\ufe0f",
position=menus.First(1),
skip_if=not_paginating,
)
async def go_to_previous_page(self, payload):
"""go to the previous page"""
await self.show_checked_page(self.current_page - 1)
@menus.button(
"\N{BLACK RIGHT-POINTING TRIANGLE}\ufe0f",
position=menus.Last(0),
skip_if=not_paginating,
)
async def go_to_next_page(self, payload):
"""go to the next page"""
await self.show_checked_page(self.current_page + 1)
@menus.button("\N{CROSS MARK}", position=menus.First(2))
async def stop_pages(self, payload: discord.RawReactionActionEvent) -> None:
self.stop()
class WSStatsPager(menus.AsyncIteratorPageSource):
def __init__(self, entries, add_image: bool = False):
self.add_image = add_image
super().__init__(entries, per_page=1)
async def format_page(self, wsmenu: WSStatsMenu, page):
e = discord.Embed(
title=wsmenu.header,
description=box(page, "ml"),
color=0xFFCDC2,
timestamp=datetime.datetime.utcnow(),
)
if self.add_image:
e.set_image(url="attachment://chart.png")
e.set_footer(text=f"Page {wsmenu.current_page + 1}")
return e
def create_counter_chart(data, title: str):
plt.clf()
most_common = data.most_common()
total = sum(data.values())
sizes = [(x[1] / total) * 100 for x in most_common][:20]
labels = [
f"{round(sizes[index], 1):.2f}% {x[0]}"
for index, x in enumerate(most_common[:20])
]
if len(most_common) > 20:
        # Keep the "Others" slice on the same percentage scale as the first 20 slices.
        others = sum(x[1] / total for x in most_common[20:]) * 100
        sizes.append(others)
        labels.append("{:.2f}% Others".format(others))
title = plt.title(title, color="white")
title.set_va("top")
title.set_ha("center")
plt.gca().axis("equal")
colors = [
"r",
"darkorange",
"gold",
"y",
"olivedrab",
"green",
"darkcyan",
"mediumblue",
"darkblue",
"blueviolet",
"indigo",
"orchid",
"mediumvioletred",
"crimson",
"chocolate",
"yellow",
"limegreen",
"forestgreen",
"dodgerblue",
"slateblue",
"gray",
]
pie = plt.pie(sizes, colors=colors, startangle=0)
plt.legend(
pie[0],
labels,
bbox_to_anchor=(0.7, 0.5),
loc="center",
fontsize=10,
bbox_transform=plt.gcf().transFigure,
facecolor="#ffffff",
)
plt.subplots_adjust(left=0.0, bottom=0.1, right=0.45)
image_object = BytesIO()
plt.savefig(image_object, format="PNG", facecolor="#36393E")
image_object.seek(0)
return image_object
```
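A small, hedged example of feeding `create_counter_chart` a `collections.Counter` and saving the returned PNG; the event names and output file are arbitrary:
```python
# Hypothetical usage of create_counter_chart outside the menu classes.
from collections import Counter

data = Counter({"MESSAGE_CREATE": 120, "TYPING_START": 45, "PRESENCE_UPDATE": 30})
image = create_counter_chart(data, title="Gateway events")  # returns a BytesIO
with open("chart.png", "wb") as fp:
    fp.write(image.read())
image.close()
```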
#### File: Kurisu/utils/misc.py
```python
import discord
from discord.ext import commands
from datetime import timedelta
from datetime import datetime
# Credit Goes To crazygmr101/aoi-bot
def time_notation(td: timedelta, sep="", full=False):
hours = td.seconds // 3600
minutes = (td.seconds % 3600) // 60
return sep.join(
[
f"{td.days}{'days' if full else 'd '}",
f"{hours}{'hours' if full else 'h '}",
f"{minutes}{'minutes' if full else 'm '}",
]
)
bot_start_time = datetime.now()
async def check_hierachy(ctx, member):
try:
if ctx.author.id == ctx.guild.owner.id:
return False
elif member == ctx.author:
return await ctx.send(f"You can't {ctx.command.name} yourself lmao")
elif member.id == ctx.bot.user.id:
return await ctx.send(
"You'd really use my own moderation commands on me. hmph"
)
elif member == ctx.guild.owner:
return await ctx.send(f"You can't {ctx.command.name} the owner lmao")
elif ctx.author.top_role <= member.top_role:
return await ctx.send(
"You cant use this command on someone equal or higher than yourself"
)
except Exception as e:
pass
``` |
{
"source": "6days9weeks/Novus",
"score": 3
} |
#### File: utils/checks/is_bot_support.py
```python
from discord.ext import commands
import discord
class NotBotSupport(commands.MissingRole):
"""
The generic error for the bot failing the :func:`voxelbotutils.checks.is_bot_support` check -
is a subclass of :class:`discord.ext.commands.MissingRole`.
"""
def __init__(self):
super().__init__("Bot Support Team")
def is_bot_support():
"""
Checks whether or not the calling user has the bot support role, as defined in the bot's configuration
file (:attr:`config.bot_support_role_id`). As it checks a role ID, this will only work it the command in quesiton is called
in a guild where the calling user *has* the given role.
Raises:
NotBotSupport: If the given user isn't a member of the bot's support team.
"""
async def predicate(ctx: commands.Context):
if ctx.author.id in ctx.bot.owner_ids:
return True
support_guild = await ctx.bot.fetch_support_guild()
if support_guild is None:
raise NotBotSupport()
try:
member = support_guild.get_member(ctx.author.id) or await support_guild.fetch_member(ctx.author.id)
if member is None:
raise AttributeError()
except (discord.HTTPException, AttributeError):
raise NotBotSupport()
if ctx.bot.config.get("bot_support_role_id", None) in [i.id for i in member.roles] or ctx.author.id in ctx.bot.owner_ids:
return True
raise NotBotSupport()
return commands.check(predicate)
```
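A hedged sketch of attaching the check to a command; the `voxelbotutils` import alias and the command body are assumptions taken from the docstring wording, not from this file:
```python
# Hypothetical cog using the bot support check.
import voxelbotutils as vbu  # import path assumed from the docstring

class Admin(vbu.Cog):

    @vbu.command()
    @vbu.checks.is_bot_support()
    async def supportping(self, ctx: vbu.Context):
        """Only members holding config.bot_support_role_id get this far."""
        await ctx.send("You are on the support team.")
```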
#### File: utils/checks/is_config_set.py
```python
from discord.ext import commands
class ConfigNotSet(commands.DisabledCommand):
"""
This is a subclass of :class:`discord.ext.commands.DisabledCommand` raised exclusively by the
:func:`is_config_set<voxelbotutils.checks.is_config_set>` check. For normal users, this should just say
that the command is disabled.
"""
def is_config_set(*config_keys):
"""
Checks that your config has been set given the keys for the item. Items are run as `__getitem__`s
for the following item. So for a config where you want to check that `config["api_keys"]["example"]`
has been set, you would write your check as `is_config_set("api_keys", "example")`.
Raises:
ConfigNotSet: If the config item hasn't been set for the bot.
"""
def predicate(ctx: commands.Context):
working_config = ctx.bot.config
try:
for key in config_keys:
working_config = working_config[key]
except (KeyError, TypeError):
raise ConfigNotSet()
if not working_config:
ctx.bot.logger.warning(f"No config is set for {'.'.join(config_keys)}")
raise ConfigNotSet()
return True
return commands.check(predicate)
```
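A short usage sketch matching the docstring's `config["api_keys"]["example"]` case; the import alias and the command itself are hypothetical:
```python
# Hypothetical command gated on a config value.
import voxelbotutils as vbu  # import path assumed, as above

class Example(vbu.Cog):

    @vbu.command()
    @vbu.checks.is_config_set("api_keys", "example")
    async def callapi(self, ctx: vbu.Context):
        """Runs only when config["api_keys"]["example"] is truthy."""
        await ctx.send("The API key is configured.")
```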
#### File: cogs/utils/component_check.py
```python
import asyncio
from typing import Union, Callable, Optional
import discord
def component_check(
user: Union[discord.User, discord.Member],
message: discord.Message,
no_interact_message: Optional[str] = discord.utils.MISSING) -> Callable[[discord.Interaction], bool]:
"""
A check for a wait_for that allows only a user to interact with the given
button, outputting the no interaction message.
.. versionadded:: 0.6.6
Parameters
----------
user : Union[discord.User, discord.Member]
The user who's allowed to interact with the message.
message : discord.Message
The message that the user is allowed to interact with.
no_interact_message : Optional[str]
The content that's output when a non-valid user interacts with the button.
.. versionchanged:: 0.7.0
You can now disable a response being sent by passing ``None`` to this parameter. If you do, a deferred
update will still be sent.
Returns
-------
Callable[[discord.Interaction], bool]
A callable check for interaction events where only the supplied user is allowed to interact.
"""
if no_interact_message == discord.utils.MISSING:
no_interact_message = f"Only {user.mention} can interact with this component."
def check(payload: discord.Interaction):
assert payload.message
assert payload.user
if payload.message.id != message.id:
return False
if payload.user.id != user.id:
loop = asyncio.get_event_loop()
if no_interact_message:
loop.create_task(payload.response.send_message(
no_interact_message,
ephemeral=True,
allowed_mentions=discord.AllowedMentions.none(),
))
else:
loop.create_task(payload.response.defer_update())
return False
return True
return check
```
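A hedged sketch of using `component_check` with `wait_for`; the `"component_interaction"` event name and the surrounding helper are assumptions about the host library, not taken from this file:
```python
# Hypothetical helper: only ctx.author may press buttons attached to `message`.
import asyncio

async def wait_for_confirmation(ctx, message):
    check = component_check(ctx.author, message, "This confirmation is not for you.")
    try:
        # Event name assumed from the library's conventions.
        payload = await ctx.bot.wait_for("component_interaction", check=check, timeout=60)
    except asyncio.TimeoutError:
        return None
    await payload.response.defer_update()
    return payload.custom_id
```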
#### File: utils/converters/boolean_converter.py
```python
import asyncio
import discord
from discord.ext import commands
class BooleanConverter(commands.Converter):
"""
Converts the given input into a boolean yes/no, defaulting to "no" if something couldn't be
properly converted rather than raising an error.
"""
TICK_EMOJIS = [
"\N{HEAVY CHECK MARK}",
"\N{HEAVY MULTIPLICATION X}",
]
@classmethod
async def add_tick_emojis(cls, message: discord.Message):
"""
Add boolean reactions to the given message.
"""
for e in cls.TICK_EMOJIS:
await message.add_reaction(e)
@classmethod
def add_tick_emojis_non_async(cls, message: discord.Message):
"""
Add boolean reactions to the given message as a non-awaitable.
"""
return asyncio.Task(cls.add_tick_emojis(message))
@classmethod
async def convert(cls, ctx, argument):
return any([
argument.lower() in ['y', 'yes', 'true', 'definitely', 'ye', 'ya', 'yas', 'ok', 'okay', '1', 't'],
argument in ['\N{HEAVY CHECK MARK}', '<:tick_yes:596096897995899097>'],
])
```
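A hedged sketch of the converter used as a command annotation; the command itself is hypothetical:
```python
# Hypothetical command: "yes", "ok", "1", "✔" and friends convert to True, anything else to False.
from discord.ext import commands

@commands.command()
async def setdms(ctx, enabled: BooleanConverter):
    await ctx.send(f"DMs {'enabled' if enabled else 'disabled'}.")
```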
#### File: utils/converters/filtered_user.py
```python
from discord.ext import commands
class FilteredUser(commands.UserConverter):
"""
A simple :class:`discord.ext.commands.UserConverter` that doesn't allow bots
or the author to be passed into the function.
"""
def __init__(self, *, allow_author: bool = False, allow_bots: bool = False):
super().__init__()
self.allow_author = allow_author
self.allow_bots = allow_bots
async def convert(self, ctx: commands.Context, argument: str):
m = await super().convert(ctx, argument)
if self.allow_author is False and ctx.author.id == m.id:
raise commands.BadArgument("You can't run this command on yourself.")
if self.allow_bots is False and m.bot:
raise commands.BadArgument("You can't run this command on bots.")
return m
class FilteredMember(commands.MemberConverter):
"""
A simple :class:`discord.ext.commands.MemberConverter` that doesn't allow bots
or the author to be passed into the function.
"""
def __init__(self, *, allow_author: bool = False, allow_bots: bool = False):
super().__init__()
self.allow_author = allow_author
self.allow_bots = allow_bots
async def convert(self, ctx: commands.Context, argument: str):
m = await super().convert(ctx, argument)
if self.allow_author is False and ctx.author.id == m.id:
raise commands.BadArgument("You can't run this command on yourself.")
if self.allow_bots is False and m.bot:
raise commands.BadArgument("You can't run this command on bots.")
return m
```
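A hedged sketch of the filtered converters in a command signature; the command itself is hypothetical:
```python
# Hypothetical command: bots and the invoking user are rejected with BadArgument
# before the body runs.
from discord.ext import commands

@commands.command()
async def marry(ctx, target: FilteredMember(allow_author=False, allow_bots=False)):
    await ctx.send(f"{ctx.author.mention} proposes to {target.mention}!")
```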
#### File: cogs/utils/custom_command.py
```python
from discord.ext import commands
from .custom_cog import Cog
class Command(commands.Command):
def __init__(self, *args, **kwargs):
super().__init__(*args, cooldown_after_parsing=kwargs.pop('cooldown_after_parsing', True), **kwargs)
class Group(commands.Group):
def __init__(self, *args, **kwargs):
super().__init__(*args, cooldown_after_parsing=kwargs.pop('cooldown_after_parsing', True), **kwargs)
def group(self, *args, **kwargs):
kwargs.setdefault('cls', Group)
kwargs.setdefault('case_insensitive', self.case_insensitive)
return super().group(*args, **kwargs)
def command(self, *args, **kwargs):
kwargs.setdefault('cls', Command)
return super().command(*args, **kwargs)
```
#### File: utils/database/mysql.py
```python
from __future__ import annotations
import typing
import aiomysql
from .types import DriverWrapper
if typing.TYPE_CHECKING:
from .types import UserDatabaseConfig, DatabaseConfig
from .model import DatabaseWrapper, DatabaseTransaction
class MysqlDatabaseWrapper(DatabaseWrapper):
config: UserDatabaseConfig
pool: aiomysql.pool.Pool
conn: typing.Optional[aiomysql.Connection]
cursor: aiomysql.Cursor
caller: aiomysql.Cursor
class MysqlDatabaseTransaction(DatabaseTransaction):
parent: MysqlDatabaseWrapper
_transaction: None
is_active: bool
commit_on_exit: bool
class MysqlWrapper(DriverWrapper):
@staticmethod
async def create_pool(config: DatabaseConfig) -> aiomysql.Pool:
return await aiomysql.create_pool(**config, autocommit=True)
@staticmethod
async def get_connection(dbw: typing.Type[MysqlDatabaseWrapper]) -> MysqlDatabaseWrapper:
connection: aiomysql.Connection = await dbw.pool.acquire()
cursor = await connection.cursor(aiomysql.DictCursor)
v = dbw(
conn=connection,
cursor=cursor,
)
v.is_active = True
return v
@staticmethod
async def release_connection(dbw: MysqlDatabaseWrapper) -> None:
assert dbw.conn
assert dbw.cursor
await dbw.cursor.close()
await dbw.pool.release(dbw.conn)
dbw.conn = None
dbw.is_active = False
@classmethod
async def start_transaction(cls, tra: MysqlDatabaseTransaction):
assert tra.parent.conn
await tra.parent.conn.begin()
@staticmethod
async def commit_transaction(tra: MysqlDatabaseTransaction) -> None:
assert tra.parent.conn
await tra.parent.conn.commit()
@staticmethod
async def rollback_transaction(tra: MysqlDatabaseTransaction) -> None:
assert tra.parent.conn
await tra.parent.conn.rollback()
@staticmethod
async def fetch(dbw: MysqlDatabaseWrapper, sql: str, *args) -> typing.List[typing.Any]:
await dbw.caller.execute(sql, args)
data = await dbw.caller.fetchall()
return data or list()
@staticmethod
async def executemany(dbw: MysqlDatabaseWrapper, sql: str, *args_list) -> None:
assert dbw.conn
await dbw.caller.executemany(sql, args_list)
def prepare(self) -> typing.Generator[str, None, None]:
while True:
yield "%s"
```
#### File: utils/database/sqlite_.py
```python
from __future__ import annotations
import typing
import aiosqlite
from .types import DriverWrapper
if typing.TYPE_CHECKING:
from .types import UserDatabaseConfig, DatabaseConfig
from .model import DatabaseWrapper, DatabaseTransaction
class SQLiteDatabaseWrapper(DatabaseWrapper):
config: UserDatabaseConfig
pool: None
conn: typing.Optional[aiosqlite.Connection]
cursor: typing.Optional[aiosqlite.Cursor]
caller: aiosqlite.Connection
class SQLiteDatabaseTransaction(DatabaseTransaction):
parent: SQLiteDatabaseWrapper
_transaction: None
is_active: bool
commit_on_exit: bool
class RowWrapper(aiosqlite.Row):
def values(self):
for i in self.keys():
yield self[i]
def items(self):
for i in self.keys():
yield (i, self[i])
class SQLiteWrapper(DriverWrapper):
@staticmethod
async def create_pool(config: DatabaseConfig) -> None:
return None
@staticmethod
async def get_connection(dbw: typing.Type[SQLiteDatabaseWrapper]) -> SQLiteDatabaseWrapper:
connection = await aiosqlite.connect(dbw.config.get("database"))
connection.row_factory = RowWrapper
v = dbw(
conn=connection,
)
v.is_active = True
return v
@staticmethod
async def release_connection(dbw: SQLiteDatabaseWrapper) -> None:
assert dbw.conn
await dbw.conn.close()
if dbw.cursor:
try:
await dbw.cursor.close()
except ValueError:
pass
dbw.cursor = None
dbw.conn = None
dbw.is_active = False
@staticmethod
async def fetch(dbw: SQLiteDatabaseWrapper, sql: str, *args) -> typing.List[typing.Any]:
if dbw.cursor:
try:
await dbw.cursor.close()
except ValueError:
pass
dbw.cursor = None
cursor: aiosqlite.Cursor = await dbw.caller.execute(sql, args)
await dbw.conn.commit()
dbw.cursor = cursor
return await cursor.fetchall() or list()
@staticmethod
async def executemany(dbw: SQLiteDatabaseWrapper, sql: str, *args_list) -> None:
assert dbw.conn
await dbw.caller.executemany(sql, args_list)
def prepare(self) -> typing.Generator[str, None, None]:
while True:
yield "?"
```
#### File: utils/database/types.py
```python
from __future__ import annotations
import typing
from .model import DatabaseTransaction
if typing.TYPE_CHECKING:
from .model import DatabaseWrapper
class DatabaseConfig(typing.TypedDict):
enabled: bool
host: str
port: int
database: str
user: str
password: str
class UserDatabaseConfig(DatabaseConfig):
enabled: bool
class DriverFetchConnection(typing.Protocol):
async def fetch(self):
raise NotImplementedError()
class DriverExecuteConnection(typing.Protocol):
async def execute(self):
raise NotImplementedError()
DriverConnection = typing.Union[DriverFetchConnection, DriverExecuteConnection]
class DriverPool(typing.Protocol):
async def acquire(self) -> DriverConnection:
raise NotImplementedError()
async def release(self, connection: DriverConnection) -> None:
raise NotImplementedError()
async def close(self) -> None:
raise NotImplementedError()
class DriverWrapper(typing.Protocol):
@staticmethod
async def create_pool(config: DatabaseConfig) -> DriverPool:
"""Connect to your database driver using the given config."""
raise NotImplementedError()
@staticmethod
async def get_connection(dbw: typing.Type[DatabaseWrapper]) -> DatabaseWrapper:
"""Get a connection from the database pool and return a wrapper around the given connection."""
raise NotImplementedError()
@staticmethod
async def release_connection(dbw: DatabaseWrapper) -> None:
"""Release the connection back into the pool."""
raise NotImplementedError()
@classmethod
def transaction(cls: typing.Type[DriverWrapper], dbw: DatabaseWrapper, *, commit_on_exit: bool = True):
"""Make a transaction instance with the connection's current instance."""
return DatabaseTransaction(cls, dbw, commit_on_exit=commit_on_exit)
@staticmethod
async def start_transaction(dbw: DatabaseTransaction) -> None:
"""Start a transaction from the transaction wrapper."""
raise NotImplementedError()
@staticmethod
async def commit_transaction(dbw: DatabaseTransaction) -> None:
"""Commit the transaction from the wrapper."""
raise NotImplementedError()
@staticmethod
async def rollback_transaction(dbw: DatabaseTransaction) -> None:
"""Rollback the commits from the transaction."""
raise NotImplementedError()
@staticmethod
async def fetch(dbw: DatabaseWrapper, sql: str, *args: typing.Any) -> typing.List[typing.Any]:
"""Run some SQL in your database."""
raise NotImplementedError()
@staticmethod
async def executemany(dbw: DatabaseWrapper, sql: str, *args_list: typing.Iterable[typing.Any]) -> None:
"""Run some SQL in your database."""
raise NotImplementedError()
```
#### File: cogs/utils/__init__.py
```python
import re as _re
import gettext as _gettext
import typing as _typing
import discord as _discord
from discord.ext import commands as _dpy_commands
from . import checks, converters, errors, menus, types
from .context_embed import Embed
from .custom_bot import MinimalBot, Bot
from .custom_cog import Cog
from .custom_command import Command, Group
from .custom_context import Context, AbstractMentionable, PrintContext, SlashContext
from .database import DatabaseWrapper, DatabaseTransaction
from .redis import RedisConnection, RedisChannelHandler, redis_channel_handler
from .statsd import StatsdConnection
from .time_value import TimeValue
from .paginator import Paginator
from .help_command import HelpCommand
from .string import Formatter
from .component_check import component_check
from .embeddify import Embeddify
from .twitch_stream import TwitchStream
def command(*args, **kwargs):
return _dpy_commands.command(*args, cls=Command, **kwargs)
def group(*args, **kwargs):
if 'case_insensitive' not in kwargs:
kwargs['case_insensitive'] = True
return _dpy_commands.group(*args, cls=Group, **kwargs)
_html_minifier = _re.compile(r"\s{2,}|\n")
def minify_html(text: str) -> str:
return _html_minifier.sub("", text)
def translation(
ctx: _typing.Union[_dpy_commands.Context, _discord.Interaction, _discord.Locale, str],
domain: str,
*,
use_guild: bool = False,
**kwargs,
) -> _typing.Union[_gettext.GNUTranslations, _gettext.NullTranslations]:
"""
Get a translation table for a given domain with the locale
stored in a context.
Examples
----------
>>> # This will get the locale from your context,
>>> # and will get the translation from the "errors" file.
>>> vbu.translation(ctx, "errors").gettext("This command is currently unavailable")
Parameters
-----------
ctx: Union[:class:`discord.ext.commands.Context`, :class:`discord.Interaction`, :class:`discord.Locale`, :class:`str`]
The context that you want to get the translation within, or
the name of the locale that you want to get anyway.
domain: :class:`str`
The domain of the translation.
use_guild: :class:`bool`
Whether or not to prioritize the guild locale over the user locale.
Returns
--------
Union[:class:`gettext.GNUTranslations`, :class:`gettext.NullTranslations`]
        The translation table object that you want to ``.gettext`` for.
"""
if isinstance(ctx, (_dpy_commands.Context, _discord.Interaction)):
languages = [ctx.locale, ctx.locale.split("-")[0]]
if use_guild and ctx.guild and ctx.guild_locale:
languages = [ctx.guild_locale, ctx.guild_locale.split("-")[0], *languages]
elif isinstance(ctx, _discord.Locale):
languages = [ctx.value, ctx.value.split("-")[0]]
elif isinstance(ctx, str):
languages = [ctx]
else:
raise TypeError()
return _gettext.translation(
domain=domain,
localedir=kwargs.get("localedir", "./locales"),
languages=languages,
fallback=kwargs.get("fallback", True),
)
_formatter = Formatter()
format = _formatter.format
embeddify = Embeddify.send
DatabaseConnection = DatabaseWrapper
Database = DatabaseWrapper
Redis = RedisConnection
Stats = StatsdConnection
```
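A short usage sketch for the `command`/`group` helpers exported above. The `vbu` alias and the `voxelbotutils` package name are taken from docstrings elsewhere in this file; the cog and command names are illustrative.
```python
# Sketch only: cog and command names are illustrative.
import voxelbotutils as vbu

class Moderation(vbu.Cog):

    @vbu.command()
    async def ping(self, ctx: vbu.Context):
        """Registered with the custom Command class via the helper above."""
        await ctx.send("Pong!")

    @vbu.group()
    async def tag(self, ctx: vbu.Context):
        """Groups default to case_insensitive=True, as set in the helper."""
        ...

def setup(bot: vbu.Bot):
    bot.add_cog(Moderation(bot))
```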
#### File: utils/menus/menu.py
```python
from __future__ import annotations
import asyncio
from typing import (
TYPE_CHECKING,
Callable,
Dict,
Tuple,
Type,
TypeVar,
Awaitable,
Union,
Optional,
List,
Iterable,
Any,
overload,
)
import inspect
import uuid
import discord
from discord.ext import commands
from .errors import ConverterTimeout
from .option import Option
from .mixins import MenuDisplayable
from .callbacks import MenuCallbacks
from .converter import Converter
from ..custom_cog import Cog
from ..custom_command import Command
from ..custom_bot import Bot
if TYPE_CHECKING:
from ..custom_context import Context, SlashContext
ContextCallable = Callable[[Context], None]
AwaitableContextCallable = Callable[[Context], Awaitable[None]]
MaybeCoroContextCallable = Union[ContextCallable, AwaitableContextCallable]
AnyContext = Union[Context, SlashContext]
AnyBot = Union[discord.Client, commands.Bot, Bot]
T = TypeVar("T")
@overload
def _do_nothing(return_value: Type[T]) -> Callable[[], T]:
...
@overload
def _do_nothing(return_value=None) -> Callable[[], None]:
...
def _do_nothing(return_value: Optional[Type[T]] = None) -> Callable[[], Optional[T]]:
def wrapper(*args, **kwargs) -> Optional[T]:
if return_value:
return return_value()
return return_value
return wrapper
class Menu(MenuDisplayable):
"""
A menu using components that's meant to ease up the process of doing settings within your bot.
"""
callbacks = MenuCallbacks
def __init__(
self,
*options: Option,
display: Optional[str] = None,
component_display: Optional[str] = None):
"""
Parameters
----------
*options : Option
A list of options that are inside the menu.
        display : Optional[str]
When this menu itself is an option, this is the text that is
displayed on the parent menu.
        component_display : Optional[str]
When this menu itself is an option, this is the text that is
displayed for the parent menu's component.
"""
self.display: Optional[str] = display # Used for nested menus
self.component_display: Optional[str] = component_display # Used for nested menus
self._options = list(options)
@overload
def create_cog(
self,
bot: None = None,
*,
cog_name: str = "Bot Settings",
name: str = "settings",
aliases: List[str] = ["setup"],
permissions: Optional[List[str]] = None,
post_invoke: Optional[MaybeCoroContextCallable] = None,
guild_only: bool = True,
**command_kwargs) -> Type[commands.Cog]:
...
@overload
def create_cog(
self,
bot: AnyBot = ...,
*,
cog_name: str = "Bot Settings",
name: str = "settings",
aliases: List[str] = ["setup"],
permissions: Optional[List[str]] = None,
post_invoke: Optional[MaybeCoroContextCallable] = None,
guild_only: bool = True,
**command_kwargs) -> commands.Cog:
...
def create_cog(
self,
bot: Optional[AnyBot] = None,
*,
cog_name: str = "Bot Settings",
name: str = "settings",
aliases: List[str] = ["setup"],
permissions: Optional[List[str]] = None,
post_invoke: Optional[MaybeCoroContextCallable] = None,
guild_only: bool = True,
**command_kwargs
) -> Union[commands.Cog, Type[commands.Cog]]:
"""
Creates a cog that can be loaded into the bot in a setup method.
Parameters
----------
bot : Optional[Bot]
The bot object. If given, the cog will be instantiated with that object.
cog_name : Optional[str]
The name of the cog to be added.
name : Optional[str]
The name of the command to be added.
aliases : Optional[List[str]]
A list of aliases to be added to the settings command.
permissions : Optional[List[str]]
            A list of permission names that should be required to run the command.
post_invoke : Optional[MaybeCoroContextCallable]
A post-invoke method that can be called.
guild_only : Optional[bool]
If the command should be guild-only.
**command_kwargs
Arguments to be passed down to the command decorator.
Returns
-------
Union[commands.Cog, Type[commands.Cog]]
Either a cog type to add to your bot, or if a bot instance was passed
as a parameter, the added cog instance.
"""
permissions = permissions if permissions is not None else ["manage_guild"]
meta = commands.ApplicationCommandMeta(guild_only=guild_only)
class NestedCog(Cog, name=cog_name):
def __init__(nested_self, bot):
super().__init__(bot)
if guild_only:
nested_self.settings.add_check(commands.guild_only().predicate)
def cog_unload(nested_self):
nested_self.bot.remove_command(nested_self.settings.name)
super().cog_unload()
@commands.command(
cls=Command,
name=name,
aliases=aliases,
application_command_meta=command_kwargs.pop("application_command_meta", meta),
**command_kwargs,
)
# @commands.defer()
@commands.has_permissions(**{i: True for i in permissions})
@commands.bot_has_permissions(send_messages=True, embed_links=True)
async def settings(nested_self, ctx):
"""
Modify some of the bot's settings.
"""
# Make sure it's a slashie
if not isinstance(ctx, commands.SlashContext):
return await ctx.send("This command can only be run as a slash command.")
await ctx.interaction.response.send_message("Loading menu...")
# Get a guild if we need to
if ctx.interaction.guild_id:
guild = await ctx.bot.fetch_guild(ctx.interaction.guild_id)
channels = await guild.fetch_channels()
guild._channels = {i.id: i for i in channels} # Fetching a guild doesn't set channels :/
ctx._guild = guild
# Start the menu
await self.start(ctx)
# Post invoke
if post_invoke is None:
return
                if inspect.iscoroutinefunction(post_invoke):  # check the callable itself, not a coroutine object
await post_invoke(ctx)
else:
post_invoke(ctx)
if bot:
return NestedCog(bot)
return NestedCog
async def get_options(
self,
ctx: commands.SlashContext,
force_regenerate: bool = False) -> List[Option]:
"""
Get all of the options for an instance.
This method has an open database instance in :code:`ctx.database`.
"""
return self._options
async def start(
self,
ctx: commands.SlashContext,
delete_message: bool = False) -> None:
"""
Run the menu instance.
Parameters
----------
ctx : vbu.SlashContext
A context object to run the settings menu from.
delete_message : Optional[bool]
Whether or not to delete the menu message when the menu is
completed.
"""
# Set up our base case
component_custom_id: str = str(uuid.uuid4())
sendable_data: dict = await self.get_sendable_data(ctx, component_custom_id)
sent_components: discord.ui.MessageComponents = sendable_data['components']
component_custom_ids: List[str] = []
# Send the initial message
if not isinstance(ctx, commands.SlashContext):
await ctx.send(**sendable_data) # No interaction? Somehow?
elif ctx.interaction.response.is_done:
await ctx.interaction.edit_original_message(**sendable_data)
else:
await ctx.interaction.response.edit_message(**sendable_data)
        # Set up a function to build the check for which components the user can click
def get_button_check(valid_ids: List[str]):
def button_check(payload: discord.Interaction):
if payload.custom_id not in valid_ids:
return False
if payload.user.id == ctx.interaction.user.id:
return True
ctx.bot.loop.create_task(payload.response.send_message(
f"Only {ctx.interaction.user.mention} can interact with these buttons.",
ephemeral=True,
))
return False
return button_check
# Keep looping while we're expecting a user input
while True:
# Get the valid custom IDs for this menu
component_custom_ids.clear()
for ar in sent_components.components:
for co in ar.components:
component_custom_ids.append(co.custom_id)
# Wait for the user to click on a button
try:
payload: discord.Interaction = await ctx.bot.wait_for(
"component_interaction",
check=get_button_check(component_custom_ids),
timeout=60.0,
)
await payload.response.defer_update()
ctx.interaction = payload
except asyncio.TimeoutError:
break
# From this point onwards in the loop, we'll always have an interaction
# within the context object.
# Determine the option they clicked for
clicked_option = None
options = await self.get_options(ctx)
for i in options:
if i._component_custom_id == payload.custom_id:
clicked_option = i
break
if clicked_option is None:
break
# Run the given option
# This may change the interaction object within the context,
# but at all points it should be deferred (update)
if isinstance(clicked_option._callback, Menu):
await clicked_option._callback.start(ctx)
else:
await clicked_option.run(ctx)
# Edit the message with our new buttons
sendable_data = await self.get_sendable_data(ctx, component_custom_id)
if ctx.interaction.response.is_done:
await ctx.interaction.edit_original_message(**sendable_data)
else:
await ctx.interaction.response.edit_message(**sendable_data)
# Disable the buttons before we leave
try:
if delete_message:
await ctx.interaction.delete_original_message()
else:
await ctx.interaction.edit_original_message(components=None)
except Exception:
pass
async def get_sendable_data(
self,
ctx: commands.SlashContext,
component_custom_id: Optional[str] = None) -> dict:
"""
Gets a dictionary of sendable objects to unpack for the :func:`start` method.
"""
# Make our output lists
output_strings = []
buttons = []
# Add items to the list
async with ctx.bot.database() as db:
ctx.database = db # type: ignore - context doesn't have slots deliberately
options = await self.get_options(ctx, force_regenerate=True)
for i in options:
output = await i.get_display(ctx)
if output:
output_strings.append(f"\N{BULLET} {output}")
style = (
discord.ui.ButtonStyle.secondary
if isinstance(i._callback, Menu)
else None
) or i._button_style or discord.ui.ButtonStyle.primary
buttons.append(discord.ui.Button(
label=i.component_display,
custom_id=i._component_custom_id,
style=style,
))
ctx.database = None # type: ignore - context doesn't have slots deliberately
# Add a done button
buttons.append(
discord.ui.Button(
label="Done",
custom_id=component_custom_id,
style=discord.ui.ButtonStyle.success,
),
)
# Output
components = discord.ui.MessageComponents.add_buttons_with_rows(*buttons)
embed = discord.Embed(colour=0xffffff)
embed.description = "\n".join(output_strings) or "No options added."
return {
"content": None,
"embeds": [embed],
"components": components,
}
class MenuIterable(Menu, Option):
"""
A menu instance that takes and shows iterable data.
"""
allow_none = False
def __init__(
self,
*,
select_sql: str,
insert_sql: str,
delete_sql: str,
row_text_display: Callable[[AnyContext, Dict[str, Any]], str],
row_component_display: Callable[[AnyContext, Dict[str, Any]], Union[str, Tuple[str, str]]],
converters: List[Converter],
select_sql_args: Callable[[AnyContext], Iterable[Any]],
insert_sql_args: Callable[[AnyContext, List[Any]], Iterable[Any]],
delete_sql_args: Callable[[AnyContext, Dict[str, Any]], Iterable[Any]],
cache_callback: Optional[Callable[[AnyContext, List[Any]], None]] = None,
cache_delete_callback: Optional[Callable[[str], Callable[[AnyContext, List[Any]], None]]] = None,
cache_delete_args: Optional[Callable[[Dict[str, Any]], Iterable[Any]]] = None):
"""
Parameters
----------
select_sql : str
The SQL that should be used to select the rows to be displayed from the database.
insert_sql : str
The SQL that should be used to insert the data into the database.
delete_sql : str
The SQL that should be used to delete a row from the database.
row_text_display : Callable[[AnyContext, Dict[str, Any]], str]
A function returning a string which should
            be shown in the menu. The dict given is the row from the database.
row_component_display : Callable[[AnyContext, Dict[str, Any]], Union[str, Tuple[str, str]]]
A function returning a string which should be shown on the component.
The dict given is the row from the database. If one string is
returned, it's used for both the button and its custom ID.
If two strings are given, the first is used for the button
and the second for the custom ID.
converters : List[Converter]
A list of converters that the user should be asked for.
select_sql_args : Callable[[AnyContext], Iterable[Any]]
A function returning a list of arguments that should be
passed to the database select. The list given is args
that are passed to the select statement.
insert_sql_args : Callable[[AnyContext, List[Any]], Iterable[Any]]
A function returning a list of arguments that should
be passed to the database insert. The list given is
a list of items returned from the option.
delete_sql_args : Callable[[AnyContext, Dict[str, Any]], Iterable[Any]]
A function returning a list of arguments that should
be passed to the database delete. The dict given is
a row from the database.
cache_callback : Optional[Callable[[AnyContext, List[Any]], None]]
A function that takes in a context and a list of
converted items from the user for you to cache as
you please.
cache_delete_callback : Optional[Callable[[str], Callable[[AnyContext, List[Any]], None]]]
A function that returns a function that takes in
a context and a list of converted items from
the user for you to remove from the cache as you please.
The initial function takes in the data returned from
``cache_delete_args``.
cache_delete_args : Optional[Callable[[Dict[str, Any]], Iterable[Any]]]
A function that takes in a row from the database
and returns a list of items to be passed into
``cache_delete_callback``.
"""
self.row_text_display = row_text_display
self.row_component_display = row_component_display
self.converters = converters
self.cache_callback = cache_callback or _do_nothing()
self.cache_delete_callback = cache_delete_callback or _do_nothing()
self.cache_delete_args = cache_delete_args or _do_nothing(list)
self.select_sql = select_sql
self.select_sql_args = select_sql_args or _do_nothing(list)
self.insert_sql = insert_sql
self.insert_sql_args = insert_sql_args or _do_nothing(list)
self.delete_sql = delete_sql
self.delete_sql_args = delete_sql_args or _do_nothing(list)
self._options = None
def insert_database_call(self):
"""
Run the insert database call.
"""
async def wrapper(ctx, data):
args = self.insert_sql_args(ctx, data)
async with ctx.bot.database() as db:
await db(self.insert_sql, *args)
return wrapper
def delete_database_call(self, row):
"""
Run the delete database call.
"""
async def wrapper(ctx, data):
args = self.delete_sql_args(ctx, row)
async with ctx.bot.database() as db:
await db(self.delete_sql, *args)
return wrapper
async def get_options(
self,
ctx: SlashContext,
force_regenerate: bool = False):
"""
Get all of the options for an instance.
This method has an open database instance in :code:`Context.database`.
"""
# Let's not generate new ones if we don't need to
if not force_regenerate and self._options is not None:
return self._options
# Grab our data from the database
rows = await ctx.database(self.select_sql, *list(self.select_sql_args(ctx)))
generated = []
# Make buttons for deleting the data
for i in rows:
v = Option(
display=self.row_text_display(ctx, i),
component_display=self.row_component_display(ctx, i),
callback=self.delete_database_call(i),
cache_callback=self.cache_delete_callback(*list(self.cache_delete_args(i)))
)
v._button_style = discord.ui.ButtonStyle.danger
generated.append(v)
# Add "add new" button
if len(generated) <= 20:
v = Option(
display=None,
component_display="Add New",
converters=self.converters,
callback=self.insert_database_call(),
cache_callback=self.cache_callback
)
v._button_style = discord.ui.ButtonStyle.secondary
generated.append(v)
# And return
self._options = generated
return generated
```
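As a rough usage sketch (not taken from this project): the `Option` keyword arguments below are inferred from how `MenuIterable.get_options` constructs options above, and the callback signature mirrors the `wrapper(ctx, data)` callables used there.
```python
# Sketch only: option text, callback body, and cog name are illustrative.
async def set_prefix(ctx, data):
    # `data` holds whatever the option's converters collected from the user.
    ...

settings_menu = Menu(
    Option(
        display="Change the guild prefix",
        component_display="Prefix",
        callback=set_prefix,
    ),
)

def setup(bot):
    # With a bot instance passed in, create_cog returns an instantiated cog.
    bot.add_cog(settings_menu.create_cog(bot, cog_name="Guild Settings"))
```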
#### File: cogs/utils/time_value.py
```python
from __future__ import annotations
import math
import re
from datetime import timedelta
from discord.ext import commands
class InvalidTimeDuration(commands.BadArgument):
"""
A conversion error for an invalid input passed to :class:`voxelbotutils.TimeValue`.
Attributes:
value (str): The value that was given that failed to parse.
"""
def __init__(self, value: str):
self.value: str = value
def __str__(self):
return f"The value `{self.value}` could not be converted to a valid time duration."
class TimeValue(object):
"""
An object that nicely converts an integer value into an easily readable string.
This util is also available as an argument converter for your commands,
    though it can be used outside of being a converter as well via use of the :func:`parse`
method.
Examples:
::
>>> value = voxelbotutils.TimeValue(606)
>>> value.clean
'10m6s'
>>> value.clean_spaced
'10m 6s'
>>> value = voxelbotutils.TimeValue.parse('10m6s')
>>> value.duration
606
Note:
This does not support partial seconds, and can only support a max of *about* 68 years
(2^31 seconds).
Attributes:
duration (int): The entire duration, in seconds, of the timevalue object.
years (int): The number of years that the object represents.
days (int): The number of days that the object represents.
hours (int): The number of hours that the object represents.
minutes (int): The number of minutes that the object represents.
seconds (int): The number of seconds that the object represents.
clean_full (str): A string form of the object in form "10 hours 3 minutes".
clean_spaced (str): A string form of the object in form "10h 3m".
clean (str): A string form of the object in form "10h3m".
delta (datetime.timedelta): A timedelta for the entire timevalue object.
"""
TIME_VALUE_REGEX = re.compile(r"^(?:(?P<years>\d+)y)? *(?:(?P<weeks>\d+)w)? *(?:(?P<days>\d+)d)? *(?:(?P<hours>\d+)h)? *(?:(?P<minutes>\d+)m)? *(?:(?P<seconds>\d+)s)?$")
MAX_SIZE = 0b1111111111111111111111111111111 # 2**31 - this is about 68 years so anything above this is a bit...... much
def __init__(self, duration: float):
"""
Args:
duration (float): The duration to be converted.
Warning:
Provided values will be rounded up to the nearest integer.
Raises:
InvalidTimeDuration: If the provided time duration was invalid.
"""
self.duration: int = math.ceil(duration)
remaining = self.duration
self.years, remaining = self._get_quotient_and_remainder(remaining, 60 * 60 * 24 * 365)
self.days, remaining = self._get_quotient_and_remainder(remaining, 60 * 60 * 24)
self.hours, remaining = self._get_quotient_and_remainder(remaining, 60 * 60)
self.minutes, remaining = self._get_quotient_and_remainder(remaining, 60)
self.seconds = remaining
self.clean_spaced = ' '.join([i for i in [
f"{self.years}y" if self.years > 0 else None,
f"{self.days}d" if self.days > 0 else None,
f"{self.hours}h" if self.hours > 0 else None,
f"{self.minutes}m" if self.minutes > 0 else None,
f"{self.seconds}s" if self.seconds > 0 else None,
] if i])
self.clean_full = ' '.join([i for i in [
f"{self.years} years" if self.years > 1 else f"{self.years} year" if self.years >= 1 else None,
f"{self.days} days" if self.days > 1 else f"{self.days} day" if self.days >= 1 else None,
f"{self.hours} hours" if self.hours > 1 else f"{self.hours} hour" if self.hours >= 1 else None,
f"{self.minutes} minutes" if self.minutes > 1 else f"{self.minutes} minute" if self.minutes >= 1 else None,
f"{self.seconds} seconds" if self.seconds > 1 else f"{self.seconds} second" if self.seconds >= 1 else None,
] if i])
self.clean_days = ' '.join([i for i in [
f"{self.years} years" if self.years > 1 else f"{self.years} year" if self.years >= 1 else None,
f"{self.days} days" if self.days > 1 else f"{self.days} day" if self.days >= 1 else None,
] if i]) or 'less than a day'
        self.clean = self.clean_spaced.replace(" ", "")
        self.delta = timedelta(seconds=self.duration)
        # Build the short form before the size check so the error message can use it.
        if self.duration > self.MAX_SIZE:
            raise InvalidTimeDuration(self.clean)
@staticmethod
def _get_quotient_and_remainder(value: int, divisor: int):
"""
A divmod wrapper that just catches a zero division error.
"""
try:
return divmod(value, divisor)
except ZeroDivisionError:
return 0, value
def __str__(self):
return self.clean
def __repr__(self):
return f"{self.__class__.__name__}.parse('{self.clean}')"
@classmethod
async def convert(cls, ctx: commands.Context, value: str) -> TimeValue:
"""
Takes a value (1h/30m/10s/2d etc) and returns a TimeValue instance with the duration.
Provided for use of the Discord.py module.
Args:
ctx (discord.ext.commands.Context): The current context object that we want to convert under.
value (str): The value string to be converted.
Returns:
voxelbotutils.TimeValue: A time value instance.
Raises:
voxelbotutils.errors.InvalidTimeDuration: If the time could not be successfully converted.
"""
return cls.parse(value)
@classmethod
def parse(cls, value: str) -> TimeValue:
"""
Takes a value (1h/30m/10s/2d etc) and returns a TimeValue instance with the duration.
Args:
value (str): The value string to be converted.
Returns:
voxelbotutils.TimeValue: A time value instance.
Raises:
voxelbotutils.errors.InvalidTimeDuration: If the time could not be successfully converted.
"""
# If the value given is purely a number, add the "minute" unit to the end
if value.isdigit():
value += "m"
match = cls.TIME_VALUE_REGEX.search(value)
if match is None:
raise InvalidTimeDuration(value)
duration = 0
if match.group('years'):
duration += int(match.group('years')) * 60 * 60 * 24 * 365
if match.group('weeks'):
duration += int(match.group('weeks')) * 60 * 60 * 24 * 7
if match.group('days'):
duration += int(match.group('days')) * 60 * 60 * 24
if match.group('hours'):
duration += int(match.group('hours')) * 60 * 60
if match.group('minutes'):
duration += int(match.group('minutes')) * 60
if match.group('seconds'):
duration += int(match.group('seconds'))
return cls(duration)
```
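A quick sketch of the converter in use; the command and reply text are illustrative, not from this codebase.
```python
# Sketch only.
from discord.ext import commands

@commands.command()
async def mute(ctx, duration: TimeValue):
    # e.g. "?mute 1h30m" gives TimeValue(duration=5400)
    await ctx.send(f"Muting for {duration.clean_full} ({duration.duration} seconds).")

# The parser can also be called directly:
value = TimeValue.parse("10m6s")
assert value.duration == 606
assert value.clean == "10m6s"
```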
#### File: cogs/utils/twitch_stream.py
```python
from datetime import datetime, timezone
from typing import List
class TwitchStream:
"""
A container class for parts of a Twitch stream.
Attributes
-----------
id: :class:`str`
The ID of the stream.
user_id: :class:`str`
The ID of the user who's streaming.
user_login: :class:`str`
The login name of the user who's streaming.
user_name: :class:`str`
The display name of the user who's streaming.
game_id: :class:`str`
The ID of the game that the user is playing.
game_name: :class:`str`
The name of the game that the user is playing.
type: :class:`str`
The stream status. Will only be "live".
title: :class:`str`
The title of the stream.
viewer_count: :class:`int`
The viewer count for the stream.
started_at: :class:`datetime.datetime`
An ISO 8601 timestamp for the stream's start time.
language: :class:`str`
The language code for the stream's language.
thumbnail_url: :class:`str`
A URL for the stream's thumbnail, with placeholder "{width}" and "{height}"
format string placeholders.
tag_ids: List[:class:`str`]
The IDs of the tags assigned to the stream.
is_mature: :class:`bool`
Whether or not the stream is set to mature.
"""
def __init__(self, *, data: dict):
self.id: str = data['id']
self.user_id: str = data['user_id']
self.user_login: str = data['user_login']
self.user_name: str = data['user_name']
self.game_id: str = data['game_id']
self.game_name: str = data['game_name']
self.type: str = data['type']
self.title: str = data['title']
self.viewer_count: int = data['viewer_count']
started_at = datetime.fromisoformat(data['started_at'][:-1]) # It has a Z on the end :(
        started_at = started_at.replace(tzinfo=timezone.utc)  # replace() returns a new datetime
self.started_at: datetime = started_at
self.language: str = data['language']
self.thumbnail_url: str = data['thumbnail_url']
self.tag_ids: List[str] = data['tag_ids']
self.is_mature: bool = data['is_mature']
```
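For reference, constructing the container from a (made-up, truncated) Helix payload would look like this; the key names match what `__init__` reads, while the values are invented.
```python
# Example values are invented; the keys follow the attributes documented above.
payload = {
    "id": "41375541868",
    "user_id": "459331509",
    "user_login": "examplestreamer",
    "user_name": "ExampleStreamer",
    "game_id": "494131",
    "game_name": "Little Nightmares",
    "type": "live",
    "title": "An example stream title",
    "viewer_count": 78365,
    "started_at": "2021-03-10T15:04:21Z",
    "language": "en",
    "thumbnail_url": "https://example.com/live_user_examplestreamer-{width}x{height}.jpg",
    "tag_ids": ["d4bb9c58-2141-4881-bcdc-3fe0505457d1"],
    "is_mature": False,
}
stream = TwitchStream(data=payload)
print(stream.user_name, stream.viewer_count, stream.started_at.isoformat())
```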
#### File: web/utils/oauth_models.py
```python
import typing
import discord
class OauthGuild(object):
"""
A guild object from an oauth integration.
Attributes:
id (int): The ID of the guild.
name (str): The name of the guild.
icon (discord.Asset): The guild's icon.
owner_id (int): The ID of the owner for the guild.
This will either be the ID of the authenticated user or `0`.
        features (typing.List[str]): A list of features that the guild has.
"""
def __init__(self, bot, guild_data, user):
self.id: int = int(guild_data.get("id"))
self.name: str = guild_data.get("name")
self._icon: typing.Optional[str] = guild_data.get("icon")
self.owner_id: int = user.id if guild_data.get("owner") else 0
self.features: typing.List[str] = guild_data.get("features")
self._bot: discord.Client = bot
@property
def icon(self) -> typing.Optional[discord.Asset]:
"""Optional[:class:`Asset`]: Returns the guild's icon asset, if available."""
if self._icon is None:
return None
return discord.Asset._from_guild_icon(None, self.id, self._icon)
async def fetch_guild(self, bot=None) -> typing.Optional[discord.Guild]:
"""
Fetch the original :class:`discord.Guild` object from the API using the authentication from the
bot given.
Args:
bot: The bot object that you want to use to fetch the guild.
Returns:
typing.Optional[discord.Guild]: The guild instance.
"""
bot = bot or self._bot
try:
return await bot.fetch_guild(self.id)
except discord.HTTPException:
return None
class OauthUser(object):
"""
A user object from an oauth integration.
Attributes:
id (int): The ID of the user.
username (str): The user's username.
avatar (discord.Asset): The user's avatar asset.
        discriminator (str): The user's discriminator.
public_flags (discord.PublicUserFlags): The user's public flags.
locale (str): The locale of the user.
mfa_enabled (bool): Whether or not the user has MFA enabled.
"""
def __init__(self, user_data):
self.id: int = int(user_data['id'])
self.username: str = user_data.get("username")
self._avatar: str = user_data.get("avatar")
self.discriminator: str = user_data.get("discriminator")
self.public_flags: discord.PublicUserFlags = discord.PublicUserFlags._from_value(user_data.get("public_flags", 0))
self.locale: str = user_data.get("locale")
self.mfa_enabled: bool = user_data.get("mfa_enabled", False)
@property
def avatar(self) -> typing.Optional[discord.Asset]:
"""Optional[:class:`Asset`]: Returns the guild's icon asset, if available."""
if self._avatar is None:
return None
return discord.Asset._from_avatar(None, self.id, self._avatar)
class OauthMember(OauthUser):
"""
A user object from an oauth integration.
Attributes:
id (int): The ID of the user.
username (str): The user's username.
        avatar (discord.Asset): The user's avatar asset.
        discriminator (str): The user's discriminator.
public_flags (discord.PublicUserFlags): The user's public flags.
locale (str): The locale of the user.
mfa_enabled (bool): Whether or not the user has MFA enabled.
guild (OauthGuild): The guild object that this member is a part of.
guild_permissions (discord.Permissions): The permissions that this member has on the guild.
"""
def __init__(self, bot, guild_data, user_data):
super().__init__(user_data)
self.guild: OauthGuild = OauthGuild(bot, guild_data, self)
self.guild_permissions: discord.Permissions = discord.Permissions(guild_data['permissions'])
```
#### File: web/utils/process_discord_login.py
```python
import asyncio
from urllib.parse import urlencode
from datetime import datetime as dt, timedelta
import typing
import aiohttp
import aiohttp_session
from aiohttp.web import HTTPFound, Request, json_response
import yarl
from .get_avatar_url import get_avatar_url
from .oauth_models import OauthMember
def get_discord_login_url(request: Request, redirect_uri: str = None) -> str:
"""
Returns a login URL for your website based on the oauth information given in
your :class:`website config<WebsiteConfig.oauth>`.
Args:
request (Request): The request from which this command call is coming from.
redirect_uri (str, optional): Where the user should be redirected to after pressing authorize.
        oauth_scopes: The scopes that the login URL will ask for (read from the website config rather than passed in). Does not necessarily mean we'll get them.
Returns:
str: The login URL that we want to use.
"""
config = request.app['config']
oauth_data = config['oauth']
oauth_scopes = config['oauth_scopes']
parameters = {
'response_type': 'code',
'client_id': oauth_data['client_id'],
}
if redirect_uri:
if 'http' not in redirect_uri:
redirect_uri = config['website_base_url'].rstrip('/') + '/' + redirect_uri.lstrip('/')
parameters['redirect_uri'] = redirect_uri
if oauth_scopes:
parameters['scope'] = ' '.join(oauth_scopes)
return 'https://discordapp.com/api/v6/oauth2/authorize?' + urlencode(parameters)
async def process_discord_login(request: Request) -> None:
"""
Process a Discord login and store the information in the provided session based off
of a callback from your Discord redirect URI.
Args:
request (Request): The request from which this command call is coming from.
        oauth_scopes: The list of oauth scopes that we asked for (read from the website config rather than passed in).
"""
# Get the code
code = request.query.get('code')
if not code:
return HTTPFound(location='/')
# Get the bot
config = request.app['config']
oauth_data = config['oauth']
oauth_scopes = config['oauth_scopes']
# Generate the post data
data = {
'grant_type': 'authorization_code',
'code': code,
'scope': ' '.join(oauth_scopes),
**oauth_data,
}
base_url = yarl.URL(config['website_base_url'])
if base_url.explicit_port:
data['redirect_uri'] = "{0.scheme}://{0.host}:{0.port}{1.path}".format(base_url, request.url)
else:
data['redirect_uri'] = "{0.scheme}://{0.host}{1.path}".format(base_url, request.url)
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
# Make session so we can do stuff with it
session_storage = await aiohttp_session.get_session(request)
# Make the request
async with aiohttp.ClientSession(loop=request.loop) as session:
# Get auth
token_url = "https://discordapp.com/api/v6/oauth2/token"
async with session.post(token_url, data=data, headers=headers) as r:
token_info = await r.json()
if token_info.get('error'):
token_info['redirect_uri'] = data['redirect_uri']
session_storage['login_error'] = token_info
return json_response(token_info) # Error getting the token, just ignore it
# Update headers
headers.update({
"Authorization": f"Bearer {token_info['access_token']}"
})
token_info['expires_at'] = (dt.utcnow() + timedelta(seconds=token_info['expires_in'])).timestamp()
updated_token_info = session_storage.get('token_info', dict())
updated_token_info.update(token_info)
session_storage['token_info'] = updated_token_info
# Get user
if "identify" in oauth_scopes:
await get_user_info_from_session(request, refresh=True)
async def get_user_info_from_session(request: Request, *, refresh: bool = False):
"""
Get the user's info.
"""
session_storage = await aiohttp_session.get_session(request)
if refresh is False:
return session_storage['user_info']
user_url = "https://discordapp.com/api/v6/users/@me"
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
headers.update({
"Authorization": f"Bearer {session_storage['token_info']['access_token']}"
})
async with aiohttp.ClientSession(loop=request.loop) as session:
async with session.get(user_url, headers=headers) as r:
user_info = await r.json()
user_info['avatar_url'] = get_avatar_url(user_info)
session_storage['user_info'] = user_info
session_storage['user_id'] = int(user_info['id'])
session_storage['logged_in'] = True
return user_info
async def get_access_token_from_session(
request: Request, *, refresh_if_expired: bool = True,
refresh: bool = False) -> str:
"""
Get the access token for a given user.
"""
# Get relevant data
session_storage = await aiohttp_session.get_session(request)
config = request.app['config']
oauth_data = config['oauth']
oauth_scopes = config['oauth_scopes']
# See if we even need to make a new request
if refresh:
pass
    elif refresh_if_expired is False or session_storage['token_info']['expires_at'] > dt.utcnow().timestamp():  # cached token hasn't expired yet
return session_storage['token_info']['access_token']
# Generate the post data
data = {
'grant_type': 'refresh_token',
'scope': ' '.join(oauth_scopes or session_storage['token_info']['scope']),
**oauth_data,
}
if request.url.explicit_port:
data['redirect_uri'] = "http://{0.host}:{0.port}{0.path}".format(request.url)
else:
data['redirect_uri'] = "https://{0.host}{0.path}".format(request.url)
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
# Make the request
async with aiohttp.ClientSession(loop=request.loop) as session:
# Get auth
token_url = "https://discordapp.com/api/v6/oauth2/token"
async with session.post(token_url, data=data, headers=headers) as r:
token_info = await r.json()
if token_info.get('error'):
return "" # Error getting the token, just ignore it, TODO raise something
# Store data
token_info['expires_at'] = (dt.utcnow() + timedelta(seconds=token_info['expires_in'])).timestamp()
updated_token_info = session_storage['token_info']
updated_token_info.update(token_info)
session_storage['token_info'] = updated_token_info
return updated_token_info['access_token']
async def get_user_guilds_from_session(request: Request, bot_key: str = "bot") -> typing.List[OauthMember]:
"""
Returns a list of guilds that the user is in based on the request's logged in user.
"""
# Get auth
session_storage = await aiohttp_session.get_session(request)
token_info = session_storage['token_info']
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
'Authorization': f'Bearer {token_info["access_token"]}'
}
# Make the request
async with aiohttp.ClientSession(loop=request.loop) as session:
guilds_url = "https://discordapp.com/api/v6/users/@me/guilds"
# Loop until success
async with session.get(guilds_url, headers=headers) as r:
guild_info = await r.json()
if not r.ok:
return [] # Missing permissions or server error
# Return guild info
bot = request.app['bots'].get(bot_key)
return [OauthMember(bot, i, session_storage['user_info']) for i in guild_info]
async def add_user_to_guild_from_session(request: Request, bot_index: str, guild_id: int) -> bool:
"""
Adds the user to the given guild (if the correct scopes were previously provided).
Returns a boolean of whether or not that user was added (or was already in the guild) successfully.
"""
# Get the bot
session_storage = await aiohttp_session.get_session(request)
user_info = session_storage['user_info']
# Get our headers
guild_join_url = f"https://discordapp.com/api/v6/guilds/{guild_id}/members/{user_info['id']}"
headers = {
'Authorization': f"Bot {request.app['config']['discord_bots'][bot_index]}"
}
# Get our access token
data = {
'access_token': await get_access_token_from_session(request)
}
# Make the request
async with aiohttp.ClientSession(loop=request.loop) as session:
async with session.put(guild_join_url, headers=headers, json=data) as r:
return str(r.status)[0] == '2' # 201 - Added; 204 - Already in the guild
```
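These helpers are typically wired into an aiohttp app along these lines (a sketch; the route paths and redirect targets are illustrative and not taken from this project).
```python
# Sketch only: paths and redirect targets are illustrative.
from aiohttp import web
import aiohttp_session

routes = web.RouteTableDef()

@routes.get("/login")
async def login(request: web.Request):
    # Send the user to Discord's authorize page.
    return web.HTTPFound(location=get_discord_login_url(request, redirect_uri="/login_processor"))

@routes.get("/login_processor")
async def login_processor(request: web.Request):
    # Discord redirects back here with ?code=...; this stores the token and user info in the session.
    await process_discord_login(request)
    return web.HTTPFound(location="/")

@routes.get("/logout")
async def logout(request: web.Request):
    session = await aiohttp_session.get_session(request)
    session.invalidate()
    return web.HTTPFound(location="/")
```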
#### File: discord/ui/button.py
```python
from __future__ import annotations
import uuid
from typing import Optional, TYPE_CHECKING, Union
from .models import DisableableComponent
from ..enums import ButtonStyle, ComponentType
from ..partial_emoji import PartialEmoji, _EmojiTag
from ..errors import InvalidArgument
if TYPE_CHECKING:
from ..emoji import Emoji
from ..types.components import (
Button as ButtonPayload,
)
class Button(DisableableComponent):
"""Represents a UI button.
Attributes
------------
style: :class:`discord.ButtonStyle`
The style of the button.
custom_id: Optional[:class:`str`]
The ID of the button that gets received during an interaction.
If this button is for a URL, it does not have a custom ID.
url: Optional[:class:`str`]
The URL this button sends you to.
disabled: :class:`bool`
Whether the button is disabled or not.
label: Optional[:class:`str`]
The label of the button, if any.
emoji: Optional[Union[:class:`.PartialEmoji`, :class:`.Emoji`, :class:`str`]]
The emoji of the button, if available.
"""
__slots__ = ("label", "style", "custom_id", "emoji", "url", "disabled",)
TYPE = ComponentType.button
def __init__(
self, *, label: Optional[str] = None, custom_id: Optional[str] = None,
style: Optional[ButtonStyle] = None,
emoji: Optional[Union[str, Emoji, PartialEmoji]] = None, url: Optional[str] = None,
disabled: Optional[bool] = False):
self.label = label
self.style = style or ButtonStyle.secondary
self.custom_id = custom_id or str(uuid.uuid1())
if emoji is not None:
if isinstance(emoji, str):
self.emoji = PartialEmoji.from_str(emoji)
elif isinstance(emoji, _EmojiTag):
self.emoji = emoji._to_partial()
else:
raise TypeError(f'expected emoji to be str, Emoji, or PartialEmoji not {emoji.__class__}')
else:
self.emoji = None
self.url = url
self.disabled = disabled
if url is None and self.style == ButtonStyle.link:
raise InvalidArgument("Missing URL for button type of link")
if url is not None and self.style != ButtonStyle.link:
raise InvalidArgument("Incompatible URL passed for button not of type link")
if not label and not emoji:
raise InvalidArgument("Both label and emoji cannot be empty")
def __repr__(self) -> str:
attrs = (
('label', self.label),
('style', self.style),
('custom_id', self.custom_id),
('emoji', self.emoji),
('url', self.url),
('disabled', self.disabled),
)
inner = ' '.join('%s=%r' % t for t in attrs)
return f'{self.__class__.__name__}({inner})'
def to_dict(self) -> ButtonPayload:
v = {
"type": self.TYPE.value,
"label": self.label,
"style": self.style.value,
"disabled": self.disabled,
}
if self.emoji:
v.update({"emoji": self.emoji.to_dict()})
if self.url:
v.update({"url": self.url})
else:
v.update({"custom_id": self.custom_id})
return v
@classmethod
def from_dict(cls, data: ButtonPayload) -> Button:
"""
Construct an instance of a button from an API response.
Parameters
-----------
data: :class:`dict`
The payload data that the button should be constructed from.
Returns
-------
:class:`discord.ui.Button`
The button that the payload describes.
"""
emoji = data.get("emoji")
if emoji is not None:
emoji = PartialEmoji.from_dict(emoji)
return cls(
label=data.get("label"),
style=ButtonStyle(data.get("style", ButtonStyle.secondary.value)),
custom_id=data.get("custom_id"),
url=data.get("url"),
emoji=emoji,
disabled=data.get("disabled", False),
)
@classmethod
def confirm(cls, **kwargs) -> Button:
"""
Give you a button instance with the text "Confirm", custom ID "CONFIRM", and
a style of success.
"""
return cls(
label=kwargs.pop("label", "Confirm"),
custom_id=kwargs.pop("custom_id", "CONFIRM"),
style=kwargs.pop("style", ButtonStyle.success),
**kwargs,
)
@classmethod
def cancel(cls, **kwargs) -> Button:
"""
Give you a button instance with the text "Cancel", custom ID "CANCEL", and
a style of danger.
"""
return cls(
label=kwargs.pop("label", "Cancel"),
custom_id=kwargs.pop("custom_id", "CANCEL"),
style=kwargs.pop("style", ButtonStyle.danger),
**kwargs,
)
``` |
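A small usage sketch for the helpers above. The `MessageComponents.add_buttons_with_rows` call is the same container used by the menu code earlier in this document; everything else is illustrative.
```python
# Sketch only.
import discord

# Quick yes/no prompt components:
components = discord.ui.MessageComponents.add_buttons_with_rows(
    discord.ui.Button.confirm(),
    discord.ui.Button.cancel(),
)
# await ctx.send("Are you sure?", components=components)

# Buttons round-trip through their payload form:
payload = discord.ui.Button(
    label="Docs",
    style=discord.ui.ButtonStyle.link,
    url="https://example.com",
).to_dict()
button = discord.ui.Button.from_dict(payload)
```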
{
"source": "6days9weeks/september",
"score": 3
} |
#### File: september/genshin/genshin.py
```python
import datetime
import discord
import humanize
import pytz
from discord.ext import commands, tasks
# Credits to https://github.com/Cog-Creators/Red-DiscordBot/blob/ded5aff08cfe443498770e7f27035db694e72c30/redbot/core/utils/chat_formatting.py#L86
def box(text: str, lang: str = "") -> str:
"""Get the given text in a code block.
Parameters
----------
text : str
The text to be marked up.
lang : `str`, optional
The syntax highlighting language for the codeblock.
Returns
-------
str
The marked up text.
"""
ret = "```{}\n{}```".format(lang, text)
return ret
class Genshin(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.coll = bot.plugin_db.get_partition(self)
self._status_embed.start()
self.channel = None
self.message = None
self.image = None
self.bot.loop.create_task(self.cog_load())
def cog_unload(self):
"""Cancel the loop when the cog is unloaded"""
self._status_embed.cancel()
async def cog_load(self):
await self.obtain_shit()
@tasks.loop(minutes=1)
async def _status_embed(self):
        if self.message is not None:
await self.edit_embed()
@staticmethod
def natime():
"""Get time left for daily in NA till 4am their time."""
now = datetime.datetime.now(pytz.timezone("America/Chicago"))
utc_time_for_tz_loop: datetime.datetime = (
datetime.datetime.combine(
now.date() + datetime.timedelta(days=1), datetime.time(hour=4)
)
- now.utcoffset()
)
delta = utc_time_for_tz_loop - datetime.datetime.utcnow()
return humanize.time.precisedelta(delta, minimum_unit="minutes", format="%0.f")
@staticmethod
def natimew():
"""Get time left for weekly in NA till 4am their time."""
now = datetime.datetime.now(pytz.timezone("America/Chicago"))
utc_time_for_tz_loop: datetime.datetime = (
datetime.datetime.combine(
now.date() + datetime.timedelta(days=7 - now.weekday()), datetime.time(hour=4)
)
- now.utcoffset()
)
delta = utc_time_for_tz_loop - datetime.datetime.utcnow()
return humanize.time.precisedelta(delta, minimum_unit="minutes", format="%0.f")
@staticmethod
def asartime():
"""Get time left for daily in Asia/SAR till 4am their time."""
now = datetime.datetime.now(pytz.timezone("Asia/Hong_Kong"))
utc_time_for_tz_loop: datetime.datetime = (
datetime.datetime.combine(
now.date() + datetime.timedelta(days=1), datetime.time(hour=4)
)
- now.utcoffset()
)
delta = utc_time_for_tz_loop - datetime.datetime.utcnow()
return humanize.time.precisedelta(delta, minimum_unit="minutes", format="%0.f")
@staticmethod
def asartimew():
"""Get time left for weekly in Asia/SAR till 4am their time."""
now = datetime.datetime.now(pytz.timezone("Asia/Hong_Kong"))
utc_time_for_tz_loop: datetime.datetime = (
datetime.datetime.combine(
now.date() + datetime.timedelta(days=7 - now.weekday()), datetime.time(hour=4)
)
- now.utcoffset()
)
delta = utc_time_for_tz_loop - datetime.datetime.utcnow()
return humanize.time.precisedelta(delta, minimum_unit="minutes", format="%0.f")
@staticmethod
def eutime():
"""Get time left for daily in EU till 4am their time."""
now = datetime.datetime.now(pytz.timezone("Europe/Dublin"))
time_for_4_am: datetime.datetime = (
datetime.datetime.combine(
now.date() + datetime.timedelta(days=0), datetime.time(hour=4)
)
)
delta = pytz.timezone("Europe/Dublin").localize(time_for_4_am) - datetime.datetime.now(pytz.timezone("Europe/Dublin"))
return humanize.time.precisedelta(delta, minimum_unit="minutes", format="%0.f")
@staticmethod
def eutimew():
"""Get time left for weekly in EU till 4am their time."""
now = datetime.datetime.now(pytz.timezone("Europe/Dublin"))
utc_time_for_tz_loop: datetime.datetime = (
datetime.datetime.combine(
now.date() + datetime.timedelta(days=7 - now.weekday()), datetime.time(hour=4)
)
- now.utcoffset()
)
delta = utc_time_for_tz_loop - datetime.datetime.utcnow()
return humanize.time.precisedelta(delta, minimum_unit="minutes", format="%0.f")
@staticmethod
async def status_embed(self):
"""The status embed or smth"""
embed = discord.Embed(
title="Server Status",
description="Members: {}".format(self.bot.modmail_guild.member_count),
timestamp=datetime.datetime.utcnow(),
)
embed.add_field(
name="Server Time:",
value="{NA}• Daily reset in {NADaily}\n• Weekly reset in {NAWeekly}\n{EU}• Daily reset in {EUDaily}\n• Weekly reset in {EUWeekly}\n{ASI}• Daily reset in {ASIDaily}\n• Weekly reset in {ASIWeekly}\n{SAR}• Daily reset in {SARDaily}\n• Weekly reset in {SARWeekly}".format(
NA=box(
"# NA "
+ datetime.datetime.now(pytz.timezone("America/Chicago")).strftime("%I:%M %p"),
"md",
),
NADaily=self.natime(),
NAWeekly=self.natimew(),
EU=box(
"# EU "
+ datetime.datetime.now(pytz.timezone("Europe/Dublin")).strftime("%I:%M %p")
),
EUDaily=self.eutime(),
EUWeekly=self.eutimew(),
ASI=box(
"# ASIA "
+ datetime.datetime.now(pytz.timezone("Asia/Hong_Kong")).strftime("%I:%M %p"),
"glsl",
),
ASIDaily=self.asartime(),
ASIWeekly=self.asartimew(),
SAR=box(
"# SAR "
+ datetime.datetime.now(pytz.timezone("Asia/Hong_Kong")).strftime("%I:%M %p"),
"fix",
),
SARDaily=self.asartime(),
SARWeekly=self.asartimew(),
),
)
        if self.image is not None:
url = self.image + "?size=4096"
else:
url = "https://cdn.discordapp.com/banners/522681957373575168/e5ff2cb0b8c102ee4f2e1f02b728bc99.webp?size=2048"
embed.set_image(url=url)
return embed
async def edit_embed(self):
chan = self.bot.get_channel(self.channel)
msg = await chan.fetch_message(self.message)
await msg.edit(embed=await self.status_embed(self))
async def obtain_shit(self):
config = await self.coll.find_one({"_id": "config"})
try:
self.channel = int(config["status-channel"]["channel"]) or None
self.message = int(config["status-embed"]["message"]) or None
self.image = config["image-url"]["url"] or None
except Exception:
pass
@commands.command()
@commands.has_permissions(manage_messages=True)
async def setstatuschan(self, ctx, *, channel: discord.TextChannel):
"""Set status channel."""
message = await self.bot.get_channel(channel.id).send(
embed=discord.Embed(
title="This is a test embed which will be gone in few minutes. Please don't delete it. This will be the status embed.",
color=0xFFCDCD,
)
)
await self.coll.find_one_and_update(
{"_id": "config"},
{"$set": {"status-channel": {"channel": str(channel.id)}}},
upsert=True,
)
await self.coll.find_one_and_update(
{"_id": "config"},
{"$set": {"status-embed": {"message": str(message.id)}}},
upsert=True,
)
embed = discord.Embed(
title=f"The status channel has been set to #{channel}.\nThe status function will auto-start now.",
color=0xFFCDCD,
)
embed.set_footer(text="you're amazing~!!")
await ctx.send(embed=embed)
@commands.command()
@commands.has_permissions(manage_messages=True)
async def seturl(self, ctx, url):
"""Provide an image url for the status embed"""
if (
url.endswith(".png")
or url.endswith(".jpeg")
or url.endswith(".gif")
or url.endswith(".jpg")
or url.endswith(".webp")
):
await self.coll.find_one_and_update(
{"_id": "config"},
{"$set": {"image-url": {"url": str(url)}}},
upsert=True,
)
embed = discord.Embed(title=f"The image url has been set.", color=0xFFCDCD)
embed.set_image(url=url)
embed.set_footer(text="you're amazing~!!")
await ctx.send(embed=embed)
else:
await ctx.reply("Give an valid url and it should be `png/jpeg/gif/jpg/webp`.")
@commands.command()
@commands.has_permissions(manage_messages=True)
async def imessedup(self, ctx):
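        """Re-read the status channel, message, and image config from the database."""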
await self.obtain_shit()
await ctx.reply("Should work.")
def setup(bot):
cog = Genshin(bot)
bot.add_cog(cog)
``` |
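The reset-time methods above all reduce to "how long until the next 4 AM in a given timezone"; a generic version of that calculation (a sketch, not part of the plugin, and ignoring DST edge cases around the transition) looks like:
```python
# Sketch only: equivalent in spirit to the natime/asartime helpers above.
import datetime
import pytz

def time_until_next_reset(tz_name: str, reset_hour: int = 4) -> datetime.timedelta:
    tz = pytz.timezone(tz_name)
    now = datetime.datetime.now(tz)
    reset = now.replace(hour=reset_hour, minute=0, second=0, microsecond=0)
    if reset <= now:
        reset += datetime.timedelta(days=1)
    return reset - now

print(time_until_next_reset("America/Chicago"))
```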
{
"source": "6desislava6/PyDay",
"score": 2
} |
#### File: PyDay/pyday_alarms/views.py
```python
from django.shortcuts import render
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.views.generic import View
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseRedirect
from pyday_alarms.services import update_alarms
from pyday_alarms.forms import CreateAlarmForm
from pyday_calendar.forms import CreateEventForm
from pyday_alarms.models import Alarm
from datetime import datetime, timedelta
class AlarmView(View):
@method_decorator(login_required)
def get(self, request):
        # should all alarms be listed here?
form_alarm = CreateAlarmForm()
return render(request, 'create_alarm.html', locals())
    # It creates a new alarm and updates all alarms on the Raspberry Pi
@method_decorator(login_required)
def post(self, request):
form = CreateAlarmForm(request.POST)
if form.is_valid():
form = form.cleaned_data
date = form['date'] + timedelta(hours=int(request.POST["hour"]),
minutes=int(request.POST["mins"]))
Alarm(user=request.user, message=form['message'], date=date).save()
if not update_alarms(request.user):
return render(request, 'error.html', {'error': "Raspberry!"})
return HttpResponseRedirect('/social/main')
else:
return render(request, 'error.html', {'error': "Invalid form."})
```
#### File: PyDay/pyday_social_network/tests.py
```python
from django.test import TestCase
from django.db.utils import IntegrityError
from pyday_social_network.models import PyDayUser, FollowingRelation
from pyday_social_network.views import *
from django.test import Client
from django.core.urlresolvers import reverse
# from django.core.files.uploadedfile import SimpleUploadedFile
from django.test.client import RequestFactory
from pyday_social_network.services import *
from pyday_social_network.forms import RegisterUserForm
from datetime import datetime
from pyday.settings import GREETINGS
class PyDayUserTest(TestCase):
def setUp(self):
self.user = PyDayUser.objects._create_user("<EMAIL>", "secret",
"MynameisWhat",
"MynameisWho")
def test_user_creation(self):
self.assertEqual(self.user.first_name, "MynameisWhat")
self.assertEqual(self.user.last_name, "MynameisWho")
self.assertEqual(self.user.email, "<EMAIL>")
self.assertNotEqual(self.user.password, "<PASSWORD>")
def test_user_creation_save(self):
PyDayUser.objects._create_user("<EMAIL>", "secret",
"MynameisWhat",
"MynameisWho")
self.assertEqual(len(PyDayUser.objects.all()), 2)
def test_invalid_email(self):
with self.assertRaises(ValueError):
PyDayUser.objects._create_user("", "secret",
"MynameisWhat",
"MynameisWho")
def test_unique_email(self):
with self.assertRaises(IntegrityError):
PyDayUser.objects._create_user("<EMAIL>", "secret",
"MynameisWhat",
"MynameisWho")
class FollowingRelationTest(TestCase):
def setUp(self):
self.user = PyDayUser.objects._create_user("<EMAIL>", "secret",
"MynameisWhat",
"MynameisWho")
self.user2 = PyDayUser.objects._create_user("<EMAIL>", "secret",
"MynameisWhat",
"MynameisWho")
def test_follow(self):
self.user.follow(self.user2.id)
self.assertEqual(len(FollowingRelation.objects.all()), 1)
rel = FollowingRelation.objects.get(pk=1)
self.assertEqual(rel.followed, self.user2)
self.assertEqual(rel.follower, self.user)
def test_unfollow(self):
self.user.follow(self.user2.id)
self.user.unfollow(self.user2.id)
self.assertEqual(len(FollowingRelation.objects.all()), 0)
def test_unfollow_not_followed(self):
self.assertFalse(self.user.unfollow(self.user2.id)[0])
def test_follow_followed(self):
self.user.follow(self.user2.id)
self.assertFalse(self.user.follow(self.user2.id)[0])
def test_following_followers(self):
self.user.follow(self.user2.id)
self.assertEqual(self.user.following, [self.user2])
self.assertEqual(self.user2.followers, [self.user])
def test_friends(self):
self.user.follow(self.user2.id)
self.user2.follow(self.user.id)
self.assertEqual(self.user.friends, [self.user2])
self.assertEqual(self.user2.friends, [self.user])
# Song...
class RegisterViewTest(TestCase):
def setUp(self):
self.client = Client()
def test_post(self):
with self.assertTemplateUsed('error.html'):
response = self.client.post(reverse('pyday_social_network:register_login'),
{'first_name': 'ha', 'last_name': 'ha',
'email': '<EMAIL>',
'password': '1'})
self.assertEqual(response.status_code, 200)
self.assertEqual(len(PyDayUser.objects.all()), 1)
self.assertTrue(PyDayUser.objects.get(email='<EMAIL>'))
self.assertContains(response, "You have registered")
def test_post_not_successful(self):
with self.assertTemplateUsed('error.html'):
response = self.client.post(reverse('pyday_social_network:register_login'),
{'first_name': 'ha', 'last_name': 'ha',
'email': '', 'password': '1'})
class UploadPictureViewTest(TestCase):
def setUp(self):
self.client = Client()
self.user = PyDayUser.objects._create_user("<EMAIL>", "secret",
"MynameisWhat",
"MynameisWho")
self.user2 = PyDayUser.objects._create_user("<EMAIL>", "secret",
"MynameisWhat",
"MynameisWho")
self.client.login(email='<EMAIL>', password='<PASSWORD>')
def test_post(self):
picture = open('./media/pictures/profile.jpg', 'rb')
response = self.client.post(reverse('pyday_social_network:upload_picture'),
{'picture': picture})
self.assertNotEqual(PyDayUser.objects.get(
email="<EMAIL>").picture, PyDayUser.objects.get(email="<EMAIL>").picture)
def tearDown(self):
PyDayUser.objects.get(email='<EMAIL>').delete()
class ViewsTestNotLogged(TestCase):
def setUp(self):
self.client = Client()
self.user = PyDayUser.objects._create_user("<EMAIL>", "secret",
"MynameisWhat",
"MynameisWho")
def test_login_redirect(self):
response = self.client.get(
reverse('pyday_social_network:login'), follow=True)
self.assertEqual(response.redirect_chain,
[('/social/main', 302), ('/social/main/', 301),
('/social/register/?next=/social/main/', 302)])
def test_login_redirect_anonymous(self):
self.client.login(email='<EMAIL>', password='<PASSWORD>')
with self.assertTemplateUsed('main.html'):
response = self.client.get(
reverse('pyday_social_network:login'), follow=True)
self.assertEqual(response.redirect_chain,
[('/social/main', 302), ('/social/main/', 301)])
def test_login_success(self):
with self.assertTemplateUsed('main.html'):
response = self.client.post(reverse('pyday_social_network:login'),
{'email': "<EMAIL>",
'password': '<PASSWORD>'},
follow=True)
self.assertEqual(response.redirect_chain, [('/social/main', 302),
('/social/main/', 301)])
def test_login_fail(self):
with self.assertTemplateUsed('error.html'):
response = self.client.post(reverse('pyday_social_network:login'),
{'email': "<EMAIL>",
'password': '<PASSWORD>'},
follow=True)
self.assertContains(response, "Invalid email/password")
def test_login_invalid_form(self):
with self.assertTemplateUsed('error.html'):
response = self.client.post(reverse('pyday_social_network:login'),
{'password': '<PASSWORD>'},
follow=True)
self.assertContains(response, "Invalid form")
def test_login_wrong_email(self):
with self.assertTemplateUsed('error.html'):
response = self.client.post(reverse('pyday_social_network:login'),
{'email': "<EMAIL>",
'password': '<PASSWORD>'},
follow=True)
self.assertContains(response, "Invalid email/password")
class ViewsTestLogged(TestCase):
def setUp(self):
self.client = Client()
self.user = PyDayUser.objects._create_user("<EMAIL>", "secret",
"MynameisWhat",
"MynameisWho")
self.client.login(email='<EMAIL>', password='<PASSWORD>')
def test_main(self):
with self.assertTemplateUsed('main.html'):
response = self.client.get(reverse('pyday_social_network:main'))
self.assertEqual(response.status_code, 200)
def test_logout(self):
with self.assertTemplateUsed('error.html'):
response = self.client.get(reverse('pyday_social_network:logout'))
self.assertEqual(response.status_code, 200)
def test_display_all_users(self):
self.display('pyday_social_network:all_users')
def display(self, name_url):
with self.assertTemplateUsed('all_users.html'):
response = self.client.get(reverse(name_url))
self.assertEqual(response.status_code, 200)
def test_display_following(self):
self.display('pyday_social_network:following')
def test_display_followers(self):
self.display('pyday_social_network:followers')
def test_display_friends(self):
self.display('pyday_social_network:friends')
def test_follow(self):
user2 = PyDayUser.objects._create_user("<EMAIL>", "secret",
"MynameisWhat",
"MynameisWho")
response = self.client.get(
'/social/follow/{}'.format(user2.id), follow=True)
self.assertTrue(PyDayUser.objects.get(email=user2.email)
in PyDayUser.objects.get(email=self.user.email).following)
self.assertTrue(PyDayUser.objects.get(email=self.user.email)
in PyDayUser.objects.get(email=user2.email).followers)
self.assertEqual(response.redirect_chain,
[('/social/profile', 302), ('/social/profile/', 301)])
def test_follow_already_following(self):
user2 = PyDayUser.objects._create_user("<EMAIL>a", "secret",
"MynameisWhat",
"MynameisWho")
self.user.follow(user2.id)
with self.assertTemplateUsed('error.html'):
response = self.client.get('/social/follow/{}'.format(user2.id))
self.assertContains(
response, "You have already followed this user")
def test_unfollow(self):
user2 = PyDayUser.objects._create_user("<EMAIL>", "secret",
"MynameisWhat",
"MynameisWho")
self.user.follow(user2.id)
response = self.client.get(
'/social/unfollow/{}'.format(user2.id), follow=True)
self.assertTrue(PyDayUser.objects.get(email=user2.email)
not in PyDayUser.objects.get(email=self.user.email).following)
self.assertTrue(PyDayUser.objects.get(email=self.user.email)
not in PyDayUser.objects.get(email=user2.email).followers)
self.assertEqual(response.redirect_chain,
[('/social/profile', 302), ('/social/profile/', 301)])
def test_unfollow_already_not_following(self):
user2 = PyDayUser.objects._create_user("<EMAIL>", "secret",
"MynameisWhat",
"MynameisWho")
with self.assertTemplateUsed('error.html'):
response = self.client.get('/social/unfollow/{}'.format(user2.id))
self.assertContains(response, "You do not follow this user")
def test_display_profile(self):
with self.assertTemplateUsed('profile.html'):
response = self.client.get(
"/social/profile/{}".format(self.user.id))
self.assertEqual(response.status_code, 200)
def test_display_non_existing(self):
with self.assertTemplateUsed('error.html'):
response = self.client.get("/social/profile/100")
self.assertContains(response, 'User does not exist.')
class ServicesTest(TestCase):
def test_regitster_user_post(self):
rf = RequestFactory()
post_request = rf.post('', {'email': "<EMAIL>",
'password': "<PASSWORD>",
'first_name': "MynameisWhat",
'last_name': "MynameisWho"})
self.assertTrue(register_user_post(post_request, RegisterUserForm))
self.assertEqual(len(PyDayUser.objects.all()), 1)
user = PyDayUser.objects.get(id=1)
        self.assertEqual(user.email, "<EMAIL>")
self.assertNotEqual(user.password, "<PASSWORD>")
self.assertEqual(user.first_name, "MynameisWhat")
self.assertEqual(user.last_name, "MynameisWho")
def test_current_events(self):
date_time = datetime.now()
user = PyDayUser.objects._create_user("<EMAIL>", "secret",
"MynameisWhat",
"MynameisWho")
event = Event(owner=user, from_time=date_time.hour,
to_time=date_time.hour + 1,
importance="important", caption="",
date=date_time, title="title")
event.save()
self.assertEqual(get_current_events(date_time.hour, date_time, user),
[event])
def test_get_greeting(self):
self.assertEqual(get_greeting(9), GREETINGS[0][2])
self.assertEqual(get_greeting(12), GREETINGS[1][2])
self.assertEqual(get_greeting(16), GREETINGS[2][2])
self.assertEqual(get_greeting(21), GREETINGS[3][2])
'''class FormTests(TestCase):
def test_form_upload_picture(self):
picture = open('./media/pictures/profile.jpg', 'rb')
file_dict = {'file': SimpleUploadedFile(picture.name, picture.read())}
form = UploadPictureForm(file_dict)
self.assertTrue(form.is_valid())
'''
```
#### File: PyDay/pyday/views.py
```python
from django.views.generic import View
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.shortcuts import render
from django.http import HttpResponseRedirect
class UploadView(View):
error_message = 'Invalid form!'
# @method_decorator(login_required)
# def get(self, request):
# form = self.form_class()
# return render(request, self.template_name, {'form': form})
@method_decorator(login_required)
def post(self, request, friends=None):
form = self.form_class(request.POST, request.FILES)
if form.is_valid():
self.post_function(request.user, form, friends)
return HttpResponseRedirect(self.success_url)
else:
return render(request, 'error.html', {'error': self.error_message})
def error404(request):
return render(request, 'error.html', {'error': "Error 404 - Page not found!"})
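# --- Illustrative sketch (not part of the original app) ---
# UploadView is generic: a concrete view is expected to provide `form_class`,
# `success_url` and a `post_function(user, form, friends)` hook, which post()
# calls after validating the form. The names below (ExampleUploadForm,
# ExampleUploadView) are hypothetical and only show the expected wiring.
from django import forms
class ExampleUploadForm(forms.Form):
    file = forms.FileField()
class ExampleUploadView(UploadView):
    form_class = ExampleUploadForm
    success_url = '/social/profile/'
    def post_function(self, user, form, friends):
        # Persist the validated upload however the concrete app requires,
        # e.g. attach form.cleaned_data['file'] to the user's profile.
        pass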
``` |
{
"source": "6enno/FarmXero",
"score": 3
} |
#### File: 6enno/FarmXero/coingeckoScraper.py
```python
import datetime
import requests
BASE_URL = 'https://api.coingecko.com/api/v3/'
HIST_EXT_URL = 'coins/filecoin/history'
#NOTE: Cannot query before mainnet launch 15-Oct-2020
def getRequestUrl(date):
params = '?date=' + date.strftime('%d-%m-%Y') + '&localization=true'
url = BASE_URL + HIST_EXT_URL + params
return url
def getDataForDate(date):
url = getRequestUrl(date)
response = requests.get(url).json()
return response
def getFilecoinNZDPriceOnDay(date):
ret = getDataForDate(date)
print(ret)
price = ret['market_data']['current_price']['nzd']
return price
# Tests
# d = datetime.date(2020,10,15)
# print(getFilecoinNZDPriceOnDay(d))
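# Illustrative helper (not part of the original file): guard against dates
# before the mainnet launch noted above, where the history endpoint has no
# Filecoin market data and the price lookup would fail.
def getFilecoinNZDPriceSinceMainnet(date):
    mainnet_launch = datetime.date(2020, 10, 15)
    if date < mainnet_launch:
        raise ValueError('No Filecoin price data before 15-Oct-2020')
    return getFilecoinNZDPriceOnDay(date)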
```
#### File: 6enno/FarmXero/data_folders.py
```python
import csv
import datetime
import os
import sqlite3
MESSAGE_ARCHIVE = 'archive/messages/'
BLOCKS_ARCHIVE = 'archive/blocks/'
JOURNAL_ARCHIVE = 'archive/journals.csv'
DATABASE_ARCHIVE = 'archive/farmxero.db'
# Note: despite the name, the factor is 10**-18 (attoFIL -> FIL), not 10**-9.
def nanoFilToFil(nanoFil):
    return nanoFil*(10**-18)
#Returns total values in NZD (or FIL if specified)
def getJournalTotals(startDate, endDate, valuesInFIL=False):
t = {}
t['collat'] = 0
t['minerFee'] = 0
t['burnFee'] = 0
t['slash'] = 0
t['transfers'] = 0
t['blockRewards'] = 0
offset = 10
multiplier = 1
if (valuesInFIL):
offset = 2
multiplier = nanoFilToFil(1)
with open(JOURNAL_ARCHIVE) as f:
jnls = csv.reader(f, delimiter=',')
i = 0
for j in jnls:
i += 1
if (i == 1):
continue
# print(j)
# print(type(j[0]))
date = datetime.datetime.strptime(j[0], '%d-%m-%Y').date()
if(date >= startDate and date <= endDate):
t['collat'] += float(j[offset+0]) * multiplier
t['minerFee'] += float(j[offset+1]) * multiplier
t['burnFee'] += float(j[offset+2]) * multiplier
t['slash'] += float(j[offset+3]) * multiplier
t['transfers'] += float(j[offset+4]) * multiplier
                t['blockRewards'] += float(j[offset+5]) * multiplier  # block rewards sit in the column after transfers
return t
def getMessagesTotals(startDate, endDate):
t = {}
t['collat'] = 0
t['minerFee'] = 0
t['burnFee'] = 0
t['slash'] = 0
t['transfers'] = 0
_, _, filenames = next(os.walk(MESSAGE_ARCHIVE))
# print(filenames);
for f in filenames:
date = datetime.datetime.strptime(f, 'msgs_%d-%m-%Y.csv').date()
if(date >= startDate and date <= endDate):
mt = getMessageTotals(MESSAGE_ARCHIVE + f)
t['collat'] += mt['collat']
t['minerFee'] += mt['minerFee']
t['burnFee'] += mt['burnFee']
t['slash'] += mt['slash']
t['transfers'] += mt['transfers']
return t
def getMessageTotals(filename):
t = {}
t['collat'] = 0
t['minerFee'] = 0
t['burnFee'] = 0
t['slash'] = 0
t['transfers'] = 0
with open(filename) as f:
msgs = csv.reader(f, delimiter=',')
i = 0
for m in msgs:
i += 1
if (i == 1):
continue
# print(m)
t['collat'] += nanoFilToFil(float(m[4]))
t['minerFee'] += nanoFilToFil(float(m[5]))
t['burnFee'] += nanoFilToFil(float(m[6]))
t['slash'] += nanoFilToFil(float(m[7]))
t['transfers'] += nanoFilToFil(float(m[3]))
# print(t)
return t
# This is a quick rec that checks messages have been totaled and checks this
# against the archived Journals in FIL
# Does not check for errored or duplicated messages
def quickRecFIL(startDate, endDate, tolerance=0.001):
mTotal = getMessagesTotals(startDate, endDate)
jTotal = getJournalTotals(startDate, endDate, valuesInFIL=True)
print(mTotal)
print(jTotal)
if(abs(mTotal['collat'] - jTotal['collat']) > tolerance):
print('collat did not rec!!' + str(mTotal['collat'] - jTotal['collat']))
return 1
if(abs(mTotal['minerFee'] - jTotal['minerFee']) > tolerance):
print('minerFee did not rec!!')
return 1
if(abs(mTotal['burnFee'] - jTotal['burnFee']) > tolerance):
print('burnFee did not rec!!')
return 1
if(abs(mTotal['slash'] - jTotal['slash']) > tolerance):
print('slash did not rec!!')
return 1
if(abs(mTotal['transfers'] - jTotal['transfers']) > tolerance):
print('transfers did not rec!!')
return 1
print('all recs checked')
return 0
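# Illustrative usage (not part of the original file): reconcile one month of
# archived messages against the archived journals. The dates are made up;
# quickRecFIL returns 0 when every category agrees within the tolerance.
def exampleQuickRecMarch2021():
    start = datetime.date(2021, 3, 1)
    end = datetime.date(2021, 3, 31)
    return quickRecFIL(start, end, tolerance=0.001)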
def connectDB(dbFile=DATABASE_ARCHIVE):
conn = sqlite3.connect(dbFile)
return conn
def createMesagesDB(conn):
sql = '''
CREATE TABLE MESSAGES (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
cid TEXT,
datetime DATETIME,
from_wallet TEXT,
to_wallet TEXT,
collat FLOAT,
miner_fee FLOAT,
burn_fee FLOAT,
slash FLOAT,
transfer FLOAT
);'''
with conn:
c = conn.cursor()
c.execute(sql)
def addMessageToDB(conn, cid, datetime, fromWallet, toWallet, collat, minerFee, burnFee, slash, transfer):
sql = '''
INSERT INTO MESSAGES
(cid, datetime, from_wallet, to_wallet, collat, miner_fee, burn_fee, slash, transfer)
values(?, ?, ?, ?, ?, ?, ?, ?, ?)
'''
data = [
(
cid, datetime,
fromWallet,
toWallet,
nanoFilToFil(collat),
nanoFilToFil(minerFee),
nanoFilToFil(burnFee),
nanoFilToFil(slash),
nanoFilToFil(transfer)
)
]
conn.executemany(sql, data)
def getAllMessages(conn):
sql = 'SELECT * FROM MESSAGES'
with conn:
data = conn.execute(sql)
for d in data:
print(d)
if __name__ == '__main__':
c = connectDB()
# createMesagesDB(c)
addMessageToDB(c, 'testid', datetime.datetime.now(), 'fromwallet233425', 'towallettsdfaasdfasd', 100000000, 2222222, 3333333, 444444, 5555555)
getAllMessages(c)
```
#### File: 6enno/FarmXero/FilfoxScraper.py
```python
import requests
import json
import datetime
import time
import Addresses
import argparse
# This module scrapes data from filfox.info/ and populates a table[] of data
# This will be a 'table[]' of 'rows{}'
# May also output a csv with the following headers:
# MessageID, type, timestamp, transfer, collateral, miner-fee, burn-fee
MAX_MESSAGE_PAGES = 1000
minerAddress = Addresses.minerAddress
def messagesUrl(address, page):
return 'https://filfox.info/api/v1/address/'+address+'/messages?page='+str(page)+'&pageSize=100'
def messageDetailsUrl(address):
return 'https://filfox.info/api/v1/message/'+address
def blocksUrl(address, page):
return 'https://filfox.info/api/v1/address/'+address+'/blocks?pageSize=100&page='+str(page)
def txnUrl(address, page):
return 'https://filfox.info/api/v1/address/'+address+'/transfers?pageSize=100&page='+str(page)
def printTableCsv(table):
csvString = 'messageId, type, timestamp, transfer, collateral, miner-fee, burn-fee, slash, status\n'
for r in table:
csvString = csvString +\
r['cid']+','+\
r['type']+','+\
str(r['timestamp']) + ',' +\
str(r['transfer']) + ',' +\
str(r['collateral']) + ',' +\
str(r['miner-fee']) + ',' +\
str(r['burn-fee']) + ',' +\
str(r['slash']) + ',' +\
str(r['status']) + '\n'
return csvString
def writeTableToCSV(filename, table):
f = open(filename, 'w+')
f.write(printTableCsv(table))
f.close()
return 0
def printBlockTableCsv(table):
    csvString = 'messageId, timestamp, reward\n'
for r in table:
csvString = csvString +\
r['cid']+','+\
str(r['timestamp']) + ',' +\
str(r['win']) + '\n'
return csvString
def writeBlockTableToCSV(filename, table):
f = open(filename, 'w+')
f.write(printBlockTableCsv(table))
f.close()
return 0
def printTxnTableCsv(table):
csvString = 'Height, Timestamp, Message, From, To, Value, Type\n'
for r in table:
csvRow = ', '.join('{}'.format(v) for k,v in r.items()) + '\n'
csvString = csvString + csvRow
return csvString
def writeTxnTableToCSV(filename, table):
f = open(filename, 'w+')
f.write(printTxnTableCsv(table))
f.close()
return 0
# This pulls relevant data from messages over a date range.
# Note that the bounds work in reverse for timestamps: startDate is the latest time, endDate the earliest.
#
# @endDate is a datetime.date() type, e.g. the start of the day you want to see msgs for
# @startDate is a datetime.date() type, e.g. the start of the day where you want to stop getting msgs
# @wallet is a string eg f02xxxx for miner or longer for control
def getMessageTableForDateRange(endDate, startDate, wallet):
table = []
count = 0
timeStart = int(time.mktime(startDate.timetuple())) #Local NZ time
timeEnd = int(time.mktime(endDate.timetuple())) #Local NZ time
timestampReached = False
allMsgs = []
for page in range(0, MAX_MESSAGE_PAGES):
if timestampReached: break
print('about to send page request: '+ messagesUrl(wallet, page))
minerMessages = requests.get(messagesUrl(wallet, page)).json()
if(len(minerMessages['messages']) == 0):
print('Reached end of messages..')
print(minerMessages)
break
for m in minerMessages['messages']:
#count = count + 1
#if count > 30:
# break
# ==== TODO ==== Check if there is this message in the DB and skip if there is
if m['timestamp'] > timeStart: #larger timestamps are later message > starttime
print('timestamp ('+str(m['timestamp'])+') before timestart ' + str(timeStart))
continue
elif m['timestamp'] <= timeEnd:
print('timestamp ('+str(m['timestamp'])+') after timeend ' + str(timeEnd))
timestampReached = True
break
else:
allMsgs.append(m)
for m in allMsgs:
# count = count + 1
# print('found a message within timestamp range ' + str(count))
try:
row = {
'cid':m['cid'],
'type':m['method'],
'timestamp':m['timestamp'],
'transfer':0,
'collateral':0,
'miner-fee':0,
'burn-fee':0,
'slash':0,
'status':int(m['receipt']['exitCode'])
}
except KeyError:
print('message status unknown: '+m.get('cid'))
continue
# print(' getting msg deets...')
messageDeets = requests.get(messageDetailsUrl(m['cid'])).json()
# print(' got msg deets...')
for t in messageDeets['transfers']:
                # transfers and collat can go out or in but always show positive in messages (let's reverse the ones that are from this wallet)
direction = 1
                # we ignore burn fees, slashes, miner fees and collat if they're not from this wallet, to avoid double counting
fromThis = 0
if (t['from'] == wallet):
direction = -1
fromThis = 1
if t['type'] == 'burn-fee':
row['burn-fee'] = int(t['value']) * fromThis
elif t['type'] == 'burn':
row['slash'] = int(t['value']) * fromThis
elif t['type'] == 'miner-fee':
row['miner-fee'] = int(t['value']) * fromThis
elif t['type'] == 'transfer':
if row['status'] != 0:
pass
elif messageDeets['method'] == 'PreCommitSector' or messageDeets['method'] == 'ProveCommitSector':
row['collateral'] = fromThis * int(t['value'])
else:
row['transfer'] = direction * int(t['value'])
else:
print ('unknown message type: ' + t['type'])
table.append(row)
#print table
#print 'found '+str(count)+ ' messages'
print('found all '+str(len(allMsgs))+' messages for wallet ' + wallet)
return table
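# Illustrative helper (not part of the original file): fetch one calendar day
# of messages. Because of the reversed bounds described above, endDate is the
# earlier limit and startDate the later one, so a single day spans day..day+1.
def getMessagesForSingleDay(day, wallet=minerAddress):
    nextDay = day + datetime.timedelta(days=1)
    return getMessageTableForDateRange(day, nextDay, wallet)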
# This gets all the blocks that the miner has won over a length of time
# Note that time works in reverse for timestamps start = latest time, end = earliest time
#
# @endDate is a datetime.date() type eg the start of the day you want to see msgs for
# @startDate is a datetime.date() type, e.g. the start of the day where you want to stop getting msgs
# @wallet is a string that represents a miner wallet eg f02xxxx for miner or longer for control
def getBlocksTableForDateRange(endDate, startDate, wallet):
table = []
count = 0
timeStart = int(time.mktime(startDate.timetuple())) #Local NZ time
timeEnd = int(time.mktime(endDate.timetuple())) #Local NZ time
timestampReached = False
for page in range(0, MAX_MESSAGE_PAGES):
if timestampReached: break
print('about to send page request')
minerBlocks = requests.get(blocksUrl(wallet, page)).json()
print('total blocks: ' + str(minerBlocks['totalCount']))
if(len(minerBlocks['blocks']) == 0):
print('Reached end of blocks')
break
for b in minerBlocks['blocks']:
# print('reward '+str(b['reward']))
#count = count + 1
#if count > 30:
# break
if b['timestamp'] > timeStart: #larger timestamps are later message > starttime
# print('timestamp ('+str(b['timestamp'])+') before timestart ' + str(timeStart))
continue
if b['timestamp'] <= timeEnd:
# print('timestamp ('+str(b['timestamp'])+') after timeend ' + str(timeEnd))
timestampReached = True
break
count = count + 1
# print('found a block within timestamp range ' + str(count))
row = {
'cid':b['cid'],
'timestamp':b['timestamp'],
'win':b['reward'],
}
table.append(row)
#print table
#print 'found '+str(count)+ ' messages'
return table
def getSimpleTxnJson(endDate, startDate, wallet):
table = []
count = 0
timeStart = int(time.mktime(startDate.timetuple())) #Local NZ time
timeEnd = int(time.mktime(endDate.timetuple())) #Local NZ time
timestampReached = False
for page in range(0, MAX_MESSAGE_PAGES):
if timestampReached: break
print('about to send txn page request pg ' + str(page))
txns = requests.get(txnUrl(wallet, page)).json()
if(txns['totalCount'] == 0):
print('Reached end of txns')
break
for t in txns['transfers']:
if t['timestamp'] > timeStart: #larger timestamps are later message > starttime
print('timestamp ('+str(t['timestamp'])+') before timestart ' + str(timeStart))
continue
if t['timestamp'] <= timeEnd:
print('timestamp ('+str(t['timestamp'])+') after timeend ' + str(timeEnd))
timestampReached = True
break
count = count + 1
# print('found a block within timestamp range ' + str(count))
row = {
'Height':t['height'],
'Timestamp':t['timestamp'],
'Message':t['message'],
'From':t['from'],
'To':t['to'],
'Value':t['value'],
'Type':t['type'],
}
# print('row logged')
table.append(row)
#print table
#print 'found '+str(count)+ ' messages'
return table
# Can run this as standalone to grab filfox transactions
if __name__ == '__main__':
p = argparse.ArgumentParser(description='FilfoxScraper - Get data from Filfox')
p.add_argument('-b', '--blocks', help='get blocks instead of messages (only applies to miner addresses)', required=False, default=False, action='store_true')
p.add_argument('-t', '--transactions', help='get simple list of transactions', required=False, default=False, action='store_true')
p.add_argument('-w', '--wallet', help='specify the wallet address you want transactions for', required=False, default=Addresses.minerAddress)
p.add_argument('-s', '--start', help='specify the start date that you want to get transactions from (format yyyy-mm-dd)', required=False, default='2020-01-01')
p.add_argument('-e', '--end', help='specify the end date that you want to get transactions until (format yyyy-mm-dd)', required=False, default=datetime.date.today().isoformat())
p.add_argument('-f', '--filesave', help='specify the file to save the csv output to', required=False)
args = p.parse_args()
# startDate = datetime.date.fromisoformat(args.start)
startDate = datetime.datetime.strptime(args.start, "%Y-%m-%d")
# endDate = datetime.date.fromisoformat(args.end)
endDate = datetime.datetime.strptime(args.end, "%Y-%m-%d")
print(startDate)
print(endDate)
table = {}
if (args.blocks):
table = getBlocksTableForDateRange(startDate, endDate, args.wallet)
printBlockTableCsv(table)
if(args.filesave):
writeBlockTableToCSV(args.filesave, table)
elif(args.transactions):
table = getSimpleTxnJson(startDate, endDate, args.wallet)
print(printTxnTableCsv(table))
if(args.filesave):
writeTxnTableToCSV(args.filesave, table)
else:
table = getMessageTableForDateRange(startDate, endDate, args.wallet)
printTableCsv(table)
if(args.filesave):
writeTableToCSV(args.filesave, table)
# addr = Addresses.minerAddress
# # addr = Addresses.wallet5
#
# t = getMessageTableForDateRange(datetime.date(2021,3,25), datetime.date(2021,3,26), addr)
# print(t)
```
#### File: accounting/models/allocation.py
```python
import re # noqa: F401
from xero_python.models import BaseModel
class Allocation(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"invoice": "Invoice",
"overpayment": "Overpayment",
"prepayment": "Prepayment",
"credit_note": "CreditNote",
"amount": "float",
"date": "date[ms-format]",
"status_attribute_string": "str",
"validation_errors": "list[ValidationError]",
}
attribute_map = {
"invoice": "Invoice",
"overpayment": "Overpayment",
"prepayment": "Prepayment",
"credit_note": "CreditNote",
"amount": "Amount",
"date": "Date",
"status_attribute_string": "StatusAttributeString",
"validation_errors": "ValidationErrors",
}
def __init__(
self,
invoice=None,
overpayment=None,
prepayment=None,
credit_note=None,
amount=None,
date=None,
status_attribute_string=None,
validation_errors=None,
): # noqa: E501
"""Allocation - a model defined in OpenAPI""" # noqa: E501
self._invoice = None
self._overpayment = None
self._prepayment = None
self._credit_note = None
self._amount = None
self._date = None
self._status_attribute_string = None
self._validation_errors = None
self.discriminator = None
self.invoice = invoice
if overpayment is not None:
self.overpayment = overpayment
if prepayment is not None:
self.prepayment = prepayment
if credit_note is not None:
self.credit_note = credit_note
self.amount = amount
self.date = date
if status_attribute_string is not None:
self.status_attribute_string = status_attribute_string
if validation_errors is not None:
self.validation_errors = validation_errors
@property
def invoice(self):
"""Gets the invoice of this Allocation. # noqa: E501
:return: The invoice of this Allocation. # noqa: E501
:rtype: Invoice
"""
return self._invoice
@invoice.setter
def invoice(self, invoice):
"""Sets the invoice of this Allocation.
:param invoice: The invoice of this Allocation. # noqa: E501
:type: Invoice
"""
if invoice is None:
raise ValueError(
"Invalid value for `invoice`, must not be `None`"
) # noqa: E501
self._invoice = invoice
@property
def overpayment(self):
"""Gets the overpayment of this Allocation. # noqa: E501
:return: The overpayment of this Allocation. # noqa: E501
:rtype: Overpayment
"""
return self._overpayment
@overpayment.setter
def overpayment(self, overpayment):
"""Sets the overpayment of this Allocation.
:param overpayment: The overpayment of this Allocation. # noqa: E501
:type: Overpayment
"""
self._overpayment = overpayment
@property
def prepayment(self):
"""Gets the prepayment of this Allocation. # noqa: E501
:return: The prepayment of this Allocation. # noqa: E501
:rtype: Prepayment
"""
return self._prepayment
@prepayment.setter
def prepayment(self, prepayment):
"""Sets the prepayment of this Allocation.
:param prepayment: The prepayment of this Allocation. # noqa: E501
:type: Prepayment
"""
self._prepayment = prepayment
@property
def credit_note(self):
"""Gets the credit_note of this Allocation. # noqa: E501
:return: The credit_note of this Allocation. # noqa: E501
:rtype: CreditNote
"""
return self._credit_note
@credit_note.setter
def credit_note(self, credit_note):
"""Sets the credit_note of this Allocation.
:param credit_note: The credit_note of this Allocation. # noqa: E501
:type: CreditNote
"""
self._credit_note = credit_note
@property
def amount(self):
"""Gets the amount of this Allocation. # noqa: E501
the amount being applied to the invoice # noqa: E501
:return: The amount of this Allocation. # noqa: E501
:rtype: float
"""
return self._amount
@amount.setter
def amount(self, amount):
"""Sets the amount of this Allocation.
the amount being applied to the invoice # noqa: E501
:param amount: The amount of this Allocation. # noqa: E501
:type: float
"""
if amount is None:
raise ValueError(
"Invalid value for `amount`, must not be `None`"
) # noqa: E501
self._amount = amount
@property
def date(self):
"""Gets the date of this Allocation. # noqa: E501
the date the allocation is applied YYYY-MM-DD. # noqa: E501
:return: The date of this Allocation. # noqa: E501
:rtype: date
"""
return self._date
@date.setter
def date(self, date):
"""Sets the date of this Allocation.
the date the allocation is applied YYYY-MM-DD. # noqa: E501
:param date: The date of this Allocation. # noqa: E501
:type: date
"""
if date is None:
raise ValueError(
"Invalid value for `date`, must not be `None`"
) # noqa: E501
self._date = date
@property
def status_attribute_string(self):
"""Gets the status_attribute_string of this Allocation. # noqa: E501
A string to indicate if a invoice status # noqa: E501
:return: The status_attribute_string of this Allocation. # noqa: E501
:rtype: str
"""
return self._status_attribute_string
@status_attribute_string.setter
def status_attribute_string(self, status_attribute_string):
"""Sets the status_attribute_string of this Allocation.
A string to indicate if a invoice status # noqa: E501
:param status_attribute_string: The status_attribute_string of this Allocation. # noqa: E501
:type: str
"""
self._status_attribute_string = status_attribute_string
@property
def validation_errors(self):
"""Gets the validation_errors of this Allocation. # noqa: E501
Displays array of validation error messages from the API # noqa: E501
:return: The validation_errors of this Allocation. # noqa: E501
:rtype: list[ValidationError]
"""
return self._validation_errors
@validation_errors.setter
def validation_errors(self, validation_errors):
"""Sets the validation_errors of this Allocation.
Displays array of validation error messages from the API # noqa: E501
:param validation_errors: The validation_errors of this Allocation. # noqa: E501
:type: list[ValidationError]
"""
self._validation_errors = validation_errors
```
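These generated models are plain data holders: `openapi_types` and `attribute_map` describe how each attribute maps to the JSON payload, and the property setters enforce which fields are required. A minimal construction sketch, assuming the standard `xero_python` package layout is importable (the invoice ID, amount and date are made-up values):
```python
from datetime import date
from xero_python.accounting import Allocation, Invoice

# All values below are illustrative. Allocation requires invoice, amount and
# date; its setters raise ValueError if any of them is None.
allocation = Allocation(
    invoice=Invoice(invoice_id="00000000-0000-0000-0000-000000000000"),
    amount=100.0,
    date=date(2021, 4, 1),
)
```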
#### File: accounting/models/bank_transaction.py
```python
import re # noqa: F401
from xero_python.models import BaseModel
class BankTransaction(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"type": "str",
"contact": "Contact",
"line_items": "list[LineItem]",
"bank_account": "Account",
"is_reconciled": "bool",
"date": "date[ms-format]",
"reference": "str",
"currency_code": "CurrencyCode",
"currency_rate": "float",
"url": "str",
"status": "str",
"line_amount_types": "LineAmountTypes",
"sub_total": "float",
"total_tax": "float",
"total": "float",
"bank_transaction_id": "str",
"prepayment_id": "str",
"overpayment_id": "str",
"updated_date_utc": "datetime[ms-format]",
"has_attachments": "bool",
"status_attribute_string": "str",
"validation_errors": "list[ValidationError]",
}
attribute_map = {
"type": "Type",
"contact": "Contact",
"line_items": "LineItems",
"bank_account": "BankAccount",
"is_reconciled": "IsReconciled",
"date": "Date",
"reference": "Reference",
"currency_code": "CurrencyCode",
"currency_rate": "CurrencyRate",
"url": "Url",
"status": "Status",
"line_amount_types": "LineAmountTypes",
"sub_total": "SubTotal",
"total_tax": "TotalTax",
"total": "Total",
"bank_transaction_id": "BankTransactionID",
"prepayment_id": "PrepaymentID",
"overpayment_id": "OverpaymentID",
"updated_date_utc": "UpdatedDateUTC",
"has_attachments": "HasAttachments",
"status_attribute_string": "StatusAttributeString",
"validation_errors": "ValidationErrors",
}
def __init__(
self,
type=None,
contact=None,
line_items=None,
bank_account=None,
is_reconciled=None,
date=None,
reference=None,
currency_code=None,
currency_rate=None,
url=None,
status=None,
line_amount_types=None,
sub_total=None,
total_tax=None,
total=None,
bank_transaction_id=None,
prepayment_id=None,
overpayment_id=None,
updated_date_utc=None,
has_attachments=False,
status_attribute_string=None,
validation_errors=None,
): # noqa: E501
"""BankTransaction - a model defined in OpenAPI""" # noqa: E501
self._type = None
self._contact = None
self._line_items = None
self._bank_account = None
self._is_reconciled = None
self._date = None
self._reference = None
self._currency_code = None
self._currency_rate = None
self._url = None
self._status = None
self._line_amount_types = None
self._sub_total = None
self._total_tax = None
self._total = None
self._bank_transaction_id = None
self._prepayment_id = None
self._overpayment_id = None
self._updated_date_utc = None
self._has_attachments = None
self._status_attribute_string = None
self._validation_errors = None
self.discriminator = None
self.type = type
if contact is not None:
self.contact = contact
self.line_items = line_items
self.bank_account = bank_account
if is_reconciled is not None:
self.is_reconciled = is_reconciled
if date is not None:
self.date = date
if reference is not None:
self.reference = reference
if currency_code is not None:
self.currency_code = currency_code
if currency_rate is not None:
self.currency_rate = currency_rate
if url is not None:
self.url = url
if status is not None:
self.status = status
if line_amount_types is not None:
self.line_amount_types = line_amount_types
if sub_total is not None:
self.sub_total = sub_total
if total_tax is not None:
self.total_tax = total_tax
if total is not None:
self.total = total
if bank_transaction_id is not None:
self.bank_transaction_id = bank_transaction_id
if prepayment_id is not None:
self.prepayment_id = prepayment_id
if overpayment_id is not None:
self.overpayment_id = overpayment_id
if updated_date_utc is not None:
self.updated_date_utc = updated_date_utc
if has_attachments is not None:
self.has_attachments = has_attachments
if status_attribute_string is not None:
self.status_attribute_string = status_attribute_string
if validation_errors is not None:
self.validation_errors = validation_errors
@property
def type(self):
"""Gets the type of this BankTransaction. # noqa: E501
See Bank Transaction Types # noqa: E501
:return: The type of this BankTransaction. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this BankTransaction.
See Bank Transaction Types # noqa: E501
:param type: The type of this BankTransaction. # noqa: E501
:type: str
"""
if type is None:
raise ValueError(
"Invalid value for `type`, must not be `None`"
) # noqa: E501
allowed_values = [
"RECEIVE",
"RECEIVE-OVERPAYMENT",
"RECEIVE-PREPAYMENT",
"SPEND",
"SPEND-OVERPAYMENT",
"SPEND-PREPAYMENT",
"RECEIVE-TRANSFER",
"SPEND-TRANSFER",
"None",
] # noqa: E501
if type:
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}".format( # noqa: E501
type, allowed_values
)
)
self._type = type
@property
def contact(self):
"""Gets the contact of this BankTransaction. # noqa: E501
:return: The contact of this BankTransaction. # noqa: E501
:rtype: Contact
"""
return self._contact
@contact.setter
def contact(self, contact):
"""Sets the contact of this BankTransaction.
:param contact: The contact of this BankTransaction. # noqa: E501
:type: Contact
"""
self._contact = contact
@property
def line_items(self):
"""Gets the line_items of this BankTransaction. # noqa: E501
See LineItems # noqa: E501
:return: The line_items of this BankTransaction. # noqa: E501
:rtype: list[LineItem]
"""
return self._line_items
@line_items.setter
def line_items(self, line_items):
"""Sets the line_items of this BankTransaction.
See LineItems # noqa: E501
:param line_items: The line_items of this BankTransaction. # noqa: E501
:type: list[LineItem]
"""
if line_items is None:
raise ValueError(
"Invalid value for `line_items`, must not be `None`"
) # noqa: E501
self._line_items = line_items
@property
def bank_account(self):
"""Gets the bank_account of this BankTransaction. # noqa: E501
:return: The bank_account of this BankTransaction. # noqa: E501
:rtype: Account
"""
return self._bank_account
@bank_account.setter
def bank_account(self, bank_account):
"""Sets the bank_account of this BankTransaction.
:param bank_account: The bank_account of this BankTransaction. # noqa: E501
:type: Account
"""
if bank_account is None:
raise ValueError(
"Invalid value for `bank_account`, must not be `None`"
) # noqa: E501
self._bank_account = bank_account
@property
def is_reconciled(self):
"""Gets the is_reconciled of this BankTransaction. # noqa: E501
Boolean to show if transaction is reconciled # noqa: E501
:return: The is_reconciled of this BankTransaction. # noqa: E501
:rtype: bool
"""
return self._is_reconciled
@is_reconciled.setter
def is_reconciled(self, is_reconciled):
"""Sets the is_reconciled of this BankTransaction.
Boolean to show if transaction is reconciled # noqa: E501
:param is_reconciled: The is_reconciled of this BankTransaction. # noqa: E501
:type: bool
"""
self._is_reconciled = is_reconciled
@property
def date(self):
"""Gets the date of this BankTransaction. # noqa: E501
Date of transaction – YYYY-MM-DD # noqa: E501
:return: The date of this BankTransaction. # noqa: E501
:rtype: date
"""
return self._date
@date.setter
def date(self, date):
"""Sets the date of this BankTransaction.
Date of transaction – YYYY-MM-DD # noqa: E501
:param date: The date of this BankTransaction. # noqa: E501
:type: date
"""
self._date = date
@property
def reference(self):
"""Gets the reference of this BankTransaction. # noqa: E501
Reference for the transaction. Only supported for SPEND and RECEIVE transactions. # noqa: E501
:return: The reference of this BankTransaction. # noqa: E501
:rtype: str
"""
return self._reference
@reference.setter
def reference(self, reference):
"""Sets the reference of this BankTransaction.
Reference for the transaction. Only supported for SPEND and RECEIVE transactions. # noqa: E501
:param reference: The reference of this BankTransaction. # noqa: E501
:type: str
"""
self._reference = reference
@property
def currency_code(self):
"""Gets the currency_code of this BankTransaction. # noqa: E501
:return: The currency_code of this BankTransaction. # noqa: E501
:rtype: CurrencyCode
"""
return self._currency_code
@currency_code.setter
def currency_code(self, currency_code):
"""Sets the currency_code of this BankTransaction.
:param currency_code: The currency_code of this BankTransaction. # noqa: E501
:type: CurrencyCode
"""
self._currency_code = currency_code
@property
def currency_rate(self):
"""Gets the currency_rate of this BankTransaction. # noqa: E501
Exchange rate to base currency when money is spent or received. e.g.0.7500 Only used for bank transactions in non base currency. If this isn’t specified for non base currency accounts then either the user-defined rate (preference) or the XE.com day rate will be used. Setting currency is only supported on overpayments. # noqa: E501
:return: The currency_rate of this BankTransaction. # noqa: E501
:rtype: float
"""
return self._currency_rate
@currency_rate.setter
def currency_rate(self, currency_rate):
"""Sets the currency_rate of this BankTransaction.
Exchange rate to base currency when money is spent or received. e.g.0.7500 Only used for bank transactions in non base currency. If this isn’t specified for non base currency accounts then either the user-defined rate (preference) or the XE.com day rate will be used. Setting currency is only supported on overpayments. # noqa: E501
:param currency_rate: The currency_rate of this BankTransaction. # noqa: E501
:type: float
"""
self._currency_rate = currency_rate
@property
def url(self):
"""Gets the url of this BankTransaction. # noqa: E501
URL link to a source document – shown as “Go to App Name” # noqa: E501
:return: The url of this BankTransaction. # noqa: E501
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this BankTransaction.
URL link to a source document – shown as “Go to App Name” # noqa: E501
:param url: The url of this BankTransaction. # noqa: E501
:type: str
"""
self._url = url
@property
def status(self):
"""Gets the status of this BankTransaction. # noqa: E501
See Bank Transaction Status Codes # noqa: E501
:return: The status of this BankTransaction. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this BankTransaction.
See Bank Transaction Status Codes # noqa: E501
:param status: The status of this BankTransaction. # noqa: E501
:type: str
"""
allowed_values = ["AUTHORISED", "DELETED", "VOIDED", "None"] # noqa: E501
if status:
if status not in allowed_values:
raise ValueError(
"Invalid value for `status` ({0}), must be one of {1}".format( # noqa: E501
status, allowed_values
)
)
self._status = status
@property
def line_amount_types(self):
"""Gets the line_amount_types of this BankTransaction. # noqa: E501
:return: The line_amount_types of this BankTransaction. # noqa: E501
:rtype: LineAmountTypes
"""
return self._line_amount_types
@line_amount_types.setter
def line_amount_types(self, line_amount_types):
"""Sets the line_amount_types of this BankTransaction.
:param line_amount_types: The line_amount_types of this BankTransaction. # noqa: E501
:type: LineAmountTypes
"""
self._line_amount_types = line_amount_types
@property
def sub_total(self):
"""Gets the sub_total of this BankTransaction. # noqa: E501
Total of bank transaction excluding taxes # noqa: E501
:return: The sub_total of this BankTransaction. # noqa: E501
:rtype: float
"""
return self._sub_total
@sub_total.setter
def sub_total(self, sub_total):
"""Sets the sub_total of this BankTransaction.
Total of bank transaction excluding taxes # noqa: E501
:param sub_total: The sub_total of this BankTransaction. # noqa: E501
:type: float
"""
self._sub_total = sub_total
@property
def total_tax(self):
"""Gets the total_tax of this BankTransaction. # noqa: E501
Total tax on bank transaction # noqa: E501
:return: The total_tax of this BankTransaction. # noqa: E501
:rtype: float
"""
return self._total_tax
@total_tax.setter
def total_tax(self, total_tax):
"""Sets the total_tax of this BankTransaction.
Total tax on bank transaction # noqa: E501
:param total_tax: The total_tax of this BankTransaction. # noqa: E501
:type: float
"""
self._total_tax = total_tax
@property
def total(self):
"""Gets the total of this BankTransaction. # noqa: E501
Total of bank transaction tax inclusive # noqa: E501
:return: The total of this BankTransaction. # noqa: E501
:rtype: float
"""
return self._total
@total.setter
def total(self, total):
"""Sets the total of this BankTransaction.
Total of bank transaction tax inclusive # noqa: E501
:param total: The total of this BankTransaction. # noqa: E501
:type: float
"""
self._total = total
@property
def bank_transaction_id(self):
"""Gets the bank_transaction_id of this BankTransaction. # noqa: E501
Xero generated unique identifier for bank transaction # noqa: E501
:return: The bank_transaction_id of this BankTransaction. # noqa: E501
:rtype: str
"""
return self._bank_transaction_id
@bank_transaction_id.setter
def bank_transaction_id(self, bank_transaction_id):
"""Sets the bank_transaction_id of this BankTransaction.
Xero generated unique identifier for bank transaction # noqa: E501
:param bank_transaction_id: The bank_transaction_id of this BankTransaction. # noqa: E501
:type: str
"""
self._bank_transaction_id = bank_transaction_id
@property
def prepayment_id(self):
"""Gets the prepayment_id of this BankTransaction. # noqa: E501
Xero generated unique identifier for a Prepayment. This will be returned on BankTransactions with a Type of SPEND-PREPAYMENT or RECEIVE-PREPAYMENT # noqa: E501
:return: The prepayment_id of this BankTransaction. # noqa: E501
:rtype: str
"""
return self._prepayment_id
@prepayment_id.setter
def prepayment_id(self, prepayment_id):
"""Sets the prepayment_id of this BankTransaction.
Xero generated unique identifier for a Prepayment. This will be returned on BankTransactions with a Type of SPEND-PREPAYMENT or RECEIVE-PREPAYMENT # noqa: E501
:param prepayment_id: The prepayment_id of this BankTransaction. # noqa: E501
:type: str
"""
self._prepayment_id = prepayment_id
@property
def overpayment_id(self):
"""Gets the overpayment_id of this BankTransaction. # noqa: E501
Xero generated unique identifier for an Overpayment. This will be returned on BankTransactions with a Type of SPEND-OVERPAYMENT or RECEIVE-OVERPAYMENT # noqa: E501
:return: The overpayment_id of this BankTransaction. # noqa: E501
:rtype: str
"""
return self._overpayment_id
@overpayment_id.setter
def overpayment_id(self, overpayment_id):
"""Sets the overpayment_id of this BankTransaction.
Xero generated unique identifier for an Overpayment. This will be returned on BankTransactions with a Type of SPEND-OVERPAYMENT or RECEIVE-OVERPAYMENT # noqa: E501
:param overpayment_id: The overpayment_id of this BankTransaction. # noqa: E501
:type: str
"""
self._overpayment_id = overpayment_id
@property
def updated_date_utc(self):
"""Gets the updated_date_utc of this BankTransaction. # noqa: E501
Last modified date UTC format # noqa: E501
:return: The updated_date_utc of this BankTransaction. # noqa: E501
:rtype: datetime
"""
return self._updated_date_utc
@updated_date_utc.setter
def updated_date_utc(self, updated_date_utc):
"""Sets the updated_date_utc of this BankTransaction.
Last modified date UTC format # noqa: E501
:param updated_date_utc: The updated_date_utc of this BankTransaction. # noqa: E501
:type: datetime
"""
self._updated_date_utc = updated_date_utc
@property
def has_attachments(self):
"""Gets the has_attachments of this BankTransaction. # noqa: E501
Boolean to indicate if a bank transaction has an attachment # noqa: E501
:return: The has_attachments of this BankTransaction. # noqa: E501
:rtype: bool
"""
return self._has_attachments
@has_attachments.setter
def has_attachments(self, has_attachments):
"""Sets the has_attachments of this BankTransaction.
Boolean to indicate if a bank transaction has an attachment # noqa: E501
:param has_attachments: The has_attachments of this BankTransaction. # noqa: E501
:type: bool
"""
self._has_attachments = has_attachments
@property
def status_attribute_string(self):
"""Gets the status_attribute_string of this BankTransaction. # noqa: E501
A string to indicate if a invoice status # noqa: E501
:return: The status_attribute_string of this BankTransaction. # noqa: E501
:rtype: str
"""
return self._status_attribute_string
@status_attribute_string.setter
def status_attribute_string(self, status_attribute_string):
"""Sets the status_attribute_string of this BankTransaction.
A string to indicate if a invoice status # noqa: E501
:param status_attribute_string: The status_attribute_string of this BankTransaction. # noqa: E501
:type: str
"""
self._status_attribute_string = status_attribute_string
@property
def validation_errors(self):
"""Gets the validation_errors of this BankTransaction. # noqa: E501
Displays array of validation error messages from the API # noqa: E501
:return: The validation_errors of this BankTransaction. # noqa: E501
:rtype: list[ValidationError]
"""
return self._validation_errors
@validation_errors.setter
def validation_errors(self, validation_errors):
"""Sets the validation_errors of this BankTransaction.
Displays array of validation error messages from the API # noqa: E501
:param validation_errors: The validation_errors of this BankTransaction. # noqa: E501
:type: list[ValidationError]
"""
self._validation_errors = validation_errors
```
#### File: accounting/models/branding_theme.py
```python
import re # noqa: F401
from xero_python.models import BaseModel
class BrandingTheme(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"branding_theme_id": "str",
"name": "str",
"logo_url": "str",
"type": "str",
"sort_order": "int",
"created_date_utc": "datetime[ms-format]",
}
attribute_map = {
"branding_theme_id": "BrandingThemeID",
"name": "Name",
"logo_url": "LogoUrl",
"type": "Type",
"sort_order": "SortOrder",
"created_date_utc": "CreatedDateUTC",
}
def __init__(
self,
branding_theme_id=None,
name=None,
logo_url=None,
type=None,
sort_order=None,
created_date_utc=None,
): # noqa: E501
"""BrandingTheme - a model defined in OpenAPI""" # noqa: E501
self._branding_theme_id = None
self._name = None
self._logo_url = None
self._type = None
self._sort_order = None
self._created_date_utc = None
self.discriminator = None
if branding_theme_id is not None:
self.branding_theme_id = branding_theme_id
if name is not None:
self.name = name
if logo_url is not None:
self.logo_url = logo_url
if type is not None:
self.type = type
if sort_order is not None:
self.sort_order = sort_order
if created_date_utc is not None:
self.created_date_utc = created_date_utc
@property
def branding_theme_id(self):
"""Gets the branding_theme_id of this BrandingTheme. # noqa: E501
Xero identifier # noqa: E501
:return: The branding_theme_id of this BrandingTheme. # noqa: E501
:rtype: str
"""
return self._branding_theme_id
@branding_theme_id.setter
def branding_theme_id(self, branding_theme_id):
"""Sets the branding_theme_id of this BrandingTheme.
Xero identifier # noqa: E501
:param branding_theme_id: The branding_theme_id of this BrandingTheme. # noqa: E501
:type: str
"""
self._branding_theme_id = branding_theme_id
@property
def name(self):
"""Gets the name of this BrandingTheme. # noqa: E501
Name of branding theme # noqa: E501
:return: The name of this BrandingTheme. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this BrandingTheme.
Name of branding theme # noqa: E501
:param name: The name of this BrandingTheme. # noqa: E501
:type: str
"""
self._name = name
@property
def logo_url(self):
"""Gets the logo_url of this BrandingTheme. # noqa: E501
The location of the image file used as the logo on this branding theme # noqa: E501
:return: The logo_url of this BrandingTheme. # noqa: E501
:rtype: str
"""
return self._logo_url
@logo_url.setter
def logo_url(self, logo_url):
"""Sets the logo_url of this BrandingTheme.
The location of the image file used as the logo on this branding theme # noqa: E501
:param logo_url: The logo_url of this BrandingTheme. # noqa: E501
:type: str
"""
self._logo_url = logo_url
@property
def type(self):
"""Gets the type of this BrandingTheme. # noqa: E501
Always INVOICE # noqa: E501
:return: The type of this BrandingTheme. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this BrandingTheme.
Always INVOICE # noqa: E501
:param type: The type of this BrandingTheme. # noqa: E501
:type: str
"""
allowed_values = ["INVOICE", "None"] # noqa: E501
if type:
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}".format( # noqa: E501
type, allowed_values
)
)
self._type = type
@property
def sort_order(self):
"""Gets the sort_order of this BrandingTheme. # noqa: E501
Integer – ranked order of branding theme. The default branding theme has a value of 0 # noqa: E501
:return: The sort_order of this BrandingTheme. # noqa: E501
:rtype: int
"""
return self._sort_order
@sort_order.setter
def sort_order(self, sort_order):
"""Sets the sort_order of this BrandingTheme.
Integer – ranked order of branding theme. The default branding theme has a value of 0 # noqa: E501
:param sort_order: The sort_order of this BrandingTheme. # noqa: E501
:type: int
"""
self._sort_order = sort_order
@property
def created_date_utc(self):
"""Gets the created_date_utc of this BrandingTheme. # noqa: E501
UTC timestamp of creation date of branding theme # noqa: E501
:return: The created_date_utc of this BrandingTheme. # noqa: E501
:rtype: datetime
"""
return self._created_date_utc
@created_date_utc.setter
def created_date_utc(self, created_date_utc):
"""Sets the created_date_utc of this BrandingTheme.
UTC timestamp of creation date of branding theme # noqa: E501
:param created_date_utc: The created_date_utc of this BrandingTheme. # noqa: E501
:type: datetime
"""
self._created_date_utc = created_date_utc
```
#### File: accounting/models/error.py
```python
import re # noqa: F401
from xero_python.models import BaseModel
class Error(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"error_number": "int",
"type": "str",
"message": "str",
"elements": "list[Element]",
}
attribute_map = {
"error_number": "ErrorNumber",
"type": "Type",
"message": "Message",
"elements": "Elements",
}
def __init__(
self, error_number=None, type=None, message=None, elements=None
): # noqa: E501
"""Error - a model defined in OpenAPI""" # noqa: E501
self._error_number = None
self._type = None
self._message = None
self._elements = None
self.discriminator = None
if error_number is not None:
self.error_number = error_number
if type is not None:
self.type = type
if message is not None:
self.message = message
if elements is not None:
self.elements = elements
@property
def error_number(self):
"""Gets the error_number of this Error. # noqa: E501
Exception number # noqa: E501
:return: The error_number of this Error. # noqa: E501
:rtype: int
"""
return self._error_number
@error_number.setter
def error_number(self, error_number):
"""Sets the error_number of this Error.
Exception number # noqa: E501
:param error_number: The error_number of this Error. # noqa: E501
:type: int
"""
self._error_number = error_number
@property
def type(self):
"""Gets the type of this Error. # noqa: E501
Exception type # noqa: E501
:return: The type of this Error. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this Error.
Exception type # noqa: E501
:param type: The type of this Error. # noqa: E501
:type: str
"""
self._type = type
@property
def message(self):
"""Gets the message of this Error. # noqa: E501
Exception message # noqa: E501
:return: The message of this Error. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this Error.
Exception message # noqa: E501
:param message: The message of this Error. # noqa: E501
:type: str
"""
self._message = message
@property
def elements(self):
"""Gets the elements of this Error. # noqa: E501
Array of Elements of validation Errors # noqa: E501
:return: The elements of this Error. # noqa: E501
:rtype: list[Element]
"""
return self._elements
@elements.setter
def elements(self, elements):
"""Sets the elements of this Error.
Array of Elements of validation Errors # noqa: E501
:param elements: The elements of this Error. # noqa: E501
:type: list[Element]
"""
self._elements = elements
```
#### File: accounting/models/quotes.py
```python
import re # noqa: F401
from xero_python.models import BaseModel
class Quotes(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {"quotes": "list[Quote]"}
attribute_map = {"quotes": "Quotes"}
def __init__(self, quotes=None): # noqa: E501
"""Quotes - a model defined in OpenAPI""" # noqa: E501
self._quotes = None
self.discriminator = None
if quotes is not None:
self.quotes = quotes
@property
def quotes(self):
"""Gets the quotes of this Quotes. # noqa: E501
:return: The quotes of this Quotes. # noqa: E501
:rtype: list[Quote]
"""
return self._quotes
@quotes.setter
def quotes(self, quotes):
"""Sets the quotes of this Quotes.
:param quotes: The quotes of this Quotes. # noqa: E501
:type: list[Quote]
"""
self._quotes = quotes
```
#### File: identity/models/access_token.py
```python
import re # noqa: F401
from xero_python.models import BaseModel
class AccessToken(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"id_token": "str",
"access_token": "str",
"expires_in": "float",
"token_type": "str",
"refresh_token": "str",
}
attribute_map = {
"id_token": "id_token",
"access_token": "access_token",
"expires_in": "expires_in",
"token_type": "token_type",
"refresh_token": "refresh_token",
}
def __init__(
self,
id_token=None,
access_token=None,
expires_in=None,
token_type=None,
refresh_token=None,
): # noqa: E501
"""AccessToken - a model defined in OpenAPI""" # noqa: E501
self._id_token = None
self._access_token = None
self._expires_in = None
self._token_type = None
self._refresh_token = None
self.discriminator = None
if id_token is not None:
self.id_token = id_token
if access_token is not None:
self.access_token = access_token
if expires_in is not None:
self.expires_in = expires_in
if token_type is not None:
self.token_type = token_type
if refresh_token is not None:
self.refresh_token = refresh_token
@property
def id_token(self):
"""Gets the id_token of this AccessToken. # noqa: E501
Xero unique identifier # noqa: E501
:return: The id_token of this AccessToken. # noqa: E501
:rtype: str
"""
return self._id_token
@id_token.setter
def id_token(self, id_token):
"""Sets the id_token of this AccessToken.
Xero unique identifier # noqa: E501
:param id_token: The id_token of this AccessToken. # noqa: E501
:type: str
"""
self._id_token = id_token
@property
def access_token(self):
"""Gets the access_token of this AccessToken. # noqa: E501
access token provided during authentication flow # noqa: E501
:return: The access_token of this AccessToken. # noqa: E501
:rtype: str
"""
return self._access_token
@access_token.setter
def access_token(self, access_token):
"""Sets the access_token of this AccessToken.
access token provided during authentication flow # noqa: E501
:param access_token: The access_token of this AccessToken. # noqa: E501
:type: str
"""
self._access_token = access_token
@property
def expires_in(self):
"""Gets the expires_in of this AccessToken. # noqa: E501
time in milliseconds until access token expires. # noqa: E501
:return: The expires_in of this AccessToken. # noqa: E501
:rtype: float
"""
return self._expires_in
@expires_in.setter
def expires_in(self, expires_in):
"""Sets the expires_in of this AccessToken.
time in milliseconds until access token expires. # noqa: E501
:param expires_in: The expires_in of this AccessToken. # noqa: E501
:type: float
"""
self._expires_in = expires_in
@property
def token_type(self):
"""Gets the token_type of this AccessToken. # noqa: E501
type of token i.e. Bearer # noqa: E501
:return: The token_type of this AccessToken. # noqa: E501
:rtype: str
"""
return self._token_type
@token_type.setter
def token_type(self, token_type):
"""Sets the token_type of this AccessToken.
type of token i.e. Bearer # noqa: E501
:param token_type: The token_type of this AccessToken. # noqa: E501
:type: str
"""
self._token_type = token_type
@property
def refresh_token(self):
"""Gets the refresh_token of this AccessToken. # noqa: E501
token used to refresh an expired access token # noqa: E501
:return: The refresh_token of this AccessToken. # noqa: E501
:rtype: str
"""
return self._refresh_token
@refresh_token.setter
def refresh_token(self, refresh_token):
"""Sets the refresh_token of this AccessToken.
token used to refresh an expired access token # noqa: E501
:param refresh_token: The refresh_token of this AccessToken. # noqa: E501
:type: str
"""
self._refresh_token = refresh_token
```
#### File: payrollau/models/deduction_line.py
```python
import re # noqa: F401
from xero_python.models import BaseModel
class DeductionLine(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"deduction_type_id": "str",
"calculation_type": "DeductionTypeCalculationType",
"amount": "float",
"percentage": "float",
"number_of_units": "float",
}
attribute_map = {
"deduction_type_id": "DeductionTypeID",
"calculation_type": "CalculationType",
"amount": "Amount",
"percentage": "Percentage",
"number_of_units": "NumberOfUnits",
}
def __init__(
self,
deduction_type_id=None,
calculation_type=None,
amount=None,
percentage=None,
number_of_units=None,
): # noqa: E501
"""DeductionLine - a model defined in OpenAPI""" # noqa: E501
self._deduction_type_id = None
self._calculation_type = None
self._amount = None
self._percentage = None
self._number_of_units = None
self.discriminator = None
self.deduction_type_id = deduction_type_id
self.calculation_type = calculation_type
if amount is not None:
self.amount = amount
if percentage is not None:
self.percentage = percentage
if number_of_units is not None:
self.number_of_units = number_of_units
@property
def deduction_type_id(self):
"""Gets the deduction_type_id of this DeductionLine. # noqa: E501
Xero deduction type identifier # noqa: E501
:return: The deduction_type_id of this DeductionLine. # noqa: E501
:rtype: str
"""
return self._deduction_type_id
@deduction_type_id.setter
def deduction_type_id(self, deduction_type_id):
"""Sets the deduction_type_id of this DeductionLine.
Xero deduction type identifier # noqa: E501
:param deduction_type_id: The deduction_type_id of this DeductionLine. # noqa: E501
:type: str
"""
if deduction_type_id is None:
raise ValueError(
"Invalid value for `deduction_type_id`, must not be `None`"
) # noqa: E501
self._deduction_type_id = deduction_type_id
@property
def calculation_type(self):
"""Gets the calculation_type of this DeductionLine. # noqa: E501
:return: The calculation_type of this DeductionLine. # noqa: E501
:rtype: DeductionTypeCalculationType
"""
return self._calculation_type
@calculation_type.setter
def calculation_type(self, calculation_type):
"""Sets the calculation_type of this DeductionLine.
:param calculation_type: The calculation_type of this DeductionLine. # noqa: E501
:type: DeductionTypeCalculationType
"""
if calculation_type is None:
raise ValueError(
"Invalid value for `calculation_type`, must not be `None`"
) # noqa: E501
self._calculation_type = calculation_type
@property
def amount(self):
"""Gets the amount of this DeductionLine. # noqa: E501
Deduction type amount # noqa: E501
:return: The amount of this DeductionLine. # noqa: E501
:rtype: float
"""
return self._amount
@amount.setter
def amount(self, amount):
"""Sets the amount of this DeductionLine.
Deduction type amount # noqa: E501
:param amount: The amount of this DeductionLine. # noqa: E501
:type: float
"""
self._amount = amount
@property
def percentage(self):
"""Gets the percentage of this DeductionLine. # noqa: E501
The Percentage of the Deduction # noqa: E501
:return: The percentage of this DeductionLine. # noqa: E501
:rtype: float
"""
return self._percentage
@percentage.setter
def percentage(self, percentage):
"""Sets the percentage of this DeductionLine.
The Percentage of the Deduction # noqa: E501
:param percentage: The percentage of this DeductionLine. # noqa: E501
:type: float
"""
self._percentage = percentage
@property
def number_of_units(self):
"""Gets the number_of_units of this DeductionLine. # noqa: E501
Deduction number of units # noqa: E501
:return: The number_of_units of this DeductionLine. # noqa: E501
:rtype: float
"""
return self._number_of_units
@number_of_units.setter
def number_of_units(self, number_of_units):
"""Sets the number_of_units of this DeductionLine.
Deduction number of units # noqa: E501
:param number_of_units: The number_of_units of this DeductionLine. # noqa: E501
:type: float
"""
self._number_of_units = number_of_units
```
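For orientation, a minimal usage sketch of the model above (all values are placeholders, not real Xero data): the two required fields are enforced in their setters, so omitting either one raises a `ValueError`.
```python
# Sketch only: uses the DeductionLine class defined above; all values are placeholders.
line = DeductionLine(
    deduction_type_id="00000000-0000-0000-0000-000000000000",  # placeholder GUID
    calculation_type="FIXEDAMOUNT",  # placeholder; real code passes a DeductionTypeCalculationType value
    amount=25.0,
)
print(line.amount)  # 25.0
# Omitting a required field fails in the setter, e.g.:
# DeductionLine(calculation_type="FIXEDAMOUNT")  # deduction_type_id is None -> ValueError
```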
#### File: payrollau/models/settings.py
```python
import re # noqa: F401
from xero_python.models import BaseModel
class Settings(BaseModel):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
"accounts": "list[Account]",
"tracking_categories": "SettingsTrackingCategories",
"days_in_payroll_year": "int",
}
attribute_map = {
"accounts": "Accounts",
"tracking_categories": "TrackingCategories",
"days_in_payroll_year": "DaysInPayrollYear",
}
def __init__(
self, accounts=None, tracking_categories=None, days_in_payroll_year=None
): # noqa: E501
"""Settings - a model defined in OpenAPI""" # noqa: E501
self._accounts = None
self._tracking_categories = None
self._days_in_payroll_year = None
self.discriminator = None
if accounts is not None:
self.accounts = accounts
if tracking_categories is not None:
self.tracking_categories = tracking_categories
if days_in_payroll_year is not None:
self.days_in_payroll_year = days_in_payroll_year
@property
def accounts(self):
"""Gets the accounts of this Settings. # noqa: E501
Payroll Account details for SuperExpense, SuperLiabilty, WagesExpense, PAYGLiability & WagesPayable. # noqa: E501
:return: The accounts of this Settings. # noqa: E501
:rtype: list[Account]
"""
return self._accounts
@accounts.setter
def accounts(self, accounts):
"""Sets the accounts of this Settings.
Payroll Account details for SuperExpense, SuperLiabilty, WagesExpense, PAYGLiability & WagesPayable. # noqa: E501
:param accounts: The accounts of this Settings. # noqa: E501
:type: list[Account]
"""
self._accounts = accounts
@property
def tracking_categories(self):
"""Gets the tracking_categories of this Settings. # noqa: E501
:return: The tracking_categories of this Settings. # noqa: E501
:rtype: SettingsTrackingCategories
"""
return self._tracking_categories
@tracking_categories.setter
def tracking_categories(self, tracking_categories):
"""Sets the tracking_categories of this Settings.
:param tracking_categories: The tracking_categories of this Settings. # noqa: E501
:type: SettingsTrackingCategories
"""
self._tracking_categories = tracking_categories
@property
def days_in_payroll_year(self):
"""Gets the days_in_payroll_year of this Settings. # noqa: E501
Number of days in the Payroll year # noqa: E501
:return: The days_in_payroll_year of this Settings. # noqa: E501
:rtype: int
"""
return self._days_in_payroll_year
@days_in_payroll_year.setter
def days_in_payroll_year(self, days_in_payroll_year):
"""Sets the days_in_payroll_year of this Settings.
Number of days in the Payroll year # noqa: E501
:param days_in_payroll_year: The days_in_payroll_year of this Settings. # noqa: E501
:type: int
"""
self._days_in_payroll_year = days_in_payroll_year
```
#### File: 6enno/FarmXero/xeroAccounts.py
```python
import re
COLLAT = 601
MINER_FEE = 311
BURN_FEE = 312
SLASH = 319
TRANSFERS = 990
BLOCK_REWARDS = 200
MINER_BALANCE = 601
def getTb(accountingApi, tennant, date, printIt=False):
tb = {}
report = accountingApi.get_report_trial_balance(tennant, date=date)
rows = report.reports[0].rows
for r in rows:
try:
for rr in r.rows:
try:
debits = float(rr.cells[3].value)
except:
debits = 0
try:
credits = float(rr.cells[4].value)
except:
credits = 0
amount = debits - credits
description = rr.cells[0].value
                account = re.findall(r'\((\d+)\)', description)[0]
                tb[account] = amount
# print(str(account))
if (printIt):
print(str(rr.cells[0].value) +' | '+ str(amount))
except:
pass
return tb
``` |
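For reference, a minimal sketch of the row-parsing convention `getTb` relies on: the account code is the parenthesised number in the report row's description cell, and the signed balance is debits minus credits. The values below are made up, not real report data.
```python
import re
description = "Miner fee (311)"   # hypothetical report cell text
debits, credits = 0.0, 12.5       # hypothetical cell values
account = re.findall(r'\((\d+)\)', description)[0]
amount = debits - credits
print(account, amount)            # -> 311 -12.5
```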
{
"source": "6f/fava",
"score": 2
} |
#### File: fava/contrib/scripts.py
```python
import json
import os
import click
import requests
from beancount.query import query_env
from beancount.query import query_parser
from fava import LOCALES
BASE_PATH = os.path.normpath(
os.path.join(os.path.dirname(__file__), "../fava")
)
@click.group()
def cli():
"""Various utilities."""
def _env_to_list(attributes):
for name in attributes.keys():
if isinstance(name, tuple):
name = name[0]
yield name
@cli.command()
def generate_bql_grammar_json():
"""Generate a JSON file with BQL grammar attributes.
The online code editor needs to have the list of available columns,
functions, and keywords for syntax highlighting and completion.
Should be run whenever the BQL changes."""
target_env = query_env.TargetsEnvironment()
data = {
"columns": sorted(set(_env_to_list(target_env.columns))),
"functions": sorted(set(_env_to_list(target_env.functions))),
"keywords": sorted({kw.lower() for kw in query_parser.Lexer.keywords}),
}
path = os.path.join(
os.path.dirname(__file__),
"../fava/static/javascript/codemirror/bql-grammar.ts",
)
with open(path, "w", encoding="utf-8") as json_file:
json_file.write("export default " + json.dumps(data))
@cli.command()
def download_translations():
"""Fetch updated translations from POEditor.com."""
token = os.environ.get("POEDITOR_TOKEN")
if not token:
raise click.UsageError(
"The POEDITOR_TOKEN environment variable needs to be set."
)
for language in LOCALES:
download_from_poeditor(language, token)
@cli.command()
def upload_translations():
"""Upload .pot message catalog to POEditor.com."""
token = os.environ.get("POEDITOR_TOKEN")
if not token:
raise click.UsageError(
"The POEDITOR_TOKEN environment variable needs to be set."
)
    path = os.path.join(BASE_PATH, "translations/messages.pot")
click.echo(f"Uploading message catalog: {path}")
data = {
"api_token": token,
"id": 90283,
"updating": "terms",
# "sync_terms": 1,
}
files = {"file": open(path, "rb")}
request = requests.post(
"https://api.poeditor.com/v2/projects/upload", data=data, files=files
)
click.echo("Done: " + str(request.json()["result"]["terms"]))
# For these languages, the name on POEDITOR is off.
POEDITOR_LANGUAGE_NAME = {"zh": "zh-CN", "zh_Hant_TW": "zh-TW"}
def download_from_poeditor(language, token):
"""Download .po-file from POEditor and save to disk."""
click.echo(f'Downloading .po-file for language "{language}"')
poeditor_name = POEDITOR_LANGUAGE_NAME.get(language, language)
data = {
"api_token": token,
"id": 90283,
"language": poeditor_name,
"type": "po",
}
request = requests.post(
"https://api.poeditor.com/v2/projects/export", data=data
)
url = request.json()["result"]["url"]
content = requests.get(url).content
folder = os.path.join(BASE_PATH, "translations", language, "LC_MESSAGES")
if not os.path.exists(folder):
os.makedirs(folder)
    path = os.path.join(folder, "messages.po")
with open(path, "wb") as file_:
file_.write(content)
click.echo(f'Downloaded to "{path}"')
if __name__ == "__main__":
cli()
```
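As a quick illustration of `_env_to_list` above: tuple keys are collapsed to their first element before the caller sorts and de-duplicates them. The mapping below is invented, not the real beancount query environment.
```python
# Illustration only; the attribute names are made up.
attributes = {"account": None, ("sum", "count"): None, "date": None}
print(sorted(set(_env_to_list(attributes))))
# -> ['account', 'date', 'sum']
```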
#### File: fava/tests/test_core_watcher.py
```python
import time
from fava.core.watcher import Watcher
def test_watcher_file(tmpdir):
file1 = tmpdir.join("file1")
file2 = tmpdir.join("file2")
file1.write("test")
file2.write("test")
watcher = Watcher()
watcher.update([str(file1), str(file2)], [])
assert not watcher.check()
# time.time is too precise
time.sleep(1)
file1.write("test2")
assert watcher.check()
def test_watcher_folder(tmpdir):
folder = tmpdir.mkdir("folder")
folder.mkdir("bar")
watcher = Watcher()
watcher.update([], [str(folder)])
assert not watcher.check()
# time.time is too precise
time.sleep(1)
folder.mkdir("bar2")
assert watcher.check()
``` |
{
"source": "6ftunder/open.kattis",
"score": 4
} |
#### File: py/Quite a Problem/quite_a_problem.py
```python
import sys
def find_problem(string, find='problem'):
    '''Return True if `find` occurs anywhere in the string, case-insensitively.'''
    return find in string.lower().rstrip() # lowercase and strip the trailing newline before searching
for string in sys.stdin:
# goes through every input we put in and tries to find problem in the string
print('yes' if find_problem(string) else 'no')
``` |
{
"source": "6H057WH1P3/Asit",
"score": 2
} |
#### File: 6H057WH1P3/Asit/main.py
```python
import os
import sys
sys.path.append("./src")
sys.path.append("./lib")
import requests
import asit
import updater
AUTHOR = "6H057WH1P3"
PROJECT = "Asit"
VERSION = "v1.1.6"
def main():
accounts_path = "./data/accounts.txt"
# check for updates
update_handler = updater.GithubUpdater(AUTHOR, PROJECT, VERSION)
update_handler.update()
# just do the thing
bot = asit.ManageAccounts(accounts_path)
bot.manage()
main()
```
#### File: Asit/src/asit.py
```python
import random
import time
import requests
class Account:
# C'tor
def __init__(self, language, world, user, password, ability):
        # define default instance variables
self.cookie = ""
self.language = language
self.world = world
self.user = user
self.password = password
self.ability = ability
# preparing header and basic url for get and post requests
if language == "de":
self.basic_url = "http://welt" + self.world + ".freewar.de/freewar/internal/"
self.header = {"Host": "welt" + self.world + ".freewar.de", "Connection": "keep-alive", "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64)"}
elif language == "en":
self.basic_url = "http://world" + self.world + ".freewar.com/freewar/internal/"
self.header = {"Host": "world" + self.world + ".freewar.com", "Connection": "keep-alive", "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64)"}
def login(self):
print("\t[*] Logging in")
login_url = self.basic_url + "index.php"
# really annoying
if self.language == "de":
login_submit = "Einloggen"
elif self.language == "en":
login_submit = "Login"
# login payload / post parameters
login_payload = {"name": self.user, "password": self.password, "submit": login_submit}
# login request
login_request = requests.post(login_url, data = login_payload, headers = self.header)
# nesseccary for session management in other requests
self.cookie = login_request.cookies
print("\t[+] Login successful")
return 0
    # necessary to access all other links in the fw main window after login
def redirect(self):
print("\t[*] Redirecting")
redirect_url = self.basic_url + "frset.php"
requests.get(redirect_url, headers = self.header, cookies = self.cookie)
print("\t[+] Redirect successful")
return 0
# function to train characters abilities
def train(self):
# the training sequence
print("\t[*] Training")
train_url = self.basic_url + "ability.php"
train_payload = {"action": "train", "ability_id": self.ability}
requests.get(train_url, params = train_payload, headers = self.header, cookies = self.cookie)
print("\t[+] Training successful")
# preparing for the training status request
status_payload = {"action": "show_ability", "ability_id": self.ability}
# requesting content of main frame
status_request = requests.get(train_url, params = status_payload, headers = self.header, cookies = self.cookie)
if self.language == "de":
search_parameters = ["Aktuelle Stufe: ", "Maximale Stufe: "]
            # TODO: look up the exact text online
elif self.language == "en":
search_parameters = ["actual level: ", "maximal level: "]
output = "\t[*] Actual level: "
first = True
# looking for search parameters in http response
for search_text in search_parameters:
# exception handling
try:
position = status_request.text.find(search_text)
if (position == -1):
raise RuntimeError("Bad Request")
except RuntimeError:
                print("\t[-] Could not find ability level.")
return 1
            # TODO: continue here
text_length = len(search_text)
ability_level = status_request.text[position + text_length : position + text_length + 3]
            # getting a clean output
ability_level = ability_level.strip("<")
ability_level = ability_level.strip("/")
ability_level = ability_level.strip("b")
output += ability_level
if first:
first = False
output += " / "
print(output)
return 0
    # function to pick up the account's oil if it is on the right field
def oil(self):
print("\t[*] Picking up oil")
# requesting content of main frame
main_url = self.basic_url + "main.php"
main_request = requests.get(main_url, headers = self.header, cookies = self.cookie)
        # basic exception handling
try:
position = main_request.text.find("checkid=")
if (position == -1):
raise RuntimeError("wrong position")
except RuntimeError:
            print("\t[-] Oil isn't ready yet or account is in the wrong position.")
return 1
        # picking up the oil
oil_url = self.basic_url + "main.php"
oil_payload = {"arrive_eval": "drink", "checkid": main_request.text[position + 8 : position + 15]}
requests.get(oil_url, params = oil_payload, headers = self.header, cookies = self.cookie)
return 0
# for a clean session
def logout(self):
print("\t[*] Logging out")
logout_url = self.basic_url + "logout.php"
requests.get(logout_url, headers = self.header, cookies = self.cookie)
print("\t[+] Logged out")
return 0
def automatic_sit(self):
try:
self.login()
self.redirect()
self.train()
self.oil()
self.logout()
except:
print("[!] Connection Error.")
return 1
class ManageAccounts:
def __init__(self, account_path):
self.accounts = []
self.later = []
# filling the list of credentials
with open(account_path, "r") as account_file:
for line in account_file:
splitted_line = line.strip("\n").split(", ")
#print(splitted_line)
if len(splitted_line) == 5:
self.accounts.append(splitted_line)
def manage(self):
while len(self.accounts) > 0:
for language, world, user, password, ability in self.accounts:
# skipping credentials of the same world
skip = False
for account in self.accounts:
if (account[1] == world) and (account[2] != user):
self.later.append(account)
self.accounts.remove(account)
skip = True
if skip:
continue
# if not skipped, handling the credential
print("\n[*] World: " + world + " Account: " + user + " Server: " + language)
FWAccount = Account(language, world, user, password, ability)
if FWAccount.automatic_sit():
return 1
# writing memorized credentials back to be handled
if len(self.later) > 0:
random_time = random.randint(180, 300)
                print("[*] Waiting " + str(random_time) + " seconds to log in other accounts safely.")
time.sleep(random_time)
self.accounts = self.later
self.later.clear()
else:
self.accounts.clear()
``` |
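For reference, a sketch of the `accounts.txt` line format that `ManageAccounts.__init__` expects: one record per line, five fields separated by a comma and a space (language, world, user, password, ability id). The values below are placeholders, not real credentials.
```python
# Placeholder records only, not real credentials.
example_lines = [
    "de, 14, exampleUser, examplePassword, 1",
    "en, 3, anotherUser, anotherPassword, 2",
]
print(example_lines[0].strip("\n").split(", "))
# -> ['de', '14', 'exampleUser', 'examplePassword', '1']  (exactly 5 fields, so the record is kept)
```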
{
"source": "6henrykim/UnityExamples",
"score": 3
} |
#### File: BlockLibraries/UnityExamples/Click.py
```python
from pysensationcore import *
import sensation_helpers as sh
import os
# Load a click signal to act as intensity profile
# This is 1600 samples long 0.1s @16kHz.
intensitySignal = []
with open(os.path.join(os.path.dirname(__file__), 'Click_Intensity.txt')) as input_file:
lines = input_file.readlines()
for line in lines:
intensitySignal.append(float(line.replace("\n", "")))
# Sample rate of exported intensitySignal
fs = 16000
# Number of Samples in click
NUM_SAMPLES = len(intensitySignal)
# Define a new Block to drive the intensity of a Circle.
clickIntensityBlock = defineBlock("ClickIntensity")
defineInputs(clickIntensityBlock, "t", "sampleRate")
defineBlockInputDefaultValue(clickIntensityBlock.sampleRate, (fs, 0,0))
def clickIntensity(inputs):
t = inputs[0][0] % 1
sampleRate = inputs[1][0]
# Avoid divide by zero
if sampleRate == 0:
return (0,0,0)
# Time interval per sample
intervalPerSample = (1/sampleRate)
# Get the index of the closest time sample for the Click signal
ix = int(t/intervalPerSample)
if ix < NUM_SAMPLES - 1:
signalValue = intensitySignal[ix]
return (signalValue, 0, 0)
else:
return (0,0,0)
defineOutputs(clickIntensityBlock, "out")
setMetaData(clickIntensityBlock.out, "Sensation-Producing", False)
defineBlockOutputBehaviour(clickIntensityBlock.out, clickIntensity)
# An Object Path Sensation which has its intensity modulated by a clickIntensity Signal
objectPathInstance = createInstance("LissajousPath", "circlePath")
clickIntensityInstance = createInstance("ClickIntensity", "clickIntensity")
click = sh.createSensationFromPath("Click",
{
("t", clickIntensityInstance.t) : (0,0,0),
("sampleRate", clickIntensityInstance.sampleRate) : (fs,0,0),
("size", objectPathInstance.sizeX) : (0.02, 0.0, 0.0),
("size", objectPathInstance.sizeY) : (0.02, 0.0, 0.0),
("paramA", objectPathInstance.paramA) : (3, 0.0, 0.0),
("paramB", objectPathInstance.paramB) : (2, 0.0, 0.0)
},
output = objectPathInstance.out,
renderMode = sh.RenderMode.Loop,
intensity = clickIntensityInstance.out,
drawFrequency = 80)
setMetaData(click.size, "Type", "Scalar")
setMetaData(click.sampleRate, "Type", "Scalar")
setMetaData(click, "Allow-Transform", True)
setMetaData(click.paramA, "Input-Visibility", False)
setMetaData(click.paramB, "Input-Visibility", False)
```
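A quick check of the time-to-sample mapping used by `clickIntensity` (numbers illustrative): at a 16 kHz sample rate, a time of 0.05 s lands on sample 800 of the 1600-sample, 0.1 s click.
```python
fs = 16000                     # samples per second, matching the exported click signal
t = 0.05                       # seconds into the (looped) signal
interval_per_sample = 1 / fs
ix = int(t / interval_per_sample)
print(ix)                      # -> 800, i.e. halfway through the click
```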
#### File: BlockLibraries/UnityExamples/Sphere.py
```python
from pysensationcore import *
import sensation_helpers as sh
import NearestPointOnPlane
import NonParallelVector
import RightTriangleSideLength
def connectWithTrace(src, dst):
t = createInstance("Trace", "trace")
connect(src, t.input)
connect(t.out, dst)
def defineCircleTransformBlock():
# A block to calculate the transform matrix needed to place a circle at the
# plane-sphere intersection
circleTransformBlock = defineBlock("SphereCircleTransform")
defineInputs(circleTransformBlock,
"planeNormal",
"circleCentre")
defineOutputs(circleTransformBlock, "out")
setMetaData(circleTransformBlock.out, "Sensation-Producing", False)
vectorNotParallelToPlaneNormal = createInstance("NonParallelVector", "vectorNotParallelToPlaneNormal")
connect(circleTransformBlock.planeNormal, vectorNotParallelToPlaneNormal.v)
firstOrthonormalVectorInPlane = createInstance("CrossProduct", "firstOrthonormalVectorInPlane")
connect(circleTransformBlock.planeNormal, firstOrthonormalVectorInPlane.lhs)
connect(vectorNotParallelToPlaneNormal.out, firstOrthonormalVectorInPlane.rhs)
secondOrthonormalVectorInPlane = createInstance("CrossProduct", "secondOrthonormalVectorInPlane")
connect(circleTransformBlock.planeNormal, secondOrthonormalVectorInPlane.lhs)
connect(firstOrthonormalVectorInPlane.out, secondOrthonormalVectorInPlane.rhs)
transform = createInstance("ComposeTransform", "transform")
connect(firstOrthonormalVectorInPlane.normalized, transform.x)
connect(secondOrthonormalVectorInPlane.normalized, transform.y)
connect(circleTransformBlock.planeNormal, transform.z)
connect(circleTransformBlock.circleCentre, transform.o)
connect(transform.out, circleTransformBlock.out)
return circleTransformBlock
planeSphereIntersectionBlock = defineBlock("PlaneSphereIntersection")
defineInputs(planeSphereIntersectionBlock,
"sphereCentre",
"sphereRadius",
"planeNormal",
"planePoint")
defineOutputs(planeSphereIntersectionBlock, "intersected", "out")
setMetaData(planeSphereIntersectionBlock.intersected, "Sensation-Producing", False)
calcCircleCentre = createInstance("NearestPointOnPlane", "circleCentre")
connect(planeSphereIntersectionBlock.planeNormal, calcCircleCentre.planeNormal)
connect(planeSphereIntersectionBlock.planePoint, calcCircleCentre.planePoint)
connect(planeSphereIntersectionBlock.sphereCentre, calcCircleCentre.point)
calcIntersected = createInstance("Comparator", "calcIntersected")
connect(calcCircleCentre.distance, calcIntersected.a)
connect(planeSphereIntersectionBlock.sphereRadius, calcIntersected.b)
connect(Constant((0,0,0)), calcIntersected.returnValueIfAGreaterThanB)
connect(Constant((0,0,0)), calcIntersected.returnValueIfAEqualsB)
connect(Constant((1,0,0)), calcIntersected.returnValueIfALessThanB)
connect(calcIntersected.out, planeSphereIntersectionBlock.intersected)
calcRadius = createInstance("RightTriangleSideLength", "calcRadius")
connect(planeSphereIntersectionBlock.sphereRadius, calcRadius.hypotenuse)
connect(calcCircleCentre.distance, calcRadius.side)
defineCircleTransformBlock()
circleTransform = createInstance("SphereCircleTransform", "circleTransform")
connect(planeSphereIntersectionBlock.planeNormal, circleTransform.planeNormal)
connect(calcCircleCentre.nearestPointOnPlane, circleTransform.circleCentre)
circlePath = createInstance("CirclePath", "circlePath")
connect(calcRadius.out, circlePath.radius)
circleLocatedInVirtualSpace = createInstance("TransformPath", "circleLocatedInVirtualSpace")
connect(circlePath.out, circleLocatedInVirtualSpace.path)
connect(circleTransform.out, circleLocatedInVirtualSpace.transform)
connect(circleLocatedInVirtualSpace.out, planeSphereIntersectionBlock.out)
sphereBlock = defineBlock("Sphere")
defineInputs(sphereBlock,
"centre",
"radius",
"palm_normal",
"palm_position")
defineBlockInputDefaultValue(sphereBlock.centre, (0, 0.25, 0))
defineBlockInputDefaultValue(sphereBlock.radius, (0.06, 0, 0))
defineBlockInputDefaultValue(sphereBlock.palm_normal, (0, 0, 1))
defineBlockInputDefaultValue(sphereBlock.palm_position, (0, 0, 0.211))
setMetaData(sphereBlock.radius, "Type", "Scalar")
setMetaData(sphereBlock.centre, "Type", "Point")
setMetaData(sphereBlock.palm_normal, "Input-Visibility", False)
setMetaData(sphereBlock.palm_position, "Input-Visibility", False)
defineOutputs(sphereBlock, "out")
planeSphereIntersection = createInstance("PlaneSphereIntersection", "planeSphereIntersection")
connect(sphereBlock.centre, planeSphereIntersection.sphereCentre)
connect(sphereBlock.radius, planeSphereIntersection.sphereRadius)
connect(sphereBlock.palm_normal, planeSphereIntersection.planeNormal)
connect(sphereBlock.palm_position, planeSphereIntersection.planePoint)
focalPoints = sh.createVirtualToPhysicalFocalPointPipeline(sphereBlock,
planeSphereIntersection.out,
renderMode = sh.RenderMode.Loop,
drawFrequency = 70)
evalOnlyIfIntersecting = createInstance("Comparator", "evalOnlyIfIntersecting")
connect(planeSphereIntersection.intersected, evalOnlyIfIntersecting.a)
connect(Constant((1,0,0)), evalOnlyIfIntersecting.b)
connect(Constant((0,0,0,0)), evalOnlyIfIntersecting.returnValueIfAGreaterThanB)
connect(focalPoints, evalOnlyIfIntersecting.returnValueIfAEqualsB)
connect(Constant((0,0,0,0)), evalOnlyIfIntersecting.returnValueIfALessThanB)
connect(evalOnlyIfIntersecting.out, sphereBlock.out)
```
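As a plain-Python check of the geometry the graph above encodes, assuming `RightTriangleSideLength` returns the remaining side of a right triangle (which matches the usual plane-sphere intersection formula): a 0.06 m sphere whose centre sits 0.03 m from the palm plane intersects it in a circle of radius sqrt(0.06^2 - 0.03^2), roughly 0.052 m.
```python
import math
sphere_radius = 0.06         # metres, the block's default radius
distance_to_plane = 0.03     # metres, hypothetical palm-plane distance
circle_radius = math.sqrt(sphere_radius**2 - distance_to_plane**2)
print(round(circle_radius, 4))   # -> 0.052
```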
#### File: BlockLibraries/UnityExamples/TimeOps.py
```python
from pysensationcore import *
# === LoopTime ===
# A Block which loops the time input, such that the incoming 'world' time
# is always looped between 0 -> Duration
# If Duration is less than or equal to zero, regular time is used.
# By default, Loop Time will loop time every 2 seconds.
loopTimeBlock = defineBlock("LoopTime")
defineInputs(loopTimeBlock, "t", "duration")
defineBlockInputDefaultValue(loopTimeBlock.t, (0, 0, 0))
defineBlockInputDefaultValue(loopTimeBlock.duration, (2, 0, 0))
defineOutputs(loopTimeBlock, "time")
def loopTime(inputs):
t = inputs[0][0]
duration = inputs[1][0]
if duration <= 0:
return t
else:
loopedTime = t % duration
return (loopedTime,0,0)
defineBlockOutputBehaviour(loopTimeBlock.time, loopTime)
setMetaData(loopTimeBlock.time, "Sensation-Producing", False)
# === ReverseTime ===
# A Block which reverses the time input, such that the incoming 'world' time
# is negated
reverseTimeBlock = defineBlock("ReverseTime")
defineInputs(reverseTimeBlock, "t", "reversed")
defineBlockInputDefaultValue(reverseTimeBlock.t, (0, 0, 0))
defineBlockInputDefaultValue(reverseTimeBlock.reversed, (1, 0, 0))
defineOutputs(reverseTimeBlock, "time")
def reverseTime(inputs):
t = inputs[0][0]
reversed = int(inputs[1][0])
if (reversed >= 1):
return (-t,0,0)
else:
return (t,0,0)
defineBlockOutputBehaviour(reverseTimeBlock.time, reverseTime)
setMetaData(reverseTimeBlock.time, "Sensation-Producing", False)
# === BounceTime ===
# A Block which bounces the time input, such that the incoming 'world' time
# is bounced between 0 -> Duration -> 0 -> -Duration
# If Duration is less than or equal to zero, regular time is used.
# By default, Bounce Time uses a Duration of 2 seconds.
bounceTimeBlock = defineBlock("BounceTime")
defineInputs(bounceTimeBlock, "t", "duration")
defineBlockInputDefaultValue(bounceTimeBlock.t, (0, 0, 0))
defineBlockInputDefaultValue(bounceTimeBlock.duration, (2, 0, 0))
defineOutputs(bounceTimeBlock, "time")
def bounceTime(inputs):
    t = inputs[0][0]
    duration = inputs[1][0]
    if duration <= 0:
        return t
    else:
        # Triangle wave matching the description above:
        # 0 -> duration -> 0 -> -duration -> 0, repeating every 4 * duration
        phase = t % (4 * duration)
        if phase < duration:
            bouncedTime = phase
        elif phase < 3 * duration:
            bouncedTime = 2 * duration - phase
        else:
            bouncedTime = phase - 4 * duration
        return (bouncedTime, 0, 0)
defineBlockOutputBehaviour(bounceTimeBlock.time, bounceTime)
setMetaData(bounceTimeBlock.time, "Sensation-Producing", False)
``` |
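A quick sanity check of `loopTime` above with the default 2-second duration; calls use the same `(value, 0, 0)` tuple convention as the block inputs.
```python
for t in [0.0, 1.0, 1.9, 2.0, 2.5]:
    print(t, loopTime([(t, 0, 0), (2, 0, 0)]))
# 0.0 (0.0, 0, 0)
# 1.0 (1.0, 0, 0)
# 1.9 (1.9, 0, 0)
# 2.0 (0.0, 0, 0)
# 2.5 (0.5, 0, 0)
```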
{
"source": "6im0n/InstaRepostBot",
"score": 3
} |
#### File: 6im0n/InstaRepostBot/post_to_account.py
```python
import os
from instabot import Bot
import psycopg2
import time
import io
import shutil
import string
from config import *
def postToAccount():
print("starting....")
db = psycopg2.connect(database=databaseName,user=db_user,password=passwordDataBase,host="127.0.0.1",port="5432")
cursor = db.cursor()
#bot = Bot()
os.chdir(Picture_path)
#bot.login(username = username,
# password = password)
print("LOGIN SUCCESS !")
r = cursor.execute("SELECT * FROM photos_instagram")
rows = cursor.fetchall()
for row in rows:
owner = row[3]
path = row[1]
status = row[2]
ID = row[0]
caption = "Owner: " + "@"+ owner + "\n" + "————————— \n If you are the owner of the post and want to remove it, please contact me and I will remove it\n—————————\n TAG: \n—————————"
        print(caption)
print("")
print(status)
print("")
if str(status) != "POSTED":
#bot.upload_photo(path,caption=caption)
time.sleep(0)
            # mark the row's status as POSTED in the DB
#sql_update_query = """Update photos_instagram set status = %s where id = %s"""
#cursor.execute(sql_update_query% ("'POSTED'", "'"+ID+"'"))
#db.commit()
count = cursor.rowcount
print(count, "Record Updated successfully ")
#cursor.execute("SELECT * FROM photos_instagram WHERE id = "+"'"+ID+"'"+";")
            cursor.execute("UPDATE photos_instagram SET status = 'POSTED' WHERE id = %s;", (ID,))
os.rename(ID+".jpeg.REMOVE_ME",ID+".jpeg")
shutil.rmtree("HERE THE CONFIG FOLDER CREATE BY INSTABOTAPI")
print("POSTED")
else:
pass
print("NOTPOSTED")
db.commit()
db.close()
print("PostgreSQL connection is closed")
postToAccount()
``` |
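For orientation, the column layout `postToAccount` assumes for the `photos_instagram` table, inferred from the `row[...]` indexing above; the names and values below are illustrative, not the real schema.
```python
# Illustrative row only, inferred from the indexing in postToAccount.
example_row = ("abc123", "/pictures/abc123.jpeg", "NOTPOSTED", "some_owner")
ID, path, status, owner = example_row[0], example_row[1], example_row[2], example_row[3]
print(ID, path, status, owner)
# -> abc123 /pictures/abc123.jpeg NOTPOSTED some_owner
```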
{
"source": "6ixBit/Personal-Website",
"score": 3
} |
#### File: Personal-Website/app/tasks.py
```python
from redis import Redis
import os
from rq import Queue
from . import db, app
from .models import Git
import requests
from rq_scheduler import Scheduler
import redis
from rq import Queue
from datetime import datetime
url = 'https://api.github.com/users/6ixbit/repos?direction=desc'
def update_db(url):
req = requests.get(url, headers={'Authorization': 'token {}'.format(app.config['GIT_KEY'])}) # Make request to GitHub API
    result = req.json() # Parse response as JSON
    listy = [] # List holds info for each repo, represented as dict(s) in a List
    for x in result:
        # Extract data needed; use a fresh dict per repo so entries in listy stay independent
        repos = {} # Holds temporary results pulled from GitHub
repos['name'] = x['name']
repos['description'] = x['description']
repos['created_at'] = x['created_at']
repos['size'] = x['size']
repos['language'] = x['language']
repos['last_updated'] = x['updated_at']
repos['repo_url'] = x['svn_url']
listy.append(repos)
git = Git.query.all() # Query entire database to be compared with new Git results
    count = 0
for x in git: # Once each repo has been pulled
if x.repo_name == listy[count]['name']: # if db val == git api val
if x.description != listy[count]['description']: # if git val has changed then update db
x.description = listy[count]['description']
db.session.commit()
print(x.repo_name + ' description updated')
elif x.created_at != listy[count]['created_at']:
x.created_at = listy[count]['created_at']
db.session.commit()
print(x.repo_name + ' created_at updated')
elif x.size != listy[count]['size']:
x.size = listy[count]['size']
db.session.commit()
print(x.repo_name + ' size updated')
elif x.language != listy[count]['language']:
x.language = listy[count]['language']
db.session.commit()
print(x.repo_name + ' language updated')
elif x.last_updated != listy[count]['last_updated']:
x.last_updated = listy[count]['last_updated']
db.session.commit()
print(x.repo_name + ' last_updated updated')
elif x.repo_url != listy[count]['repo_url']:
x.repo_url = listy[count]['repo_url']
db.session.commit()
print(x.repo_name + ' repo_url updated')
else:
print('No updates made!')
count += 1
db.session.close()
#r = Redis(host=os.environ.get("REDIS_URL")) # Setup Redis
#q = Queue(connection=r) # Setup Queue
#scheduler = Scheduler(connection=redis.from_url(os.environ.get("REDIS_URL")))
#job = scheduler.schedule( # Make DB calls every 30 minutes
# scheduled_time=datetime.utcnow(),
# args=[url],
# func=update_db,
#repeat=None,
#interval=1800)
#print('Job enqueued', job)
```
#### File: Personal-Website/app/views.py
```python
from . import app, mail, db
from .forms import Contact_form
from .models import Contacts, Git
from .tasks import url, update_db
from flask import render_template, make_response, request, redirect, url_for
from flask_mail import Message
from Config import Config
from redis import Redis
from rq import Queue
@app.route('/')
def index():
res = make_response(render_template('index.html'), 200) # Generate response object and return 200
return res
@app.route('/projects')
def projects():
repo_1 = Git.query.get(5) # Present certain repos
repo_2 = Git.query.get(3)
repo_3 = Git.query.get(8)
repo_4 = Git.query.get(4)
git = Git.query.all()
res = make_response(render_template('projects.html', title='Projects', git=git, repo_1=repo_1, repo_2=repo_2, repo_3=repo_3, repo_4=repo_4), 200)
return res
@app.route('/contact', methods=['POST', 'GET'])
def contact():
# Create instance of forms to be passed to template
form_c = Contact_form()
# Instance for database model
user = Contacts()
# Set to true upon successful email transmission
email_sent = False
# If user submits some data
if request.method == 'POST':
if form_c.validate_on_submit():
form = Contact_form()
msg = Message(subject=form.subject.data, recipients=['<EMAIL>'], sender=app.config['MAIL_USERNAME'])
msg.body = 'From: {} \n\n'.format(form.name.data) + form.message.data + '\n \n \n Sent by: {}'.format(form.email.data)
mail.send(msg)
email_sent = True # Alert user that email has been sent
# Insert into DB
user.name_ = form_c.name.data
user.email = form_c.email.data
user.subject = form_c.subject.data
user.message = form_c.message.data
db.session.add(user)
db.session.commit()
db.session.close()
# Clear data in forms once e-mails sent & then return the page
form_c.name.data = ''
        form_c.email.data = ''
form_c.subject.data = ''
form_c.message.data = ''
return render_template('contact.html', form=form_c, email_sent=email_sent, title='Contact')
# Respond with contact page, pass a form instance & return 200
res = make_response(render_template('contact.html', form=form_c, title='Contact'), 200)
return res
``` |
{
"source": "6ix-Inc/baserow",
"score": 2
} |
#### File: database/views/exceptions.py
```python
from baserow.core.exceptions import (
InstanceTypeDoesNotExist,
InstanceTypeAlreadyRegistered,
)
class ViewDoesNotExist(Exception):
"""Raised when trying to get a view that doesn't exist."""
class CannotShareViewTypeError(Exception):
"""Raised when trying to a share a view that cannot be shared"""
class ViewNotInTable(Exception):
"""Raised when a provided view does not belong to a table."""
def __init__(self, view_id=None, *args, **kwargs):
self.view_id = view_id
super().__init__(
f"The view {view_id} does not belong to the table.",
*args,
**kwargs,
)
class UnrelatedFieldError(Exception):
"""
Raised when a field is not related to the view. For example when someone tries to
update field options of a field that does not belong to the view's table.
"""
class ViewTypeAlreadyRegistered(InstanceTypeAlreadyRegistered):
pass
class ViewTypeDoesNotExist(InstanceTypeDoesNotExist):
pass
class ViewFilterDoesNotExist(Exception):
"""Raised when trying to get a view filter that does not exist."""
class ViewFilterNotSupported(Exception):
"""Raised when the view type does not support filters."""
class ViewFilterTypeNotAllowedForField(Exception):
"""Raised when the view filter type is compatible with the field type."""
def __init__(self, filter_type=None, field_type=None, *args, **kwargs):
self.filter_type = filter_type
self.field_type = field_type
super().__init__(
f"The view filter type {filter_type} is not compatible with field type "
f"{field_type}.",
*args,
**kwargs,
)
class ViewFilterTypeDoesNotExist(InstanceTypeDoesNotExist):
"""Raised when the view filter type was not found in the registry."""
class ViewFilterTypeAlreadyRegistered(InstanceTypeAlreadyRegistered):
"""Raised when the view filter type is already registered in the registry."""
class ViewSortDoesNotExist(Exception):
"""Raised when trying to get a view sort that does not exist."""
class ViewSortNotSupported(Exception):
"""Raised when the view type does not support sorting."""
class ViewSortFieldAlreadyExist(Exception):
"""Raised when a view sort with the field type already exists."""
class ViewSortFieldNotSupported(Exception):
"""Raised when a field does not supports sorting in a view."""
class ViewDoesNotSupportFieldOptions(Exception):
"""Raised when a view type does not support field options."""
class FormViewFieldTypeIsNotSupported(Exception):
"""Raised when someone tries to enable an unsupported form view field."""
def __init__(self, field_type, *args, **kwargs):
self.field_type = field_type
super().__init__(
f"The field type {field_type} is not compatible with the form view.",
*args,
**kwargs,
)
```
#### File: api/fields/test_field_views_types.py
```python
from datetime import date, datetime
from decimal import Decimal
import pytest
from django.shortcuts import reverse
from faker import Faker
from freezegun import freeze_time
from pytz import timezone
from rest_framework.status import HTTP_200_OK, HTTP_400_BAD_REQUEST
from baserow.contrib.database.fields.handler import FieldHandler
from baserow.contrib.database.fields.models import (
CreatedOnField,
LastModifiedField,
LongTextField,
MultipleSelectField,
SelectOption,
URLField,
DateField,
EmailField,
FileField,
NumberField,
PhoneNumberField,
FormulaField,
LookupField,
)
@pytest.mark.django_db
def test_text_field_type(api_client, data_fixture):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table = data_fixture.create_database_table(user=user)
text_field = data_fixture.create_text_field(
table=table, order=0, name="Old name", text_default="Default"
)
response = api_client.patch(
reverse("api:database:fields:item", kwargs={"field_id": text_field.id}),
{"name": "New name"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["text_default"] == "Default"
@pytest.mark.django_db
def test_long_text_field_type(api_client, data_fixture):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table = data_fixture.create_database_table(user=user)
fake = Faker()
text = fake.text()
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "Long text", "type": "long_text"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["type"] == "long_text"
assert LongTextField.objects.all().count() == 1
field_id = response_json["id"]
response = api_client.patch(
reverse("api:database:fields:item", kwargs={"field_id": field_id}),
{"name": "Long text 2"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_200_OK
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{f"field_{field_id}": text},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json[f"field_{field_id}"] == text
model = table.get_model(attribute_names=True)
row = model.objects.all().last()
assert row.long_text_2 == text
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{f"field_{field_id}": ""},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json[f"field_{field_id}"] == ""
row = model.objects.all().last()
assert row.long_text_2 == ""
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{f"field_{field_id}": None},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json[f"field_{field_id}"] is None
row = model.objects.all().last()
assert row.long_text_2 is None
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json[f"field_{field_id}"] is None
row = model.objects.all().last()
assert row.long_text_2 is None
url = reverse("api:database:fields:item", kwargs={"field_id": field_id})
response = api_client.delete(url, HTTP_AUTHORIZATION=f"JWT {token}")
assert response.status_code == HTTP_200_OK
assert LongTextField.objects.all().count() == 0
@pytest.mark.django_db
def test_url_field_type(api_client, data_fixture):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table = data_fixture.create_database_table(user=user)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "URL", "type": "url"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["type"] == "url"
assert URLField.objects.all().count() == 1
field_id = response_json["id"]
response = api_client.patch(
reverse("api:database:fields:item", kwargs={"field_id": field_id}),
{"name": "URL2"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_200_OK
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{f"field_{field_id}": "https://test.nl"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json[f"field_{field_id}"] == "https://test.nl"
model = table.get_model(attribute_names=True)
row = model.objects.all().last()
assert row.url2 == "https://test.nl"
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{f"field_{field_id}": ""},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json[f"field_{field_id}"] == ""
row = model.objects.all().last()
assert row.url2 == ""
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{f"field_{field_id}": None},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json[f"field_{field_id}"] == ""
row = model.objects.all().last()
assert row.url2 == ""
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json[f"field_{field_id}"] == ""
row = model.objects.all().last()
assert row.url2 == ""
url = reverse("api:database:fields:item", kwargs={"field_id": field_id})
response = api_client.delete(url, HTTP_AUTHORIZATION=f"JWT {token}")
assert response.status_code == HTTP_200_OK
assert URLField.objects.all().count() == 0
@pytest.mark.django_db
def test_date_field_type(api_client, data_fixture):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table = data_fixture.create_database_table(user=user)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "Date", "type": "date"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["type"] == "date"
assert DateField.objects.all().count() == 1
date_field_id = response_json["id"]
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "Datetime", "type": "date", "date_include_time": True},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["type"] == "date"
assert DateField.objects.all().count() == 2
date_time_field_id = response_json["id"]
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{
f"field_{date_field_id}": "2020-04-01 12:00",
f"field_{date_time_field_id}": "2020-04-01",
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
assert response_json["detail"][f"field_{date_field_id}"][0]["code"] == "invalid"
assert response_json["detail"][f"field_{date_time_field_id}"][0]["code"] == (
"invalid"
)
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{
f"field_{date_field_id}": "2020-04-01",
f"field_{date_time_field_id}": "2020-04-01 14:30:20",
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json[f"field_{date_field_id}"] == "2020-04-01"
assert response_json[f"field_{date_time_field_id}"] == "2020-04-01T14:30:20Z"
model = table.get_model(attribute_names=True)
row = model.objects.all().last()
assert row.date == date(2020, 4, 1)
assert row.datetime == datetime(2020, 4, 1, 14, 30, 20, tzinfo=timezone("UTC"))
url = reverse("api:database:fields:item", kwargs={"field_id": date_time_field_id})
response = api_client.delete(url, HTTP_AUTHORIZATION=f"JWT {token}")
assert response.status_code == HTTP_200_OK
assert DateField.objects.all().count() == 1
@pytest.mark.django_db
def test_email_field_type(api_client, data_fixture):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table = data_fixture.create_database_table(user=user)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "Email", "type": "email"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["type"] == "email"
assert EmailField.objects.all().count() == 1
field_id = response_json["id"]
response = api_client.patch(
reverse("api:database:fields:item", kwargs={"field_id": field_id}),
{"name": "Email2"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_200_OK
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{f"field_{field_id}": "<EMAIL>"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json[f"field_{field_id}"] == "<EMAIL>"
model = table.get_model(attribute_names=True)
row = model.objects.all().last()
assert row.email2 == "<EMAIL>"
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{f"field_{field_id}": ""},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json[f"field_{field_id}"] == ""
row = model.objects.all().last()
assert row.email2 == ""
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{f"field_{field_id}": None},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json[f"field_{field_id}"] == ""
row = model.objects.all().last()
assert row.email2 == ""
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json[f"field_{field_id}"] == ""
row = model.objects.all().last()
assert row.email2 == ""
email = reverse("api:database:fields:item", kwargs={"field_id": field_id})
response = api_client.delete(email, HTTP_AUTHORIZATION=f"JWT {token}")
assert response.status_code == HTTP_200_OK
assert EmailField.objects.all().count() == 0
@pytest.mark.django_db
def test_file_field_type(api_client, data_fixture):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table = data_fixture.create_database_table(user=user)
grid = data_fixture.create_grid_view(table=table)
with freeze_time("2020-01-01 12:00"):
user_file_1 = data_fixture.create_user_file(
original_name="test.txt",
original_extension="txt",
unique="sdafi6WtHfnDrU6S1lQKh9PdC7PeafCA",
size=10,
mime_type="text/plain",
is_image=True,
image_width=1920,
image_height=1080,
sha256_hash=(
"a591a6d40bf420404a011733cfb7b190d62c65bf0bcda32b57b277d9ad9f146e"
),
)
user_file_2 = data_fixture.create_user_file()
user_file_3 = data_fixture.create_user_file()
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "File", "type": "file"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["type"] == "file"
assert FileField.objects.all().count() == 1
field_id = response_json["id"]
response = api_client.patch(
reverse("api:database:fields:item", kwargs={"field_id": field_id}),
{"name": "File2"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_200_OK
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json[f"field_{field_id}"] == []
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{f"field_{field_id}": []},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json[f"field_{field_id}"] == []
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{f"field_{field_id}": [{"without_name": "test"}]},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{f"field_{field_id}": [{"name": "an__invalid__name.jpg"}]},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
assert (
response_json["detail"][f"field_{field_id}"][0]["name"][0]["code"] == "invalid"
)
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{f"field_{field_id}": [{"name": "not_existing.jpg"}]},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_USER_FILE_DOES_NOT_EXIST"
assert response_json["detail"] == "The user file not_existing.jpg does not exist."
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{f"field_{field_id}": [{"name": user_file_1.name, "is_image": True}]},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert (
response_json[f"field_{field_id}"][0]["visible_name"]
== user_file_1.original_name
)
assert response_json[f"field_{field_id}"][0]["name"] == (
"sdafi6WtHfnDrU6S1lQKh9PdC7PeafCA_"
"a591a6d40bf420404a011733cfb7b190d62c65bf0bcda32b57b277d9ad9f146e.txt"
)
assert response_json[f"field_{field_id}"][0]["size"] == 10
assert response_json[f"field_{field_id}"][0]["mime_type"] == "text/plain"
assert response_json[f"field_{field_id}"][0]["is_image"] is True
assert response_json[f"field_{field_id}"][0]["image_width"] == 1920
assert response_json[f"field_{field_id}"][0]["image_height"] == 1080
assert response_json[f"field_{field_id}"][0]["uploaded_at"] == (
"2020-01-01T12:00:00+00:00"
)
assert "localhost:8000" in response_json[f"field_{field_id}"][0]["url"]
assert len(response_json[f"field_{field_id}"][0]["thumbnails"]) == 1
assert (
"localhost:8000"
in response_json[f"field_{field_id}"][0]["thumbnails"]["tiny"]["url"]
)
assert (
"sdafi6WtHfnDrU6S1lQKh9PdC7PeafCA_"
"a591a6d40bf420404a011733cfb7b190d62c65bf0bcda32b57b277d9ad9f146e.txt"
in response_json[f"field_{field_id}"][0]["thumbnails"]["tiny"]["url"]
)
assert "tiny" in response_json[f"field_{field_id}"][0]["thumbnails"]["tiny"]["url"]
assert response_json[f"field_{field_id}"][0]["thumbnails"]["tiny"]["width"] == 21
assert response_json[f"field_{field_id}"][0]["thumbnails"]["tiny"]["height"] == 21
assert "original_name" not in response_json
assert "original_extension" not in response_json
assert "sha256_hash" not in response_json
response = api_client.patch(
reverse(
"api:database:rows:item",
kwargs={"table_id": table.id, "row_id": response_json["id"]},
),
{
f"field_{field_id}": [
{"name": user_file_3.name},
{"name": user_file_2.name, "visible_name": "new_name_1.txt"},
]
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_200_OK
response_json = response.json()
assert response_json[f"field_{field_id}"][0]["name"] == user_file_3.name
assert (
response_json[f"field_{field_id}"][0]["visible_name"]
== user_file_3.original_name
)
assert "localhost:8000" in response_json[f"field_{field_id}"][0]["url"]
assert response_json[f"field_{field_id}"][0]["is_image"] is False
assert response_json[f"field_{field_id}"][0]["image_width"] is None
assert response_json[f"field_{field_id}"][0]["image_height"] is None
assert response_json[f"field_{field_id}"][0]["thumbnails"] is None
assert response_json[f"field_{field_id}"][1]["name"] == user_file_2.name
assert response_json[f"field_{field_id}"][1]["visible_name"] == "new_name_1.txt"
response = api_client.patch(
reverse(
"api:database:rows:item",
kwargs={"table_id": table.id, "row_id": response_json["id"]},
),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_200_OK
response = api_client.get(
reverse(
"api:database:rows:item",
kwargs={"table_id": table.id, "row_id": response_json["id"]},
),
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_200_OK
response_json = response.json()
assert response_json[f"field_{field_id}"][0]["name"] == user_file_3.name
assert (
response_json[f"field_{field_id}"][0]["visible_name"]
== user_file_3.original_name
)
assert "localhost:8000" in response_json[f"field_{field_id}"][0]["url"]
assert response_json[f"field_{field_id}"][1]["name"] == user_file_2.name
assert response_json[f"field_{field_id}"][1]["visible_name"] == "new_name_1.txt"
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_200_OK
response_json = response.json()
assert len(response_json["results"]) == 3
assert response_json["results"][0][f"field_{field_id}"] == []
assert response_json["results"][1][f"field_{field_id}"] == []
assert (
response_json["results"][2][f"field_{field_id}"][0]["name"] == user_file_3.name
)
assert (
"localhost:8000" in response_json["results"][2][f"field_{field_id}"][0]["url"]
)
assert (
response_json["results"][2][f"field_{field_id}"][1]["name"] == user_file_2.name
)
# We also need to check if the grid view returns the correct url because the
# request context must be provided there in order to work.
url = reverse("api:database:views:grid:list", kwargs={"view_id": grid.id})
response = api_client.get(url, **{"HTTP_AUTHORIZATION": f"JWT {token}"})
assert response.status_code == HTTP_200_OK
response_json = response.json()
assert len(response_json["results"]) == 3
assert response_json["results"][0][f"field_{field_id}"] == []
assert response_json["results"][1][f"field_{field_id}"] == []
assert (
response_json["results"][2][f"field_{field_id}"][0]["name"] == user_file_3.name
)
assert (
"localhost:8000" in response_json["results"][2][f"field_{field_id}"][0]["url"]
)
assert (
response_json["results"][2][f"field_{field_id}"][1]["name"] == user_file_2.name
)
@pytest.mark.django_db
def test_number_field_type(api_client, data_fixture):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table = data_fixture.create_database_table(user=user)
# Create a positive integer field
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "PositiveInt",
"type": "number",
"number_type": "INTEGER",
"number_negative": False,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
# Make sure the field was created properly
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["type"] == "number"
assert NumberField.objects.all().count() == 1
positive_int_field_id = response_json["id"]
# Create a negative integer field
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "NegativeInt",
"type": "number",
"number_type": "INTEGER",
"number_negative": True,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
# Make sure the field was created properly
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["type"] == "number"
assert NumberField.objects.all().count() == 2
negative_int_field_id = response_json["id"]
# Create a positive decimal field
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "PositiveDecimal",
"type": "number",
"number_type": "DECIMAL",
"number_negative": False,
"number_decimal_places": 2,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
# Make sure the field was created properly
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["type"] == "number"
assert NumberField.objects.all().count() == 3
positive_decimal_field_id = response_json["id"]
# Create a negative decimal field
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "NegativeDecimal",
"type": "number",
"number_type": "DECIMAL",
"number_negative": True,
"number_decimal_places": 2,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
# Make sure the field was created properly
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["type"] == "number"
assert NumberField.objects.all().count() == 4
negative_decimal_field_id = response_json["id"]
# Test re-writing the name of a field. 'PositiveInt' is now called 'PositiveIntEdit'
response = api_client.patch(
reverse("api:database:fields:item", kwargs={"field_id": positive_int_field_id}),
{"name": "PositiveIntEdit"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_200_OK
# Add a row with correct values
valid_pos_int = "99999999999999999999999999999999999999999999999999"
valid_neg_int = "-99999999999999999999999999999999999999999999999999"
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{
f"field_{positive_int_field_id}": valid_pos_int,
f"field_{negative_int_field_id}": valid_neg_int,
f"field_{positive_decimal_field_id}": 1000.00,
f"field_{negative_decimal_field_id}": -1000.00,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json[f"field_{positive_int_field_id}"] == valid_pos_int
assert response_json[f"field_{negative_int_field_id}"] == valid_neg_int
assert response_json[f"field_{positive_decimal_field_id}"] == "1000.00"
assert response_json[f"field_{negative_decimal_field_id}"] == "-1000.00"
model = table.get_model(attribute_names=True)
row = model.objects.all().last()
assert row.positiveintedit == Decimal(valid_pos_int)
assert row.negativeint == Decimal(valid_neg_int)
assert row.positivedecimal == Decimal(1000.00)
assert row.negativedecimal == Decimal(-1000.00)
    # Add a row with None values
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{
f"field_{positive_int_field_id}": None,
f"field_{negative_int_field_id}": None,
f"field_{positive_decimal_field_id}": None,
f"field_{negative_decimal_field_id}": None,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json[f"field_{positive_int_field_id}"] is None
assert response_json[f"field_{negative_int_field_id}"] is None
assert response_json[f"field_{positive_decimal_field_id}"] is None
assert response_json[f"field_{negative_decimal_field_id}"] is None
row = model.objects.all().last()
assert row.positiveintedit is None
assert row.negativeint is None
assert row.positivedecimal is None
assert row.negativedecimal is None
# Add a row with an integer that's too big
invalid_pos_int = "999999999999999999999999999999999999999999999999999"
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{
f"field_{positive_int_field_id}": invalid_pos_int,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
assert (
response_json["detail"][f"field_{positive_int_field_id}"][0]["code"]
== "max_digits"
)
# Add a row with an integer that's too small
invalid_neg_int = "-9999999999999999999999999999999999999999999999999999"
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{
f"field_{negative_int_field_id}": invalid_neg_int,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
    response_json = response.json()
    assert response.status_code == HTTP_400_BAD_REQUEST
    assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
    assert (
        response_json["detail"][f"field_{negative_int_field_id}"][0]["code"]
        == "max_digits"
    )
@pytest.mark.django_db
def test_phone_number_field_type(api_client, data_fixture):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table = data_fixture.create_database_table(user=user)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "phone", "type": "phone_number"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["type"] == "phone_number"
assert PhoneNumberField.objects.all().count() == 1
field_id = response_json["id"]
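    # Renaming the field should succeed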
response = api_client.patch(
reverse("api:database:fields:item", kwargs={"field_id": field_id}),
{"name": "Phone"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_200_OK
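    # A valid phone number is stored and returned unchanged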
expected_phone_number = "+44761198672"
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{f"field_{field_id}": expected_phone_number},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json[f"field_{field_id}"] == expected_phone_number
model = table.get_model(attribute_names=True)
row = model.objects.all().last()
assert row.phone == expected_phone_number
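    # An empty string is accepted and stored as an empty value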
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{f"field_{field_id}": ""},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json[f"field_{field_id}"] == ""
row = model.objects.all().last()
assert row.phone == ""
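    # A None value is normalised to an empty string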
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{f"field_{field_id}": None},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json[f"field_{field_id}"] == ""
row = model.objects.all().last()
assert row.phone == ""
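    # Omitting the field entirely also results in an empty string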
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json[f"field_{field_id}"] == ""
row = model.objects.all().last()
assert row.phone == ""
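    # Deleting the field removes the PhoneNumberField instance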
    url = reverse("api:database:fields:item", kwargs={"field_id": field_id})
    response = api_client.delete(url, HTTP_AUTHORIZATION=f"JWT {token}")
assert response.status_code == HTTP_200_OK
assert PhoneNumberField.objects.all().count() == 0
@pytest.mark.django_db
def test_last_modified_field_type(api_client, data_fixture):
time_under_test = "2021-08-10 12:00"
with freeze_time(time_under_test):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table = data_fixture.create_database_table(user=user)
    # First add a text field so that there is already a row with an
    # updated_on value
text_field = data_fixture.create_text_field(user=user, table=table)
with freeze_time(time_under_test):
api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{f"field_{text_field.id}": "Test Text"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
# now add a last_modified field with datetime
with freeze_time(time_under_test):
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "Last",
"type": "last_modified",
"date_include_time": True,
"timezone": "Europe/Berlin",
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["type"] == "last_modified"
assert LastModifiedField.objects.all().count() == 1
last_modified_field_id = response_json["id"]
assert last_modified_field_id
# verify that the timestamp is the same as the updated_on column
model = table.get_model(attribute_names=True)
row = model.objects.all().last()
assert row.last == row.updated_on
# change the text_field value so that we can verify that the
# last_modified column gets updated as well
with freeze_time(time_under_test):
response = api_client.patch(
reverse(
"api:database:rows:item",
kwargs={"table_id": table.id, "row_id": row.id},
),
{f"field_{text_field.id}": "test_second"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response.json()
assert response.status_code == HTTP_200_OK
    row = model.objects.all().last()
    last_datetime = row.last
    updated_on_datetime = row.updated_on
    assert last_datetime == updated_on_datetime
with freeze_time(time_under_test):
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{
f"field_{last_modified_field_id}": "2021-08-05",
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
with freeze_time(time_under_test):
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{
f"field_{last_modified_field_id}": "2021-08-09T14:14:33.574356Z",
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
@pytest.mark.django_db
def test_created_on_field_type(api_client, data_fixture):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table = data_fixture.create_database_table(user=user)
    # First add a text field so that there is already a row with an
    # updated_on and a created_on value
text_field = data_fixture.create_text_field(user=user, table=table)
api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{f"field_{text_field.id}": "Test Text"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
# now add a created_on field with datetime
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "Create",
"type": "created_on",
"date_include_time": True,
"timezone": "Europe/Berlin",
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["type"] == "created_on"
assert CreatedOnField.objects.all().count() == 1
created_on_field_id = response_json["id"]
assert created_on_field_id
# verify that the timestamp is the same as the updated_on column
model = table.get_model(attribute_names=True)
row = model.objects.all().last()
assert row.create == row.created_on
# change the text_field value so that we can verify that the
# created_on column does NOT get updated
response = api_client.patch(
reverse(
"api:database:rows:item",
kwargs={"table_id": table.id, "row_id": row.id},
),
{f"field_{text_field.id}": "test_second"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response.json()
assert response.status_code == HTTP_200_OK
row = model.objects.all().last()
create_datetime = row.create
created_on_datetime = row.created_on
assert create_datetime == created_on_datetime
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{
f"field_{created_on_field_id}": "2021-08-05",
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{
f"field_{created_on_field_id}": "2021-08-09T14:14:33.574356Z",
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
@pytest.mark.django_db
def test_multiple_select_field_type(api_client, data_fixture):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
database = data_fixture.create_database_application(user=user, name="Placeholder")
table = data_fixture.create_database_table(name="Example", database=database)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "Multi 1",
"type": "multiple_select",
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
field_1_id = response_json["id"]
assert response_json["name"] == "Multi 1"
assert response_json["type"] == "multiple_select"
assert response_json["select_options"] == []
assert MultipleSelectField.objects.all().count() == 1
assert SelectOption.objects.all().count() == 0
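    # Creating a field with initial select options also creates those options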
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "Multi 2",
"type": "multiple_select",
"select_options": [{"value": "Option 1", "color": "red"}],
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
field_2_id = response_json["id"]
select_options = SelectOption.objects.all()
assert len(select_options) == 1
assert select_options[0].field_id == field_2_id
assert select_options[0].value == "Option 1"
assert select_options[0].color == "red"
assert select_options[0].order == 0
assert response_json["name"] == "Multi 2"
assert response_json["type"] == "multiple_select"
assert response_json["select_options"] == [
{"id": select_options[0].id, "value": "Option 1", "color": "red"}
]
assert MultipleSelectField.objects.all().count() == 2
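    # Renaming the field keeps the existing select options intact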
response = api_client.patch(
reverse("api:database:fields:item", kwargs={"field_id": field_2_id}),
{"name": "New Multi 1"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["name"] == "New Multi 1"
assert response_json["type"] == "multiple_select"
assert response_json["select_options"] == [
{"id": select_options[0].id, "value": "Option 1", "color": "red"}
]
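    # Existing select options can be updated and new ones added in one request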
response = api_client.patch(
reverse("api:database:fields:item", kwargs={"field_id": field_2_id}),
{
"name": "New Multi 1",
"select_options": [
{"id": select_options[0].id, "value": "Option 1 B", "color": "red 2"},
{"value": "Option 2 B", "color": "blue 2"},
],
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
select_options = SelectOption.objects.all()
assert len(select_options) == 2
assert response_json["select_options"] == [
{"id": select_options[0].id, "value": "Option 1 B", "color": "red 2"},
{"id": select_options[1].id, "value": "Option 2 B", "color": "blue 2"},
]
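    # Providing an empty select_options list removes all existing options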
response = api_client.patch(
reverse("api:database:fields:item", kwargs={"field_id": field_2_id}),
{"name": "New Multi 1", "select_options": []},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert SelectOption.objects.all().count() == 0
assert response_json["select_options"] == []
response = api_client.patch(
reverse("api:database:fields:item", kwargs={"field_id": field_2_id}),
{
"name": "New Multi 1",
"select_options": [
{"value": "Option 1 B", "color": "red 2"},
{"value": "Option 2 B", "color": "blue 2"},
],
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_200_OK
select_options = SelectOption.objects.all()
assert len(select_options) == 2
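    # Deleting the field also deletes its select options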
response = api_client.delete(
reverse("api:database:fields:item", kwargs={"field_id": field_2_id}),
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_200_OK
assert MultipleSelectField.objects.all().count() == 1
assert SelectOption.objects.all().count() == 0
response = api_client.patch(
reverse("api:database:fields:item", kwargs={"field_id": field_1_id}),
{
"select_options": [
{"value": "Option 1", "color": "red"},
{"value": "Option 2", "color": "blue"},
{"value": "Option 3", "color": "green"},
{"value": "Option 4", "color": "yellow"},
],
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_200_OK
select_options = SelectOption.objects.all()
assert len(select_options) == 4
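    # A row value for a multiple select field must be a list of option ids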
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{f"field_{field_1_id}": "Nothing"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
assert (
response_json["detail"][f"field_{field_1_id}"]["non_field_errors"][0]["code"]
== "not_a_list"
)
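    # Unknown select option ids are rejected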
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{f"field_{field_1_id}": [999999]},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
assert (
response_json["detail"][f"field_{field_1_id}"][0][0]["code"] == "does_not_exist"
)
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{f"field_{field_1_id}": [select_options[0].id]},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_200_OK
response_json = response.json()
assert response_json[f"field_{field_1_id}"][0]["id"] == select_options[0].id
assert response_json[f"field_{field_1_id}"][0]["value"] == "Option 1"
assert response_json[f"field_{field_1_id}"][0]["color"] == "red"
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{f"field_{field_1_id}": [select_options[2].id]},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_200_OK
row_id = response.json()["id"]
response = api_client.patch(
reverse(
"api:database:rows:item", kwargs={"table_id": table.id, "row_id": row_id}
),
{f"field_{field_1_id}": [select_options[2].id, select_options[0].id]},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_200_OK
model = table.get_model()
rows = list(model.objects.all().enhance_by_fields())
assert len(rows) == 2
field_cell = getattr(rows[1], f"field_{field_1_id}").all()
assert field_cell[0].id == select_options[2].id
assert field_cell[1].id == select_options[0].id
# Create second multiple select field
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "Another Multi Field",
"type": "multiple_select",
"select_options": [
{"value": "Option 1", "color": "red"},
{"value": "Option 2", "color": "blue"},
],
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_200_OK
response_json = response.json()
field_2_id = response_json["id"]
field_2_select_options = response_json["select_options"]
all_select_options = SelectOption.objects.all()
assert len(all_select_options) == 6
assert MultipleSelectField.objects.all().count() == 2
# Make sure we can create a row with just one field
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{f"field_{field_2_id}": [field_2_select_options[0]["id"]]},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response.json()
assert response.status_code == HTTP_200_OK
@pytest.mark.django_db
def test_formula_field_type(api_client, data_fixture):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table = data_fixture.create_database_table(user=user)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "Formula", "type": "formula", "formula": "'test'"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["type"] == "formula"
assert FormulaField.objects.all().count() == 1
formula_field_id = response_json["id"]
assert formula_field_id
# Create a row
api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
# Verify the value of the formula field is the sql expression evaluated for that row
model = table.get_model(attribute_names=True)
row = model.objects.get()
assert row.formula == "test"
# You cannot modify a formula field row value
response = api_client.patch(
reverse(
"api:database:rows:item",
kwargs={"table_id": table.id, "row_id": row.id},
),
{f"field_{formula_field_id}": "test_second"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
assert (
response_json["detail"]
== "Field of type formula is read only and should not be set manually."
)
# You cannot create a row with a formula field value
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{
f"field_{formula_field_id}": "some value",
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
assert (
response_json["detail"]
== "Field of type formula is read only and should not be set manually."
)
# You cannot create a field with an invalid formula
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "Formula2", "type": "formula", "formula": "drop database baserow;"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_WITH_FORMULA"
assert "Invalid syntax" in response_json["detail"]
# You cannot create a field calling an invalid function
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "Formula2", "type": "formula", "formula": "version()"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_WITH_FORMULA"
assert (
response_json["detail"]
== "Error with formula: version is not a valid function."
)
@pytest.mark.django_db
def test_lookup_field_type(api_client, data_fixture):
user, token = data_fixture.create_user_and_token()
table = data_fixture.create_database_table(user=user)
table2 = data_fixture.create_database_table(user=user, database=table.database)
table_primary_field = data_fixture.create_text_field(
name="p", table=table, primary=True
)
data_fixture.create_text_field(name="primaryfield", table=table2, primary=True)
linkrowfield = FieldHandler().create_field(
user,
table,
"link_row",
name="linkrowfield",
link_row_table=table2,
)
looked_up_field = data_fixture.create_single_select_field(
table=table2, name="lookupfield"
)
option_a = data_fixture.create_select_option(
field=looked_up_field, value="A", color="blue"
)
option_b = data_fixture.create_select_option(
field=looked_up_field, value="B", color="red"
)
table2_model = table2.get_model(attribute_names=True)
table2_model.objects.create(lookupfield=option_a, primaryfield="primary a")
table2_model.objects.create(lookupfield=option_b, primaryfield="primary b")
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "lookupfield",
"type": "lookup",
"through_field_name": linkrowfield.name,
"target_field_name": looked_up_field.name,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
assert response_json["type"] == "lookup"
assert LookupField.objects.all().count() == 1
lookup_field_id = response_json["id"]
assert lookup_field_id
# Create a row
api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
# Verify the value is empty as there are no fields to lookup
table_model = table.get_model(attribute_names=True)
row = table_model.objects.get()
assert row.lookupfield == []
# You cannot modify a lookup field row value
response = api_client.patch(
reverse(
"api:database:rows:item",
kwargs={"table_id": table.id, "row_id": row.id},
),
{
f"field_{lookup_field_id}": [
{"value": {"value": "some value", "id": 1, "color": "red"}}
],
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
assert (
response_json["detail"]
== "Field of type lookup is read only and should not be set manually."
)
# You cannot create a row with a lookup field value
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{
f"field_{lookup_field_id}": [
{"value": {"value": "some value", "id": 1, "color": "red"}}
],
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
assert (
response_json["detail"]
== "Field of type lookup is read only and should not be set manually."
)
# You cannot create a lookup field without specifying through values
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "invalid", "type": "lookup"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_INVALID_LOOKUP_THROUGH_FIELD"
# You cannot create a lookup field without specifying target values
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "invalid", "type": "lookup", "through_field_id": linkrowfield.id},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_INVALID_LOOKUP_TARGET_FIELD"
# You cannot create a lookup field with a link row field in another table
other_table_linkrowfield = data_fixture.create_link_row_field()
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "invalid",
"type": "lookup",
"through_field_id": other_table_linkrowfield.id,
"target_field_id": looked_up_field.id,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_INVALID_LOOKUP_THROUGH_FIELD"
    # You cannot create a lookup field with a through field which is not a link field
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "invalid",
"type": "lookup",
"through_field_id": table_primary_field.id,
"target_field_id": looked_up_field.id,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_INVALID_LOOKUP_THROUGH_FIELD"
    # You cannot create a lookup field with a through field name which is not a link field
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "invalid",
"type": "lookup",
"through_field_name": table_primary_field.name,
"target_field_id": looked_up_field.id,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_INVALID_LOOKUP_THROUGH_FIELD"
# You cannot create a lookup field with an unknown through field name
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "invalid",
"type": "lookup",
"through_field_name": "unknown",
"target_field_id": looked_up_field.id,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_INVALID_LOOKUP_THROUGH_FIELD"
    # You cannot create a lookup field with a trashed through field id
trashed_link_field = data_fixture.create_link_row_field(
trashed=True, table=table, name="trashed"
)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "invalid",
"type": "lookup",
"through_field_id": trashed_link_field.id,
"target_field_id": looked_up_field.id,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_INVALID_LOOKUP_THROUGH_FIELD"
    # You cannot create a lookup field with a trashed through field name
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "invalid",
"type": "lookup",
"through_field_name": trashed_link_field.name,
"target_field_id": looked_up_field.id,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_INVALID_LOOKUP_THROUGH_FIELD"
# You cannot create a lookup field with an unknown target field name
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "invalid",
"type": "lookup",
"through_field_name": linkrowfield.name,
"target_field_name": "unknown",
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_INVALID_LOOKUP_TARGET_FIELD"
    # You cannot create a lookup field with a trashed target field id
field_that_cant_be_used = data_fixture.create_text_field(
table=table2, trashed=True, name="trashed_looked_up"
)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "invalid",
"type": "lookup",
"through_field_name": linkrowfield.name,
"target_field_id": field_that_cant_be_used.id,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_INVALID_LOOKUP_TARGET_FIELD"
    # You cannot create a lookup field with a trashed target field name
field_that_cant_be_used = data_fixture.create_text_field(
table=table2, trashed=True, name="trashed_looked_up"
)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "invalid",
"type": "lookup",
"through_field_name": linkrowfield.name,
"target_field_name": field_that_cant_be_used.name,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_INVALID_LOOKUP_TARGET_FIELD"
    # You cannot create a lookup field with a target field that can't be used in
    # formulas
field_that_cant_be_used = data_fixture.create_file_field(
table=table2, name="trashed_looked_up"
)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "invalid",
"type": "lookup",
"through_field_name": linkrowfield.name,
"target_field_name": field_that_cant_be_used.name,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_WITH_FORMULA"
    # You cannot create a lookup field with an invalid through field id
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "invalid",
"type": "lookup",
"through_field_id": linkrowfield.name,
"target_field_name": looked_up_field.name,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
    # You cannot create a lookup field with an invalid target field id
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "invalid",
"type": "lookup",
"through_field_id": linkrowfield.id,
"target_field_id": looked_up_field.name,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
```
#### File: api/fields/test_formula_views.py
```python
import pytest
from django.urls import reverse
from rest_framework.status import (
HTTP_200_OK,
HTTP_204_NO_CONTENT,
HTTP_400_BAD_REQUEST,
HTTP_404_NOT_FOUND,
)
from baserow.contrib.database.fields.handler import FieldHandler
@pytest.mark.django_db
def test_altering_value_of_referenced_field(
data_fixture, api_client, django_assert_num_queries
):
expected = "2"
user, token = data_fixture.create_user_and_token()
table = data_fixture.create_database_table(user=user)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "number", "type": "number", "number_type": "INTEGER"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == 200, response.json()
number_field_id = response.json()["id"]
# Create a formula field referencing the normal number field
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "Formula2", "type": "formula", "formula": "field('number')+1"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == 200, response.json()
formula_field_id = response.json()["id"]
# Create a row
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{f"field_{number_field_id}": 1},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
row_id = response.json()["id"]
assert response.status_code == 200, response.json()
# Assert the formula has calculated correctly
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["count"] == 1
assert response_json["results"][0][f"field_{formula_field_id}"] == expected
response = api_client.patch(
reverse(
"api:database:rows:item",
kwargs={"table_id": table.id, "row_id": row_id},
),
{f"field_{number_field_id}": 2},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == 200, response.json()
assert response.json()[f"field_{formula_field_id}"] == "3"
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["count"] == 1
assert response_json["results"][0][f"field_{formula_field_id}"] == "3"
@pytest.mark.django_db
def test_changing_type_of_reference_field_to_invalid_one_for_formula(
api_client, data_fixture
):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table, fields, rows = data_fixture.build_table(
columns=[("number", "number")], rows=[[1]], user=user
)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "Formula", "type": "formula", "formula": "field('number')+1"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
formula_field_id = response_json["id"]
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["count"] == 1
assert response_json["results"][0][f"field_{formula_field_id}"] == "2"
response = api_client.patch(
reverse("api:database:fields:item", kwargs={"field_id": fields[0].id}),
{"type": "boolean"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["related_fields"][0]["id"] == formula_field_id
assert response_json["related_fields"][0]["formula_type"] == "invalid"
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["count"] == 1
assert response_json["results"][0][f"field_{formula_field_id}"] is None
response = api_client.get(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
assert "argument number 2" in response_json[1]["error"]
@pytest.mark.django_db
def test_changing_name_of_referenced_field_by_formula(api_client, data_fixture):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table, fields, rows = data_fixture.build_table(
columns=[("number", "number")], rows=[[1]], user=user
)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "Formula", "type": "formula", "formula": "field('number')+1"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
formula_field_id = response_json["id"]
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["count"] == 1
assert response_json["results"][0][f"field_{formula_field_id}"] == "2"
response = api_client.patch(
reverse("api:database:fields:item", kwargs={"field_id": fields[0].id}),
{"name": "new_name"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_200_OK
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["count"] == 1
assert response_json["results"][0][f"field_{formula_field_id}"] == "2"
@pytest.mark.django_db
def test_trashing_child_field(api_client, data_fixture):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table, fields, rows = data_fixture.build_table(
columns=[("number", "number")], rows=[[1]], user=user
)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "Formula", "type": "formula", "formula": "field('number')+1"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
formula_field_id = response_json["id"]
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["count"] == 1
assert response_json["results"][0][f"field_{formula_field_id}"] == "2"
response = api_client.delete(
reverse("api:database:fields:item", kwargs={"field_id": fields[0].id}),
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_200_OK
response_json = response.json()
assert len(response_json["related_fields"]) == 1
assert response_json["related_fields"][0]["id"] == formula_field_id
assert (
"references the deleted or unknown field number"
in response_json["related_fields"][0]["error"]
)
response = api_client.get(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
assert "references the deleted or unknown field number" in response_json[0]["error"]
@pytest.mark.django_db
def test_trashing_restoring_child_field(api_client, data_fixture):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table, fields, rows = data_fixture.build_table(
columns=[("number", "number")], rows=[[1]], user=user
)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "Formula", "type": "formula", "formula": "field('number')+1"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
formula_field_id = response_json["id"]
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["count"] == 1
assert response_json["results"][0][f"field_{formula_field_id}"] == "2"
response = api_client.delete(
reverse("api:database:fields:item", kwargs={"field_id": fields[0].id}),
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_200_OK
response = api_client.get(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
assert "references the deleted or unknown field number" in response_json[0]["error"]
assert response_json[0]["formula"] == "field('number')+1"
response = api_client.patch(
reverse("api:trash:restore"),
{
"trash_item_type": "field",
"trash_item_id": fields[0].id,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_204_NO_CONTENT
response = api_client.get(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
assert response_json[1]["error"] is None
assert response_json[1]["formula"] == f"field('{fields[0].name}')+1"
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["count"] == 1
assert response_json["results"][0][f"field_{formula_field_id}"] == "2"
@pytest.mark.django_db
def test_trashing_renaming_child_field(api_client, data_fixture):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table, fields, rows = data_fixture.build_table(
columns=[("number", "number"), ("number2", "number")], rows=[[1, 2]], user=user
)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "Formula", "type": "formula", "formula": "field('number')+1"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
formula_field_id = response_json["id"]
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["count"] == 1
assert response_json["results"][0][f"field_{formula_field_id}"] == "2"
response = api_client.delete(
reverse("api:database:fields:item", kwargs={"field_id": fields[0].id}),
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_200_OK
response = api_client.get(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
assert "references the deleted or unknown field number" in response_json[1]["error"]
assert response_json[1]["formula"] == "field('number')+1"
# We rename the other field to fit into the formula slot
response = api_client.patch(
reverse("api:database:fields:item", kwargs={"field_id": fields[1].id}),
{"name": "number"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_200_OK
response = api_client.get(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
assert response_json[1]["error"] is None
assert response_json[1]["formula"] == f"field('number')+1"
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["count"] == 1
assert response_json["results"][0][f"field_{formula_field_id}"] == "3"
@pytest.mark.django_db
def test_trashing_creating_child_field(api_client, data_fixture):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table, fields, rows = data_fixture.build_table(
columns=[("number", "number")], rows=[[1]], user=user
)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "Formula", "type": "formula", "formula": "field('number')+1"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
formula_field_id = response_json["id"]
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["count"] == 1
assert response_json["results"][0][f"field_{formula_field_id}"] == "2"
response = api_client.delete(
reverse("api:database:fields:item", kwargs={"field_id": fields[0].id}),
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_200_OK
response = api_client.get(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
assert "references the deleted or unknown field number" in response_json[0]["error"]
assert response_json[0]["formula"] == "field('number')+1"
    # We create another field to fit into the formula slot
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "number", "type": "number"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
response = api_client.get(
reverse("api:database:fields:item", kwargs={"field_id": formula_field_id}),
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
assert response_json["error"] is None
assert response_json["formula"] == f"field('number')+1"
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["count"] == 1
assert response_json["results"][0][f"field_{formula_field_id}"] is None
@pytest.mark.django_db
def test_cant_make_self_reference(api_client, data_fixture):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table = data_fixture.create_database_table(user=user)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "Formula", "type": "formula", "formula": "field('Formula')+1"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_FIELD_SELF_REFERENCE"
@pytest.mark.django_db
def test_cant_make_circular_reference(api_client, data_fixture):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table = data_fixture.create_database_table(user=user)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "Formula", "type": "formula", "formula": "1"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_200_OK
first_formula_field_id = response.json()["id"]
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "Formula2", "type": "formula", "formula": "field('Formula')"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
response = api_client.patch(
reverse(
"api:database:fields:item", kwargs={"field_id": first_formula_field_id}
),
{"name": "Formula", "type": "formula", "formula": "field('Formula2')"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_FIELD_CIRCULAR_REFERENCE"
@pytest.mark.django_db
def test_changing_type_of_reference_field_to_valid_one_for_formula(
api_client, data_fixture
):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table, fields, rows = data_fixture.build_table(
columns=[("text", "text")], rows=[["1"], ["not a number"]], user=user
)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "Formula",
"type": "formula",
"formula": "concat(field('text'),'test')",
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
formula_field_id = response_json["id"]
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["count"] == 2
assert response_json["results"][0][f"field_{formula_field_id}"] == "1test"
assert (
response_json["results"][1][f"field_{formula_field_id}"] == "not a numbertest"
)
response = api_client.patch(
reverse("api:database:fields:item", kwargs={"field_id": fields[0].id}),
{"type": "number"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_200_OK
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["count"] == 2
assert response_json["results"][0][f"field_{formula_field_id}"] == "1test"
assert response_json["results"][1][f"field_{formula_field_id}"] == "test"
@pytest.mark.django_db
def test_can_set_number_of_decimal_places(api_client, data_fixture):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table, fields, rows = data_fixture.build_table(
columns=[("number", "number")], rows=[["1"], ["2"]], user=user
)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "Formula",
"type": "formula",
"formula": "1/4",
"number_type": "DECIMAL",
"number_decimal_places": 5,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
formula_field_id = response_json["id"]
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["count"] == 2
assert response_json["results"][0][f"field_{formula_field_id}"] == "0.25000"
assert response_json["results"][1][f"field_{formula_field_id}"] == "0.25000"
response = api_client.patch(
reverse("api:database:fields:item", kwargs={"field_id": formula_field_id}),
{
"name": "Formula",
"type": "formula",
"formula": "1/4",
"number_type": "DECIMAL",
"number_decimal_places": 2,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["count"] == 2
assert response_json["results"][0][f"field_{formula_field_id}"] == "0.25"
assert response_json["results"][1][f"field_{formula_field_id}"] == "0.25"
response = api_client.patch(
reverse("api:database:fields:item", kwargs={"field_id": formula_field_id}),
{
"name": "Formula",
"type": "text",
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["count"] == 2
assert response_json["results"][0][f"field_{formula_field_id}"] == "0.25"
assert response_json["results"][1][f"field_{formula_field_id}"] == "0.25"
@pytest.mark.django_db
def test_altering_type_of_underlying_causes_type_update(api_client, data_fixture):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table, fields, rows = data_fixture.build_table(
columns=[("text", "text")], rows=[["1"], [None]], user=user
)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "Formula",
"type": "formula",
"formula": "field('text')",
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
formula_field_id = response_json["id"]
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["count"] == 2
assert response_json["results"][0][f"field_{formula_field_id}"] == "1"
assert response_json["results"][1][f"field_{formula_field_id}"] is None
response = api_client.patch(
reverse("api:database:fields:item", kwargs={"field_id": fields[0].id}),
{
"name": "text",
"type": "number",
"number_type": "DECIMAL",
"number_decimal_places": 2,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["count"] == 2
assert response_json["results"][0][f"field_{formula_field_id}"] == "1.00"
assert response_json["results"][1][f"field_{formula_field_id}"] == "0.00"
@pytest.mark.django_db
def test_can_compare_date_and_text(api_client, data_fixture, django_assert_num_queries):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table = data_fixture.create_database_table(user=user)
data_fixture.create_date_field(
table=table,
date_include_time=True,
date_format="US",
name="Date",
)
data_fixture.create_text_field(table=table, name="Text")
model = table.get_model(attribute_names=True)
model.objects.create(date="2020-01-01 12:00", text="01/01/2020 12:00")
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "Formula",
"type": "formula",
"formula": "field('Date')=field('Text')",
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
formula_field_id = response_json["id"]
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["count"] == 1
assert response_json["results"][0][f"field_{formula_field_id}"]
@pytest.mark.django_db
def test_trashing_row_changing_formula_restoring_row(api_client, data_fixture):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table, fields, rows = data_fixture.build_table(
columns=[("number", "number")], rows=[[1], [2]], user=user
)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "Formula", "type": "formula", "formula": "field('number')+1"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
formula_field_id = response_json["id"]
response = api_client.delete(
reverse(
"api:database:rows:item",
kwargs={"table_id": table.id, "row_id": rows[0].id},
),
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_204_NO_CONTENT
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["count"] == 1
assert response_json["results"][0][f"field_{formula_field_id}"] == "3"
response = api_client.patch(
reverse("api:database:fields:item", kwargs={"field_id": formula_field_id}),
{
"formula": "'a'",
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
model = table.get_model()
formula_values = model.objects_and_trash.values_list(
f"field_{formula_field_id}", flat=True
)
assert list(formula_values) == ["a", "a"]
response = api_client.patch(
reverse("api:trash:restore"),
{
"trash_item_type": "row",
"trash_item_id": rows[0].id,
"parent_trash_item_id": table.id,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_204_NO_CONTENT
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["count"] == 2
assert response_json["results"][0][f"field_{formula_field_id}"] == "a"
assert response_json["results"][1][f"field_{formula_field_id}"] == "a"
@pytest.mark.django_db
def test_trashing_formula_field(api_client, data_fixture):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table, fields, rows = data_fixture.build_table(
columns=[("number", "number")], rows=[[1]], user=user
)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "Formula", "type": "formula", "formula": "field('number')+1"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
formula_field_id = response_json["id"]
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["count"] == 1
assert response_json["results"][0][f"field_{formula_field_id}"] == "2"
response = api_client.delete(
reverse("api:database:fields:item", kwargs={"field_id": formula_field_id}),
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_200_OK
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["count"] == 1
assert f"field_{formula_field_id}" not in response_json["results"][0]
@pytest.mark.django_db
def test_can_type_an_invalid_formula_field(
data_fixture, api_client, django_assert_num_queries
):
user, token = data_fixture.create_user_and_token()
table = data_fixture.create_database_table(user=user)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "number", "type": "number", "number_type": "INTEGER"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == 200, response.json()
# Create a formula field referencing the normal number field
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "Formula2", "type": "formula", "formula": "field('number')+1"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == 200, response.json()
formula_field_id = response.json()["id"]
response = api_client.post(
reverse(
"api:database:formula:type_formula", kwargs={"field_id": formula_field_id}
),
{f"formula": "1+'a'"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == 200, response_json
assert response_json["formula_type"] == "invalid"
assert "argument number 2" in response_json["error"]
@pytest.mark.django_db
def test_can_type_a_valid_formula_field(
data_fixture, api_client, django_assert_num_queries
):
user, token = data_fixture.create_user_and_token()
table = data_fixture.create_database_table(user=user)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "number", "type": "number", "number_type": "INTEGER"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == 200, response.json()
# Create a formula field referencing the normal number field
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "Formula2", "type": "formula", "formula": "field('number')+1"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == 200, response.json()
formula_field_id = response.json()["id"]
response = api_client.post(
reverse(
"api:database:formula:type_formula", kwargs={"field_id": formula_field_id}
),
{f"formula": "1+1"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == 200, response_json
assert response_json == {
"date_format": None,
"date_include_time": None,
"date_time_format": None,
"error": None,
"formula": "1+1",
"formula_type": "number",
"array_formula_type": None,
"number_decimal_places": 0,
}
@pytest.mark.django_db
def test_type_endpoint_returns_error_for_bad_syntax(
data_fixture, api_client, django_assert_num_queries
):
user, token = data_fixture.create_user_and_token()
table = data_fixture.create_database_table(user=user)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "number", "type": "number", "number_type": "INTEGER"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == 200, response.json()
# Create a formula field referencing the normal number field
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "Formula2", "type": "formula", "formula": "field('number')+1"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == 200, response.json()
formula_field_id = response.json()["id"]
response = api_client.post(
reverse(
"api:database:formula:type_formula", kwargs={"field_id": formula_field_id}
),
{f"formula": "bad syntax"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_WITH_FORMULA"
@pytest.mark.django_db
def test_type_endpoint_returns_error_for_missing_parameters(
data_fixture, api_client, django_assert_num_queries
):
user, token = data_fixture.create_user_and_token()
table = data_fixture.create_database_table(user=user)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "number", "type": "number", "number_type": "INTEGER"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == 200, response.json()
# Create a formula field referencing the normal number field
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "Formula2", "type": "formula", "formula": "field('number')+1"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == 200, response.json()
formula_field_id = response.json()["id"]
response = api_client.post(
reverse(
"api:database:formula:type_formula", kwargs={"field_id": formula_field_id}
),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
@pytest.mark.django_db
def test_type_endpoint_returns_error_for_missing_field(
data_fixture, api_client, django_assert_num_queries
):
user, token = data_fixture.create_user_and_token()
table = data_fixture.create_database_table(user=user)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "number", "type": "number", "number_type": "INTEGER"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == 200, response.json()
response = api_client.post(
reverse("api:database:formula:type_formula", kwargs={"field_id": 9999}),
{f"formula": "bad syntax"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_404_NOT_FOUND
assert response_json["error"] == "ERROR_FIELD_DOES_NOT_EXIST"
@pytest.mark.django_db
def test_type_endpoint_returns_error_for_non_formula_field(
data_fixture, api_client, django_assert_num_queries
):
user, token = data_fixture.create_user_and_token()
table = data_fixture.create_database_table(user=user)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "number", "type": "number", "number_type": "INTEGER"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == 200, response.json()
number_field_id = response.json()["id"]
response = api_client.post(
reverse(
"api:database:formula:type_formula", kwargs={"field_id": number_field_id}
),
{f"formula": "bad syntax"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_404_NOT_FOUND
assert response_json["error"] == "ERROR_FIELD_DOES_NOT_EXIST"
@pytest.mark.django_db
def test_type_endpoint_returns_error_for_self_reference(
data_fixture, api_client, django_assert_num_queries
):
user, token = data_fixture.create_user_and_token()
table = data_fixture.create_database_table(user=user)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "formula", "type": "formula", "formula": "'a'"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == 200, response.json()
formula_field_id = response.json()["id"]
response = api_client.post(
reverse(
"api:database:formula:type_formula", kwargs={"field_id": formula_field_id}
),
{f"formula": "field('formula')"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_FIELD_SELF_REFERENCE"
@pytest.mark.django_db
def test_type_endpoint_returns_error_for_circular_reference(
data_fixture, api_client, django_assert_num_queries
):
user, token = data_fixture.create_user_and_token()
table = data_fixture.create_database_table(user=user)
handler = FieldHandler()
first_formula = handler.create_field(
user, table, "formula", formula="1", name="first"
)
handler.create_field(
user, table, "formula", formula="field('first')", name="second"
)
response = api_client.post(
reverse(
"api:database:formula:type_formula", kwargs={"field_id": first_formula.id}
),
{f"formula": "field('second')"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_FIELD_CIRCULAR_REFERENCE"
@pytest.mark.django_db
def test_type_endpoint_returns_error_if_not_permissioned_for_field(
data_fixture, api_client, django_assert_num_queries
):
user, token = data_fixture.create_user_and_token()
other_user, other_token = data_fixture.create_user_and_token()
table = data_fixture.create_database_table(user=user)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "number", "type": "number", "number_type": "INTEGER"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == 200, response.json()
# Create a formula field referencing the normal number field
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "Formula2", "type": "formula", "formula": "field('number')+1"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == 200, response.json()
formula_field_id = response.json()["id"]
response = api_client.post(
reverse(
"api:database:formula:type_formula", kwargs={"field_id": formula_field_id}
),
{f"formula": "1+1"},
format="json",
HTTP_AUTHORIZATION=f"JWT {other_token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_USER_NOT_IN_GROUP"
@pytest.mark.django_db
def test_altering_type_of_underlying_causes_type_update_nested(
api_client, data_fixture
):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table, fields, rows = data_fixture.build_table(
columns=[("text", "text")], rows=[["1"], [None]], user=user
)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "Formula",
"type": "formula",
"formula": "field('text')",
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
formula_field_id = response_json["id"]
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "Formula2",
"type": "formula",
"formula": "field('Formula')",
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
nested_formula_field_id = response_json["id"]
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["count"] == 2
assert response_json["results"][0][f"field_{formula_field_id}"] == "1"
assert response_json["results"][1][f"field_{formula_field_id}"] is None
assert response_json["results"][0][f"field_{nested_formula_field_id}"] == "1"
assert response_json["results"][1][f"field_{nested_formula_field_id}"] is None
response = api_client.patch(
reverse("api:database:fields:item", kwargs={"field_id": fields[0].id}),
{
"name": "text",
"type": "number",
"number_type": "DECIMAL",
"number_decimal_places": 2,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["count"] == 2
assert response_json["results"][0][f"field_{formula_field_id}"] == "1.00"
assert response_json["results"][1][f"field_{formula_field_id}"] == "0.00"
assert response_json["results"][0][f"field_{nested_formula_field_id}"] == "1.00"
assert response_json["results"][1][f"field_{nested_formula_field_id}"] == "0.00"
@pytest.mark.django_db
def test_deleting_underlying_causes_type_update_nested(api_client, data_fixture):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table, fields, rows = data_fixture.build_table(
columns=[("text", "text")], rows=[["1"], [None]], user=user
)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "Formula",
"type": "formula",
"formula": "field('text')",
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
formula_field_id = response_json["id"]
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "Formula2",
"type": "formula",
"formula": "field('Formula')",
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
nested_formula_field_id = response_json["id"]
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["count"] == 2
assert response_json["results"][0][f"field_{formula_field_id}"] == "1"
assert response_json["results"][1][f"field_{formula_field_id}"] is None
assert response_json["results"][0][f"field_{nested_formula_field_id}"] == "1"
assert response_json["results"][1][f"field_{nested_formula_field_id}"] is None
response = api_client.delete(
reverse("api:database:fields:item", kwargs={"field_id": fields[0].id}),
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
assert len(response_json["related_fields"]) == 2
assert (
response_json["related_fields"][0]["error"]
== "references the deleted or unknown field text"
)
assert (
response_json["related_fields"][1]["error"] == "references the deleted or "
"unknown field text"
)
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["count"] == 2
assert response_json["results"][0][f"field_{formula_field_id}"] is None
assert response_json["results"][1][f"field_{formula_field_id}"] is None
assert response_json["results"][0][f"field_{nested_formula_field_id}"] is None
assert response_json["results"][1][f"field_{nested_formula_field_id}"] is None
@pytest.mark.django_db
def test_deleting_underlying_causes_type_update_nested_after_update(
api_client, data_fixture
):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table, fields, rows = data_fixture.build_table(
columns=[("text", "text")], rows=[["1"], [None]], user=user
)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "Formula",
"type": "formula",
"formula": "field('text')",
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
formula_field_id = response_json["id"]
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{
"name": "Formula2",
"type": "formula",
"formula": "1",
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
nested_formula_field_id = response_json["id"]
response = api_client.patch(
reverse(
"api:database:fields:item", kwargs={"field_id": nested_formula_field_id}
),
{
"formula": "field('Formula')",
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["count"] == 2
assert response_json["results"][0][f"field_{formula_field_id}"] == "1"
assert response_json["results"][1][f"field_{formula_field_id}"] is None
assert response_json["results"][0][f"field_{nested_formula_field_id}"] == "1"
assert response_json["results"][1][f"field_{nested_formula_field_id}"] is None
response = api_client.delete(
reverse("api:database:fields:item", kwargs={"field_id": fields[0].id}),
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
assert len(response_json["related_fields"]) == 2
assert (
response_json["related_fields"][0]["error"]
== "references the deleted or unknown field text"
)
assert (
response_json["related_fields"][1]["error"] == "references the deleted or "
"unknown field text"
)
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["count"] == 2
assert response_json["results"][0][f"field_{formula_field_id}"] is None
assert response_json["results"][1][f"field_{formula_field_id}"] is None
assert response_json["results"][0][f"field_{nested_formula_field_id}"] is None
assert response_json["results"][1][f"field_{nested_formula_field_id}"] is None
@pytest.mark.django_db
def test_referencing_single_select(api_client, data_fixture):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table = data_fixture.create_database_table(user=user)
option_field = data_fixture.create_single_select_field(table=table, name="option")
option_a = data_fixture.create_select_option(
field=option_field, value="A", color="blue"
)
data_fixture.create_select_option(field=option_field, value="B", color="red")
table.get_model(attribute_names=True).objects.create(option=option_a)
response = api_client.post(
reverse("api:database:fields:list", kwargs={"table_id": table.id}),
{"name": "Formula", "type": "formula", "formula": "field('option')"},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK, response_json
formula_field_id = response_json["id"]
response = api_client.get(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response_json["count"] == 1
assert response_json["results"][0][f"field_{formula_field_id}"] == {
"color": "blue",
"id": option_a.id,
"value": "A",
}
response = api_client.post(
reverse("api:database:rows:list", kwargs={"table_id": table.id}),
{},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
assert response.status_code == HTTP_200_OK, response_json
response_json = response.json()
```
#### File: views/gallery/test_gallery_view_views.py
```python
import pytest
from django.shortcuts import reverse
from rest_framework.status import (
HTTP_200_OK,
HTTP_400_BAD_REQUEST,
HTTP_401_UNAUTHORIZED,
HTTP_404_NOT_FOUND,
)
@pytest.mark.django_db
def test_list_rows(api_client, data_fixture):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table = data_fixture.create_database_table(user=user)
text_field = data_fixture.create_text_field(
table=table, order=0, name="Color", text_default="white"
)
gallery = data_fixture.create_gallery_view(table=table)
gallery_2 = data_fixture.create_gallery_view()
model = gallery.table.get_model()
row_1 = model.objects.create(**{f"field_{text_field.id}": "Green"})
row_2 = model.objects.create()
row_3 = model.objects.create(**{f"field_{text_field.id}": "Orange"})
row_4 = model.objects.create(**{f"field_{text_field.id}": "Purple"})
url = reverse("api:database:views:gallery:list", kwargs={"view_id": 999})
response = api_client.get(url, **{"HTTP_AUTHORIZATION": f"JWT {token}"})
assert response.status_code == HTTP_404_NOT_FOUND
assert response.json()["error"] == "ERROR_GALLERY_DOES_NOT_EXIST"
url = reverse("api:database:views:gallery:list", kwargs={"view_id": gallery_2.id})
response = api_client.get(url, **{"HTTP_AUTHORIZATION": f"JWT {token}"})
assert response.status_code == HTTP_400_BAD_REQUEST
assert response.json()["error"] == "ERROR_USER_NOT_IN_GROUP"
url = reverse("api:database:views:gallery:list", kwargs={"view_id": gallery.id})
response = api_client.get(
url, {"limit": 2}, **{"HTTP_AUTHORIZATION": f"JWT {token}"}
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["count"] == 4
assert response_json["results"][0]["id"] == row_1.id
assert response_json["results"][1]["id"] == row_2.id
assert "field_options" not in response_json
url = reverse("api:database:views:gallery:list", kwargs={"view_id": gallery.id})
response = api_client.get(
url, {"limit": 1, "offset": 2}, **{"HTTP_AUTHORIZATION": f"JWT {token}"}
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["count"] == 4
assert response_json["results"][0]["id"] == row_3.id
sort = data_fixture.create_view_sort(view=gallery, field=text_field, order="ASC")
url = reverse("api:database:views:gallery:list", kwargs={"view_id": gallery.id})
response = api_client.get(url, **{"HTTP_AUTHORIZATION": f"JWT {token}"})
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["count"] == 4
assert response_json["results"][0]["id"] == row_1.id
assert response_json["results"][1]["id"] == row_3.id
assert response_json["results"][2]["id"] == row_4.id
assert response_json["results"][3]["id"] == row_2.id
sort.delete()
view_filter = data_fixture.create_view_filter(
view=gallery, field=text_field, value="Green"
)
url = reverse("api:database:views:gallery:list", kwargs={"view_id": gallery.id})
response = api_client.get(url, **{"HTTP_AUTHORIZATION": f"JWT {token}"})
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["count"] == 1
assert len(response_json["results"]) == 1
assert response_json["results"][0]["id"] == row_1.id
view_filter.delete()
url = reverse("api:database:views:gallery:list", kwargs={"view_id": gallery.id})
response = api_client.get(
url, data={"count": ""}, **{"HTTP_AUTHORIZATION": f"JWT {token}"}
)
response_json = response.json()
assert response_json["count"] == 4
assert len(response_json.keys()) == 1
row_1.delete()
row_2.delete()
row_3.delete()
row_4.delete()
url = reverse("api:database:views:gallery:list", kwargs={"view_id": gallery.id})
response = api_client.get(url, **{"HTTP_AUTHORIZATION": f"JWT {token}"})
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["count"] == 0
assert not response_json["previous"]
assert not response_json["next"]
assert len(response_json["results"]) == 0
url = reverse("api:database:views:gallery:list", kwargs={"view_id": gallery.id})
response = api_client.get(url)
assert response.status_code == HTTP_401_UNAUTHORIZED
data_fixture.create_template(group=gallery.table.database.group)
url = reverse("api:database:views:gallery:list", kwargs={"view_id": gallery.id})
response = api_client.get(url)
assert response.status_code == HTTP_200_OK
@pytest.mark.django_db
def test_list_rows_include_field_options(api_client, data_fixture):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>", password="password", first_name="Test1"
)
table = data_fixture.create_database_table(user=user)
text_field = data_fixture.create_text_field(
table=table, order=0, name="Color", text_default="white"
)
gallery = data_fixture.create_gallery_view(table=table)
    # The second field is deliberately created after the creation of the gallery view
    # so that the GalleryViewFieldOptions entry is not created. This should
    # automatically be created when the page is fetched.
number_field = data_fixture.create_number_field(
table=table, order=1, name="Horsepower"
)
url = reverse("api:database:views:gallery:list", kwargs={"view_id": gallery.id})
response = api_client.get(url, **{"HTTP_AUTHORIZATION": f"JWT {token}"})
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert "field_options" not in response_json
url = reverse("api:database:views:gallery:list", kwargs={"view_id": gallery.id})
response = api_client.get(
url, {"include": "field_options"}, **{"HTTP_AUTHORIZATION": f"JWT {token}"}
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert len(response_json["field_options"]) == 2
assert response_json["field_options"][str(text_field.id)]["hidden"] is True
assert response_json["field_options"][str(text_field.id)]["order"] == 32767
assert response_json["field_options"][str(number_field.id)]["hidden"] is True
assert response_json["field_options"][str(number_field.id)]["order"] == 32767
assert "filters_disabled" not in response_json
@pytest.mark.django_db
def test_patch_gallery_view_field_options(api_client, data_fixture):
user, token = data_fixture.create_user_and_token(
email="<EMAIL>",
password="password",
first_name="Test1",
)
table = data_fixture.create_database_table(user=user)
text_field = data_fixture.create_text_field(table=table)
gallery = data_fixture.create_gallery_view(table=table)
url = reverse("api:database:views:field_options", kwargs={"view_id": gallery.id})
response = api_client.patch(
url,
{"field_options": {text_field.id: {"width": 300, "hidden": False}}},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert len(response_json["field_options"]) == 1
assert response_json["field_options"][str(text_field.id)]["hidden"] is False
assert response_json["field_options"][str(text_field.id)]["order"] == 32767
options = gallery.get_field_options()
assert len(options) == 1
assert options[0].field_id == text_field.id
assert options[0].hidden is False
assert options[0].order == 32767
@pytest.mark.django_db
def test_create_gallery_view(api_client, data_fixture):
user, token = data_fixture.create_user_and_token()
table = data_fixture.create_database_table(user=user)
file_field = data_fixture.create_file_field(table=table)
response = api_client.post(
reverse("api:database:views:list", kwargs={"table_id": table.id}),
{
"name": "<NAME>",
"type": "gallery",
"filter_type": "AND",
"filters_disabled": False,
"card_cover_image_field": file_field.id,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["name"] == "<NAME>"
assert response_json["type"] == "gallery"
assert response_json["filter_type"] == "AND"
assert response_json["filters_disabled"] is False
assert response_json["card_cover_image_field"] == file_field.id
@pytest.mark.django_db
def test_create_gallery_view_invalid_card_cover_image_field(
api_client, data_fixture
):
user, token = data_fixture.create_user_and_token()
table = data_fixture.create_database_table(user=user)
text = data_fixture.create_text_field(table=table)
file_field = data_fixture.create_file_field()
response = api_client.post(
reverse("api:database:views:list", kwargs={"table_id": table.id}),
{"name": "Test 2", "type": "gallery", "card_cover_image_field": text.id},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
assert (
response_json["detail"]["card_cover_image_field"][0]["code"] == "does_not_exist"
)
response = api_client.post(
reverse("api:database:views:list", kwargs={"table_id": table.id}),
{"name": "Test 2", "type": "gallery", "card_cover_image_field": file_field.id},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_FIELD_NOT_IN_TABLE"
@pytest.mark.django_db
def test_update_gallery_view(api_client, data_fixture):
user, token = data_fixture.create_user_and_token()
table = data_fixture.create_database_table(user=user)
gallery_view = data_fixture.create_gallery_view(table=table)
response = api_client.patch(
reverse("api:database:views:item", kwargs={"view_id": gallery_view.id}),
{
"name": "<NAME>",
"type": "gallery",
"filter_type": "AND",
"filters_disabled": False,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["name"] == "<NAME>"
assert response_json["type"] == "gallery"
assert response_json["filter_type"] == "AND"
assert response_json["filters_disabled"] is False
```
#### File: database/field/test_rating_field_type.py
```python
import pytest
from django.core.exceptions import ValidationError
from faker import Faker
from decimal import Decimal
from baserow.contrib.database.fields.handler import FieldHandler
from baserow.contrib.database.fields.models import (
RatingField,
)
from baserow.contrib.database.rows.handler import RowHandler
@pytest.mark.django_db
def test_field_creation(data_fixture):
user = data_fixture.create_user()
table = data_fixture.create_database_table(user=user)
field = data_fixture.create_text_field(table=table, order=1, name="name")
handler = FieldHandler()
field = handler.create_field(
user=user,
table=table,
type_name="rating",
name="rating",
max_value=4,
color="red",
style="flag",
)
assert len(RatingField.objects.all()) == 1
from_db = RatingField.objects.get(name="rating")
assert from_db.color == "red"
assert from_db.max_value == 4
assert from_db.style == "flag"
fake = Faker()
value = fake.random_int(1, 4)
model = table.get_model(attribute_names=True)
row = model.objects.create(rating=value, name="Test")
assert row.rating == value
assert row.name == "Test"
handler.delete_field(user=user, field=field)
assert len(RatingField.objects.all()) == 0
for invalid_value in [
{"max_value": 11},
{"max_value": 0},
{"max_value": -2},
{"style": "invalid"},
{"style": ""},
{"color": None},
{"color": ""},
]:
with pytest.raises(ValueError):
handler.create_field(
user=user,
table=table,
type_name="rating",
name="rating invalid",
**invalid_value
)
@pytest.mark.django_db
def test_row_creation(data_fixture):
user = data_fixture.create_user()
table = data_fixture.create_database_table(user=user)
data_fixture.create_database_table(user=user, database=table.database)
data_fixture.create_text_field(table=table, order=1, name="name")
field_handler = FieldHandler()
row_handler = RowHandler()
field_handler.create_field(
user=user, table=table, type_name="rating", name="rating"
)
assert len(RatingField.objects.all()) == 1
model = table.get_model(attribute_names=True)
row1 = row_handler.create_row(
user=user, table=table, values={"rating": 3}, model=model
)
row_handler.create_row(user=user, table=table, values={"rating": 0}, model=model)
row_handler.create_row(user=user, table=table, values={"rating": None}, model=model)
row_handler.create_row(user=user, table=table, values={}, model=model)
assert [(f.id, f.rating) for f in model.objects.all()] == [
(1, 3),
(2, 0),
(3, 0),
(4, 0),
]
row_handler.update_row(
user_field_names=True,
user=user,
row_id=row1.id,
table=table,
values={"rating": 1},
)
assert [(f.id, f.rating) for f in model.objects.all()] == [
(1, 1),
(2, 0),
(3, 0),
(4, 0),
]
for invalid_value in [-1, 6]:
with pytest.raises(ValidationError):
row_handler.create_row(
user=user, table=table, values={"rating": invalid_value}, model=model
)
        with pytest.raises(ValidationError):
            row_handler.update_row(
                user=user,
                row_id=row1.id,
                table=table,
                values={"rating": invalid_value},
            )
@pytest.mark.django_db
def test_rating_field_modification(data_fixture):
user = data_fixture.create_user()
table = data_fixture.create_database_table(user=user)
data_fixture.create_database_table(user=user, database=table.database)
text_field = data_fixture.create_text_field(table=table, order=1, name="text")
field_handler = FieldHandler()
row_handler = RowHandler()
rating_field = field_handler.create_field(
user=user, table=table, type_name="rating", name="Rating"
)
integer_field = data_fixture.create_number_field(
table=table,
name="integer",
number_type="INTEGER",
number_negative=True,
)
decimal_field = data_fixture.create_number_field(
table=table,
name="decimal",
number_type="DECIMAL",
number_negative=True,
)
boolean_field = data_fixture.create_boolean_field(table=table, name="boolean")
model = table.get_model(attribute_names=True)
row_handler.create_row(
user=user,
table=table,
values={
"text": "5",
"rating": 5,
"integer": 5,
"decimal": 4.5,
"boolean": True,
},
model=model,
)
row_handler.create_row(
user=user,
table=table,
values={
"text": "3",
"rating": 4,
"integer": 3,
"decimal": 2.5,
"boolean": False,
},
model=model,
)
row3 = row_handler.create_row(
user=user,
table=table,
values={"text": "1", "rating": 3, "integer": 1, "decimal": 1.3},
model=model,
)
row_handler.create_row(
user=user,
table=table,
values={"text": "1.5", "rating": 2, "integer": -1, "decimal": -1.2},
model=model,
)
row_handler.create_row(
user=user,
table=table,
values={"text": "invalid", "rating": 1, "integer": -5, "decimal": -7},
model=model,
)
row_handler.create_row(
user=user,
table=table,
values={"text": "0", "rating": 0, "integer": 0, "decimal": 0},
model=model,
)
row_handler.create_row(
user=user,
table=table,
values={
"text": None,
"rating": None,
"integer": None,
"number": None,
},
model=model,
)
# Convert text field to rating
field_handler.update_field(
user=user, field=text_field, new_type_name="rating", max_value=3
)
# Change max_value
field_handler.update_field(user=user, field=rating_field, max_value=3)
# Change field type from number -> rating
field_handler.update_field(
user=user,
field=integer_field,
new_type_name="rating",
max_value=3,
)
field_handler.update_field(
user=user,
field=decimal_field,
new_type_name="rating",
max_value=3,
)
field_handler.update_field(
user=user,
field=boolean_field,
new_type_name="rating",
max_value=3,
)
# Check value clamping on max_value modification
assert [
(f.id, f.text, f.rating, f.integer, f.decimal, f.boolean)
for f in model.objects.all()
] == [
(1, 3, 3, 3, 3, 1),
(2, 3, 3, 3, 3, 0),
(3, 1, 3, 1, 1, 0),
(4, 2, 2, 0, 0, 0),
(5, 0, 1, 0, 0, 0),
(6, 0, 0, 0, 0, 0),
(7, 0, 0, 0, 0, 0),
]
# Change boolean field to test conversion back with value != [0,1]
row_handler.update_row(
user=user,
row_id=row3.id,
table=table,
user_field_names=True,
values={"boolean": 3},
)
# Convert back field to original type
field_handler.update_field(user=user, field=text_field, new_type_name="text")
field_handler.update_field(
user=user,
field=integer_field,
new_type_name="number",
number_type="INTEGER",
number_negative=True,
)
field_handler.update_field(
user=user,
field=decimal_field,
new_type_name="number",
number_type="DECIMAL",
number_negative=True,
number_decimal_places=2,
)
field_handler.update_field(
user=user,
field=boolean_field,
new_type_name="boolean",
)
assert [
(f.id, f.text, f.integer, f.decimal, f.boolean) for f in model.objects.all()
] == [
(1, "3", Decimal("3"), Decimal("3.00"), True),
(2, "3", Decimal("3"), Decimal("3.00"), False),
(3, "1", Decimal("1"), Decimal("1.00"), True),
(4, "2", Decimal("0"), Decimal("0.00"), False),
(5, "0", Decimal("0"), Decimal("0.00"), False),
(6, "0", Decimal("0"), Decimal("0.00"), False),
(7, "0", Decimal("0"), Decimal("0.00"), False),
]
```
#### File: baserow_premium/views/handler.py
```python
from typing import Dict, Union
from collections import defaultdict
from django.db.models import Q, Count
from baserow.contrib.database.table.models import Table, GeneratedTableModel
from baserow.contrib.database.fields.models import SingleSelectField
def get_rows_grouped_by_single_select_field(
table: Table,
single_select_field: SingleSelectField,
option_settings: Dict[str, Dict[str, int]] = None,
default_limit: int = 40,
default_offset: int = 0,
model: GeneratedTableModel = None,
) -> Dict[str, Dict[str, Union[int, list]]]:
"""
    This method fetches the rows grouped by a single select field in a
    query-efficient manner. Optionally, `limit` and `offset` settings can be provided
    per option. If no option settings are provided, then rows for all the select
    options will be fetched. If one or more options have been provided, then only the
    rows for those options will be fetched.
Example:
get_rows_grouped_by_single_select_field(
...
            option_settings={
"1": {"limit": 10, "offset": 10},
"2": {"limit": 10, "offset": 20}
}
)
:param table: The table where to fetch the rows from.
:param single_select_field: The single select field where the rows must be
grouped by.
:param option_settings: Optionally, additional `limit` and `offset`
configurations per field option can be provided.
    :param default_limit: The default limit that applies to all options if no
        specific settings for that option have been provided.
    :param default_offset: The default offset that applies to all options if no
        specific settings for that option have been provided.
:param model: Additionally, an existing model can be provided so that it doesn't
have to be generated again.
:return: The fetched rows including the total count.
"""
if option_settings is None:
option_settings = {}
if model is None:
model = table.get_model()
base_queryset = model.objects.all().enhance_by_fields().order_by("order", "id")
all_filters = Q()
count_aggregates = {}
all_options = list(single_select_field.select_options.all())
all_option_ids = [option.id for option in all_options]
def get_id_and_string(option):
return (
option.id if option else None,
str(option.id) if option else "null",
)
for select_option in [None] + all_options:
option_id, option_string = get_id_and_string(select_option)
# If option settings have been provided, we only want to return rows for
# those options, otherwise we will include all options.
if len(option_settings) > 0 and option_string not in option_settings:
continue
option_setting = option_settings.get(option_string, {})
limit = option_setting.get("limit", default_limit)
offset = option_setting.get("offset", default_offset)
if option_id is None:
            # The `Count` aggregate doesn't support an empty `__in` lookup. That's
            # why we always add the `-1` value, which never exists, so the lookup is
            # never empty.
filters = ~Q(
**{f"field_{single_select_field.id}_id__in": all_option_ids + [-1]}
)
else:
filters = Q(**{f"field_{single_select_field.id}_id": option_id})
        # We don't want to execute a separate query for each select option, so we
        # create a subquery that finds the ids of the rows related to each option
        # group. After the single combined query has been executed we can group the rows.
sub_queryset = base_queryset.filter(filters).values_list("id", flat=True)[
offset : offset + limit
]
all_filters |= Q(id__in=sub_queryset)
        # The same goes for fetching the total count. We construct a single query
        # that calculates the total number of rows per option.
count_aggregates[option_string] = Count(
"pk",
filter=filters,
)
queryset = list(base_queryset.filter(all_filters))
counts = model.objects.aggregate(**count_aggregates)
rows = defaultdict(lambda: {"count": 0, "results": []})
for row in queryset:
option_id = getattr(row, f"field_{single_select_field.id}_id")
option_string = str(option_id) if option_id in all_option_ids else "null"
rows[option_string]["results"].append(row)
for key, value in counts.items():
rows[key]["count"] = value
return rows
```
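For orientation, here is a minimal usage sketch of the helper above. It assumes a `table` and its `single_select_field` already exist (for example created through the fixtures used in the tests in this document), and the import path is inferred from the file path shown above; both are assumptions for illustration, not part of the original source.

```python
# Hypothetical usage sketch of get_rows_grouped_by_single_select_field.
# `table` and `single_select_field` are assumed to already exist; the import
# path mirrors the "baserow_premium/views/handler.py" file path above.
from baserow_premium.views.handler import get_rows_grouped_by_single_select_field

rows = get_rows_grouped_by_single_select_field(
    table=table,
    single_select_field=single_select_field,
    # Only fetch rows for option id 1 (second page of 10) and for rows without
    # an option set (the "null" bucket, first page of 10).
    option_settings={
        "1": {"limit": 10, "offset": 10},
        "null": {"limit": 10, "offset": 0},
    },
)

# Each bucket maps an option id (as a string, or "null") to the total row
# count for that option and the paginated list of row model instances.
for option_id, bucket in rows.items():
    print(option_id, bucket["count"], len(bucket["results"]))
```

Because `option_settings` is non-empty here, only the listed buckets are returned; omitting it would return one bucket per select option plus the `"null"` bucket, each limited by `default_limit` and `default_offset`.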
#### File: views/views/test_kanban_views.py
```python
import pytest
from django.shortcuts import reverse
from django.test.utils import override_settings
from rest_framework.status import (
HTTP_200_OK,
HTTP_400_BAD_REQUEST,
HTTP_402_PAYMENT_REQUIRED,
HTTP_404_NOT_FOUND,
)
from baserow_premium.views.models import KanbanView
@pytest.mark.django_db
@override_settings(DEBUG=True)
def test_list_without_valid_premium_license(api_client, premium_data_fixture):
user, token = premium_data_fixture.create_user_and_token(
has_active_premium_license=False
)
kanban = premium_data_fixture.create_kanban_view(user=user)
url = reverse("api:database:views:kanban:list", kwargs={"view_id": kanban.id})
response = api_client.get(url, **{"HTTP_AUTHORIZATION": f"JWT {token}"})
assert response.status_code == HTTP_402_PAYMENT_REQUIRED
assert response.json()["error"] == "ERROR_NO_ACTIVE_PREMIUM_LICENSE"
# The kanban view should work if it's a template.
premium_data_fixture.create_template(group=kanban.table.database.group)
url = reverse("api:database:views:kanban:list", kwargs={"view_id": kanban.id})
response = api_client.get(url, **{"HTTP_AUTHORIZATION": f"JWT {token}"})
assert response.status_code == HTTP_200_OK
@pytest.mark.django_db
@override_settings(DEBUG=True)
def test_list_rows_invalid_parameters(api_client, premium_data_fixture):
user, token = premium_data_fixture.create_user_and_token(
email="<EMAIL>",
password="password",
first_name="Test1",
has_active_premium_license=True,
)
kanban = premium_data_fixture.create_kanban_view(
user=user, single_select_field=None
)
kanban_2 = premium_data_fixture.create_kanban_view()
url = reverse("api:database:views:kanban:list", kwargs={"view_id": 0})
response = api_client.get(url, **{"HTTP_AUTHORIZATION": f"JWT {token}"})
assert response.status_code == HTTP_404_NOT_FOUND
assert response.json()["error"] == "ERROR_KANBAN_DOES_NOT_EXIST"
url = reverse("api:database:views:kanban:list", kwargs={"view_id": kanban_2.id})
response = api_client.get(url, **{"HTTP_AUTHORIZATION": f"JWT {token}"})
assert response.status_code == HTTP_400_BAD_REQUEST
assert response.json()["error"] == "ERROR_USER_NOT_IN_GROUP"
url = reverse("api:database:views:kanban:list", kwargs={"view_id": kanban.id})
response = api_client.get(url, **{"HTTP_AUTHORIZATION": f"JWT {token}"})
assert response.status_code == HTTP_400_BAD_REQUEST
assert response.json()["error"] == "ERROR_KANBAN_VIEW_HAS_NO_SINGLE_SELECT_FIELD"
@pytest.mark.django_db
@override_settings(DEBUG=True)
def test_list_rows_include_field_options(api_client, premium_data_fixture):
user, token = premium_data_fixture.create_user_and_token(
email="<EMAIL>",
password="password",
first_name="Test1",
has_active_premium_license=True,
)
table = premium_data_fixture.create_database_table(user=user)
text_field = premium_data_fixture.create_text_field(table=table, primary=True)
single_select_field = premium_data_fixture.create_single_select_field(table=table)
kanban = premium_data_fixture.create_kanban_view(
table=table, single_select_field=single_select_field
)
url = reverse("api:database:views:kanban:list", kwargs={"view_id": kanban.id})
response = api_client.get(
f"{url}?include=field_options", **{"HTTP_AUTHORIZATION": f"JWT {token}"}
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert len(response_json["field_options"]) == 2
assert response_json["field_options"][str(text_field.id)]["hidden"] is True
assert response_json["field_options"][str(text_field.id)]["order"] == 32767
assert response_json["field_options"][str(single_select_field.id)]["hidden"] is True
assert response_json["field_options"][str(single_select_field.id)]["order"] == 32767
@pytest.mark.django_db
@override_settings(DEBUG=True)
def test_list_all_rows(api_client, premium_data_fixture):
user, token = premium_data_fixture.create_user_and_token(
has_active_premium_license=True
)
table = premium_data_fixture.create_database_table(user=user)
text_field = premium_data_fixture.create_text_field(table=table, primary=True)
single_select_field = premium_data_fixture.create_single_select_field(table=table)
option_a = premium_data_fixture.create_select_option(
field=single_select_field, value="A", color="blue"
)
option_b = premium_data_fixture.create_select_option(
field=single_select_field, value="B", color="red"
)
kanban = premium_data_fixture.create_kanban_view(
table=table, single_select_field=single_select_field
)
model = table.get_model()
row_none = model.objects.create(
**{
f"field_{text_field.id}": "Row None",
f"field_{single_select_field.id}_id": None,
}
)
row_a1 = model.objects.create(
**{
f"field_{text_field.id}": "Row A1",
f"field_{single_select_field.id}_id": option_a.id,
}
)
row_a2 = model.objects.create(
**{
f"field_{text_field.id}": "Row A2",
f"field_{single_select_field.id}_id": option_a.id,
}
)
row_b1 = model.objects.create(
**{
f"field_{text_field.id}": "Row B1",
f"field_{single_select_field.id}_id": option_b.id,
}
)
row_b2 = model.objects.create(
**{
f"field_{text_field.id}": "Row B2",
f"field_{single_select_field.id}_id": option_b.id,
}
)
url = reverse("api:database:views:kanban:list", kwargs={"view_id": kanban.id})
response = api_client.get(url, **{"HTTP_AUTHORIZATION": f"JWT {token}"})
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert len(response_json["rows"]) == 3
assert response_json["rows"]["null"]["count"] == 1
assert len(response_json["rows"]["null"]["results"]) == 1
assert response_json["rows"]["null"]["results"][0] == {
"id": row_none.id,
"order": "1.00000000000000000000",
f"field_{text_field.id}": "Row None",
f"field_{single_select_field.id}": None,
}
assert response_json["rows"][str(option_a.id)]["count"] == 2
assert len(response_json["rows"][str(option_a.id)]["results"]) == 2
assert response_json["rows"][str(option_a.id)]["results"][0] == {
"id": row_a1.id,
"order": "1.00000000000000000000",
f"field_{text_field.id}": "Row A1",
f"field_{single_select_field.id}": {
"id": option_a.id,
"value": "A",
"color": "blue",
},
}
assert response_json["rows"][str(option_a.id)]["results"][1] == {
"id": row_a2.id,
"order": "1.00000000000000000000",
f"field_{text_field.id}": "Row A2",
f"field_{single_select_field.id}": {
"id": option_a.id,
"value": "A",
"color": "blue",
},
}
assert response_json["rows"][str(option_b.id)]["count"] == 2
assert len(response_json["rows"][str(option_b.id)]["results"]) == 2
assert response_json["rows"][str(option_b.id)]["results"][0] == {
"id": row_b1.id,
"order": "1.00000000000000000000",
f"field_{text_field.id}": "Row B1",
f"field_{single_select_field.id}": {
"id": option_b.id,
"value": "B",
"color": "red",
},
}
assert response_json["rows"][str(option_b.id)]["results"][1] == {
"id": row_b2.id,
"order": "1.00000000000000000000",
f"field_{text_field.id}": "Row B2",
f"field_{single_select_field.id}": {
"id": option_b.id,
"value": "B",
"color": "red",
},
}
@pytest.mark.django_db
@override_settings(DEBUG=True)
def test_list_with_specific_select_options(api_client, premium_data_fixture):
user, token = premium_data_fixture.create_user_and_token(
has_active_premium_license=True
)
table = premium_data_fixture.create_database_table(user=user)
single_select_field = premium_data_fixture.create_single_select_field(table=table)
option_a = premium_data_fixture.create_select_option(
field=single_select_field, value="A", color="blue"
)
kanban = premium_data_fixture.create_kanban_view(
table=table, single_select_field=single_select_field
)
url = reverse("api:database:views:kanban:list", kwargs={"view_id": kanban.id})
response = api_client.get(
f"{url}?select_option={option_a.id}",
**{"HTTP_AUTHORIZATION": f"JWT" f" {token}"},
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert len(response_json) == 1
assert response_json["rows"][str(option_a.id)]["count"] == 0
assert len(response_json["rows"][str(option_a.id)]["results"]) == 0
url = reverse("api:database:views:kanban:list", kwargs={"view_id": kanban.id})
response = api_client.get(
f"{url}?select_option=null",
**{"HTTP_AUTHORIZATION": f"JWT" f" {token}"},
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert len(response_json) == 1
assert response_json["rows"]["null"]["count"] == 0
assert len(response_json["rows"]["null"]["results"]) == 0
url = reverse("api:database:views:kanban:list", kwargs={"view_id": kanban.id})
response = api_client.get(
f"{url}?select_option={option_a.id}&select_option=null",
**{"HTTP_AUTHORIZATION": f"JWT" f" {token}"},
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert len(response_json["rows"]) == 2
assert response_json["rows"]["null"]["count"] == 0
assert len(response_json["rows"]["null"]["results"]) == 0
assert response_json["rows"][str(option_a.id)]["count"] == 0
assert len(response_json["rows"][str(option_a.id)]["results"]) == 0
@pytest.mark.django_db
@override_settings(DEBUG=True)
def test_list_all_rows_with_limit_and_offset(api_client, premium_data_fixture):
user, token = premium_data_fixture.create_user_and_token(
has_active_premium_license=True
)
table = premium_data_fixture.create_database_table(user=user)
single_select_field = premium_data_fixture.create_single_select_field(table=table)
option_a = premium_data_fixture.create_select_option(
field=single_select_field, value="A", color="blue"
)
kanban = premium_data_fixture.create_kanban_view(
table=table, single_select_field=single_select_field
)
model = table.get_model()
row_none1 = model.objects.create(
**{
f"field_{single_select_field.id}_id": None,
}
)
row_none2 = model.objects.create(
**{
f"field_{single_select_field.id}_id": None,
}
)
row_a1 = model.objects.create(
**{
f"field_{single_select_field.id}_id": option_a.id,
}
)
row_a2 = model.objects.create(
**{
f"field_{single_select_field.id}_id": option_a.id,
}
)
url = reverse("api:database:views:kanban:list", kwargs={"view_id": kanban.id})
response = api_client.get(
f"{url}?limit=1&offset=1", **{"HTTP_AUTHORIZATION": f"JWT {token}"}
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert len(response_json["rows"]) == 2
assert response_json["rows"]["null"]["count"] == 2
assert len(response_json["rows"]["null"]["results"]) == 1
assert response_json["rows"]["null"]["results"][0]["id"] == row_none2.id
assert response_json["rows"][str(option_a.id)]["count"] == 2
assert len(response_json["rows"][str(option_a.id)]["results"]) == 1
assert response_json["rows"][str(option_a.id)]["results"][0]["id"] == row_a2.id
url = reverse("api:database:views:kanban:list", kwargs={"view_id": kanban.id})
response = api_client.get(
f"{url}?select_option=null,1,1", **{"HTTP_AUTHORIZATION": f"JWT {token}"}
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert len(response_json) == 1
assert response_json["rows"]["null"]["count"] == 2
assert len(response_json["rows"]["null"]["results"]) == 1
assert response_json["rows"]["null"]["results"][0]["id"] == row_none2.id
url = reverse("api:database:views:kanban:list", kwargs={"view_id": kanban.id})
response = api_client.get(
f"{url}?select_option={option_a.id},1,1",
**{"HTTP_AUTHORIZATION": f"JWT" f" {token}"},
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert len(response_json) == 1
assert response_json["rows"][str(option_a.id)]["count"] == 2
assert len(response_json["rows"][str(option_a.id)]["results"]) == 1
assert response_json["rows"][str(option_a.id)]["results"][0]["id"] == row_a2.id
url = reverse("api:database:views:kanban:list", kwargs={"view_id": kanban.id})
response = api_client.get(
f"{url}?select_option={option_a.id},1,1&select_option=null,2,0",
**{"HTTP_AUTHORIZATION": f"JWT" f" {token}"},
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert len(response_json["rows"]) == 2
assert response_json["rows"]["null"]["count"] == 2
assert len(response_json["rows"]["null"]["results"]) == 2
assert response_json["rows"]["null"]["results"][0]["id"] == row_none1.id
assert response_json["rows"]["null"]["results"][1]["id"] == row_none2.id
assert response_json["rows"][str(option_a.id)]["count"] == 2
assert len(response_json["rows"][str(option_a.id)]["results"]) == 1
assert response_json["rows"][str(option_a.id)]["results"][0]["id"] == row_a2.id
url = reverse("api:database:views:kanban:list", kwargs={"view_id": kanban.id})
response = api_client.get(
f"{url}?select_option={option_a.id},2,0&select_option=null&limit=1&offset=1",
**{"HTTP_AUTHORIZATION": f"JWT" f" {token}"},
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert len(response_json["rows"]) == 2
assert response_json["rows"]["null"]["count"] == 2
assert len(response_json["rows"]["null"]["results"]) == 1
assert response_json["rows"]["null"]["results"][0]["id"] == row_none2.id
assert response_json["rows"][str(option_a.id)]["count"] == 2
assert len(response_json["rows"][str(option_a.id)]["results"]) == 2
assert response_json["rows"][str(option_a.id)]["results"][0]["id"] == row_a1.id
assert response_json["rows"][str(option_a.id)]["results"][1]["id"] == row_a2.id
@pytest.mark.django_db
@override_settings(DEBUG=True)
def test_list_all_invalid_select_option_parameter(api_client, premium_data_fixture):
user, token = premium_data_fixture.create_user_and_token(
has_active_premium_license=True
)
table = premium_data_fixture.create_database_table(user=user)
single_select_field = premium_data_fixture.create_single_select_field(table=table)
kanban = premium_data_fixture.create_kanban_view(
table=table, single_select_field=single_select_field
)
url = reverse("api:database:views:kanban:list", kwargs={"view_id": kanban.id})
response = api_client.get(
f"{url}?select_option=null,a",
**{"HTTP_AUTHORIZATION": f"JWT {token}"},
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_INVALID_SELECT_OPTION_PARAMETER"
response = api_client.get(
f"{url}?select_option=null,1,1&select_option=1,1,a",
**{"HTTP_AUTHORIZATION": f"JWT {token}"},
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_INVALID_SELECT_OPTION_PARAMETER"
@pytest.mark.django_db
@override_settings(DEBUG=True)
def test_patch_kanban_view_field_options(api_client, premium_data_fixture):
user, token = premium_data_fixture.create_user_and_token(
email="<EMAIL>",
password="password",
first_name="Test1",
has_active_premium_license=True,
)
table = premium_data_fixture.create_database_table(user=user)
text_field = premium_data_fixture.create_text_field(table=table)
kanban = premium_data_fixture.create_kanban_view(
table=table, single_select_field=None
)
url = reverse("api:database:views:field_options", kwargs={"view_id": kanban.id})
response = api_client.patch(
url,
{"field_options": {text_field.id: {"width": 300, "hidden": False}}},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert len(response_json["field_options"]) == 1
assert response_json["field_options"][str(text_field.id)]["hidden"] is False
assert response_json["field_options"][str(text_field.id)]["order"] == 32767
options = kanban.get_field_options()
assert len(options) == 1
assert options[0].field_id == text_field.id
assert options[0].hidden is False
assert options[0].order == 32767
@pytest.mark.django_db
@override_settings(DEBUG=True)
def test_create_kanban_view(api_client, data_fixture, premium_data_fixture):
user, token = premium_data_fixture.create_user_and_token(
has_active_premium_license=True
)
table = premium_data_fixture.create_database_table(user=user)
single_select_field = premium_data_fixture.create_single_select_field(table=table)
single_select_field_2 = premium_data_fixture.create_single_select_field()
cover_image_file_field = data_fixture.create_file_field(table=table)
response = api_client.post(
reverse("api:database:views:list", kwargs={"table_id": table.id}),
{
"name": "<NAME>",
"type": "kanban",
"filter_type": "OR",
"filters_disabled": True,
"single_select_field": single_select_field_2.id,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert (
response_json["error"]
== "ERROR_KANBAN_VIEW_FIELD_DOES_NOT_BELONG_TO_SAME_TABLE"
)
response = api_client.post(
reverse("api:database:views:list", kwargs={"table_id": table.id}),
{
"name": "<NAME>",
"type": "kanban",
"filter_type": "OR",
"filters_disabled": True,
"single_select_field": None,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["name"] == "<NAME>"
assert response_json["type"] == "kanban"
assert response_json["filter_type"] == "OR"
assert response_json["filters_disabled"] is True
assert response_json["single_select_field"] is None
kanban_view = KanbanView.objects.all().last()
assert kanban_view.id == response_json["id"]
assert kanban_view.single_select_field is None
response = api_client.post(
reverse("api:database:views:list", kwargs={"table_id": table.id}),
{
"name": "<NAME>",
"type": "kanban",
"filter_type": "AND",
"filters_disabled": False,
"single_select_field": single_select_field.id,
"card_cover_image_field": cover_image_file_field.id,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["name"] == "<NAME>"
assert response_json["type"] == "kanban"
assert response_json["filter_type"] == "AND"
assert response_json["filters_disabled"] is False
assert response_json["single_select_field"] == single_select_field.id
assert response_json["card_cover_image_field"] == cover_image_file_field.id
kanban_view = KanbanView.objects.all().last()
assert kanban_view.id == response_json["id"]
assert kanban_view.single_select_field_id == single_select_field.id
@pytest.mark.django_db
def test_create_kanban_view_invalid_card_cover_image_field(api_client, data_fixture):
user, token = data_fixture.create_user_and_token()
table = data_fixture.create_database_table(user=user)
text = data_fixture.create_text_field(table=table)
file_field = data_fixture.create_file_field()
response = api_client.post(
reverse("api:database:views:list", kwargs={"table_id": table.id}),
{"name": "<NAME>", "type": "kanban", "card_cover_image_field": text.id},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_REQUEST_BODY_VALIDATION"
assert (
response_json["detail"]["card_cover_image_field"][0]["code"] == "does_not_exist"
)
response = api_client.post(
reverse("api:database:views:list", kwargs={"table_id": table.id}),
{"name": "Test 2", "type": "kanban", "card_cover_image_field": file_field.id},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert response_json["error"] == "ERROR_FIELD_NOT_IN_TABLE"
@pytest.mark.django_db
@override_settings(DEBUG=True)
def test_update_kanban_view(api_client, premium_data_fixture):
user, token = premium_data_fixture.create_user_and_token(
has_active_premium_license=True
)
table = premium_data_fixture.create_database_table(user=user)
kanban_view = premium_data_fixture.create_kanban_view(
table=table, single_select_field=None
)
single_select_field = premium_data_fixture.create_single_select_field(table=table)
single_select_field_2 = premium_data_fixture.create_single_select_field()
response = api_client.patch(
reverse("api:database:views:item", kwargs={"view_id": kanban_view.id}),
{
"single_select_field": single_select_field_2.id,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_400_BAD_REQUEST
assert (
response_json["error"]
== "ERROR_KANBAN_VIEW_FIELD_DOES_NOT_BELONG_TO_SAME_TABLE"
)
response = api_client.patch(
reverse("api:database:views:item", kwargs={"view_id": kanban_view.id}),
{
"single_select_field": single_select_field.id,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["single_select_field"] == single_select_field.id
kanban_view.refresh_from_db()
assert kanban_view.single_select_field_id == single_select_field.id
response = api_client.patch(
reverse("api:database:views:item", kwargs={"view_id": kanban_view.id}),
{
"single_select_field": None,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["single_select_field"] is None
kanban_view.refresh_from_db()
assert kanban_view.single_select_field is None
@pytest.mark.django_db
def test_update_kanban_view_card_cover_image_field(
api_client, data_fixture, premium_data_fixture
):
user, token = premium_data_fixture.create_user_and_token(
has_active_premium_license=True
)
table = premium_data_fixture.create_database_table(user=user)
cover_image_file_field = data_fixture.create_file_field(table=table)
kanban_view = premium_data_fixture.create_kanban_view(
table=table, card_cover_image_field=None
)
response = api_client.patch(
reverse("api:database:views:item", kwargs={"view_id": kanban_view.id}),
{
"card_cover_image_field": cover_image_file_field.id,
},
format="json",
HTTP_AUTHORIZATION=f"JWT {token}",
)
response_json = response.json()
assert response.status_code == HTTP_200_OK
assert response_json["card_cover_image_field"] == cover_image_file_field.id
``` |
{
"source": "6leetcode/6leetcode",
"score": 3
} |
#### File: Algorithms/0066. Plus One/python.py
```python
from typing import List
class Solution:
def plusOne(self, digits: List[int]) -> List[int]:
length = len(digits)
for i in reversed(range(length)):
if digits[i] == 9:
digits[i] = 0
else:
digits[i] += 1
return digits
if digits[0] == 0:
ans = [1]
ans.extend([0]*length)
return ans
return digits
if __name__ == '__main__':
solution = Solution()
digits = [1, 2, 3]
print("Input: ", digits)
print("Output:", solution.plusOne(digits))
``` |
{
"source": "6lineir/amlak-kara",
"score": 2
} |
#### File: amlak-kara/Ads/models.py
```python
from django.db import models
from accounts.models import User
from django.utils import timezone
# Create your models here.
class City(models.Model):
ct_title = models.CharField(max_length=48, verbose_name='استان')
def __str__(self):
return self.ct_title
class Meta:
verbose_name = "استان"
verbose_name_plural = "استان ها"
class category(models.Model):
cat_title = models.CharField(max_length=48, verbose_name='دسته بندی')
cat_slug = models.SlugField(verbose_name='لینک دسته بندی')
def __str__(self):
return self.cat_title
class Meta:
verbose_name = "دسته بندی"
verbose_name_plural = "دسته بندی ها"
class service(models.Model):
sr_title = models.CharField(max_length=48, verbose_name='امکانات')
def __str__(self):
return self.sr_title
class Meta:
verbose_name = "ویژگی"
verbose_name_plural = "ویژگی ها"
STATUS_CHOICES = (
('R', "اجاره شده"),
('N', "خالی شده"),
)
Cat_CHOICES = (
('R', "برای اجاره"),
('S', "برای فروش"),
)
Room_CHOICES = (
('1', "تک خواب"),
('2', "دو خواب"),
('3', "سه خواب"),
('4', "چهار خواب"),
('5', "پنج به بالا"),
)
Floor_CHOICES = (
('1', "همکف"),
('2', "دو طبقه"),
('3', "سه طبقه"),
('4', "چهار طبقه"),
('5', "پنج طبقه به بالا"),
)
Fool_CHOICES = (
("دو نفر", "دو نفر"),
("چهار نفر", "چهار نفر"),
("پنج نفر", "پنج نفر"),
("شش نفر", "شش نفر"),
("هشت نفر", "هشت نفر"),
("ده نفر", "ده نفر"),
("پانزده به بالا", "پانزده به بالا"),
)
class Ads(models.Model):
author = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name="کاربر")
title = models.CharField(max_length=64, verbose_name="عنوان آگهی")
body = models.TextField(verbose_name="توضیحات")
catorg = models.CharField(max_length=1, choices=Cat_CHOICES, verbose_name="اجاره/فروش")
    # The original `on_delete=any` is not a valid deletion handler; CASCADE is assumed here.
    category = models.ForeignKey(category, on_delete=models.CASCADE, verbose_name="دسته بندی")
price = models.IntegerField(blank=True, null=True, verbose_name="قیمت فروش ملک")
pricerent1 = models.IntegerField(blank=True, null=True, verbose_name="قیمت اجاره شنبه تا چهارشنبه")
pricerent2 = models.IntegerField(blank=True, null=True, verbose_name="قیمت اجاره چهارشنبه تا شنبه")
pricerent3 = models.IntegerField(blank=True, null=True, verbose_name="قیمت اجاره تعطیلات")
image = models.ImageField(upload_to="adspic", verbose_name="تصویر کاور")
image2 = models.ImageField(upload_to="adspic", verbose_name="تصویر")
sizeerth = models.IntegerField(blank=True, verbose_name="متراژ زمین")
sizelot = models.IntegerField(verbose_name="متراژ بنا")
floor = models.CharField(max_length=2, choices=Floor_CHOICES, blank=True, verbose_name="تعداد طبقات")
rooms = models.CharField(max_length=1, choices=Room_CHOICES, blank=True, verbose_name="تعداد اتاق خواب")
fool = models.CharField(max_length=20, choices=Fool_CHOICES, blank=True, verbose_name="ظرفیت نفرات")
services = models.ManyToManyField(service, blank=True, verbose_name="امکانات")
    # The original `on_delete=any` is not a valid deletion handler; CASCADE is assumed here.
    cat_city = models.ForeignKey(City, on_delete=models.CASCADE, verbose_name="استان")
status = models.CharField(max_length=1, choices=STATUS_CHOICES, blank=True, verbose_name="وضعیت فعلی" )
created_at = models.DateTimeField(auto_now_add=True, verbose_name="زمان انتشار آگهی")
ads_vip = models.BooleanField(default=False, verbose_name="آگهی ویژه")
def __str__(self):
return self.title
class Meta:
verbose_name = "آگهی"
verbose_name_plural = "آگهی ها"
``` |