metadata (dict) | text (string, lengths 60 to 3.49M)
---|---
{
"source": "johnnymetz/ClassicComputerScienceProblemsInPython",
"score": 3
} |
#### File: ClassicComputerScienceProblemsInPython/Chapter6/mj.py
```python
from __future__ import annotations
from typing import List
from data_point import DataPoint
from kmeans import KMeans
class Album(DataPoint):
    def __init__(self, name: str, year: int, length: float, tracks: float) -> None:
        super().__init__([length, tracks])
        self.name = name
        self.year = year
        self.length = length
        self.tracks = tracks

    def __repr__(self) -> str:
        return f"{self.name}, {self.year}"


if __name__ == "__main__":
    albums: List[Album] = [
        Album("Got to Be There", 1972, 35.45, 10),
        Album("Ben", 1972, 31.31, 10),
        Album("Music & Me", 1973, 32.09, 10),
        Album("Forever, Michael", 1975, 33.36, 10),
        Album("Off the Wall", 1979, 42.28, 10),
        Album("Thriller", 1982, 42.19, 9),
        Album("Bad", 1987, 48.16, 10),
        Album("Dangerous", 1991, 77.03, 14),
        Album("HIStory: Past, Present and Future, Book I", 1995, 148.58, 30),
        Album("Invincible", 2001, 77.05, 16),
    ]
    kmeans: KMeans[Album] = KMeans(2, albums)
    clusters: List[KMeans.Cluster] = kmeans.run()
    for index, cluster in enumerate(clusters):
        print(
            f"Cluster {index} Avg Length {cluster.centroid.dimensions[0]} Avg Tracks {cluster.centroid.dimensions[1]}: {cluster.points}\n"
        )
```
#### File: ClassicComputerScienceProblemsInPython/Chapter8/minimax.py
```python
from __future__ import annotations
from board import Piece, Board, Move
# Find the best possible outcome for original player
def minimax(
board: Board, maximizing: bool, original_player: Piece, max_depth: int = 8
) -> float:
# Base case – terminal position or maximum depth reached
if board.is_win or board.is_draw or max_depth == 0:
return board.evaluate(original_player)
# Recursive case - maximize your gains or minimize the opponent's gains
if maximizing:
best_eval: float = float("-inf") # arbitrarily low starting point
for move in board.legal_moves:
result: float = minimax(
board.move(move), False, original_player, max_depth - 1
)
best_eval = max(
result, best_eval
) # we want the move with the highest evaluation
return best_eval
else: # minimizing
worst_eval: float = float("inf")
for move in board.legal_moves:
result = minimax(board.move(move), True, original_player, max_depth - 1)
worst_eval = min(
result, worst_eval
) # we want the move with the lowest evaluation
return worst_eval
def alphabeta(
board: Board,
maximizing: bool,
original_player: Piece,
max_depth: int = 8,
alpha: float = float("-inf"),
beta: float = float("inf"),
) -> float:
# Base case – terminal position or maximum depth reached
if board.is_win or board.is_draw or max_depth == 0:
return board.evaluate(original_player)
# Recursive case - maximize your gains or minimize the opponent's gains
if maximizing:
for move in board.legal_moves:
result: float = alphabeta(
board.move(move), False, original_player, max_depth - 1, alpha, beta
)
alpha = max(result, alpha)
if beta <= alpha:
break
return alpha
else: # minimizing
for move in board.legal_moves:
result = alphabeta(
board.move(move), True, original_player, max_depth - 1, alpha, beta
)
beta = min(result, beta)
if beta <= alpha:
break
return beta
# Find the best possible move in the current position
# looking up to max_depth ahead
def find_best_move(board: Board, max_depth: int = 8) -> Move:
best_eval: float = float("-inf")
best_move: Move = Move(-1)
for move in board.legal_moves:
result: float = alphabeta(board.move(move), False, board.turn, max_depth)
if result > best_eval:
best_eval = result
best_move = move
return best_move
``` |
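A minimal usage sketch for `find_best_move`: it assumes a concrete `Board` implementation such as the tic-tac-toe board from the same chapter (the `tictactoe` module and `TTTBoard` name are assumptions, not part of minimax.py):
```python
# Sketch only: drive find_best_move with an assumed concrete Board implementation.
from minimax import find_best_move
from tictactoe import TTTBoard  # assumed module/class from the same chapter

board = TTTBoard()
while not board.is_win and not board.is_draw:
    move = find_best_move(board)  # best move for the side whose turn it is
    board = board.move(move)      # Board.move returns the new position
print("Final position reached")
```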
{
"source": "johnnymillergh/home_guard",
"score": 3
} |
#### File: home_guardian/common/startup.py
```python
from __future__ import annotations
from enum import Enum, unique
@unique
class StartupMode(Enum):
    """
    StartupMode is an enumeration of possible startup modes.
    """

    # Detect and inspect
    DETECT = "detect"
    # Collect data
    COLLECT = "collect"
    # Train data
    TRAIN = "train"

    @staticmethod
    def value_of(value: str) -> StartupMode:
        for member in StartupMode:
            if member.value == value:
                return member
        raise ValueError(f"Unknown startup mode: {value}")
```
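A short usage sketch for `StartupMode.value_of`, resolving a mode from a configuration string:
```python
# Sketch: map a CLI/config string onto the enum, with a clear error for bad input.
from home_guardian.common.startup import StartupMode

mode = StartupMode.value_of("collect")
assert mode is StartupMode.COLLECT
try:
    StartupMode.value_of("bogus")
except ValueError as e:
    print(e)  # Unknown startup mode: bogus
```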
#### File: home_guardian/common/time.py
```python
import time
from loguru import logger
def elapsed_time(fn):
    def decorator(*arg, **kwarg):
        start_time = time.time()
        return_value = fn(*arg, **kwarg)
        end_time = time.time()
        logger.info(f"Elapsed time of function {fn}:{round(end_time - start_time, 4)}s")
        return return_value

    return decorator
```
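A usage sketch for the `elapsed_time` decorator (the decorated function is illustrative):
```python
# Sketch: time any callable by decorating it; the elapsed time is logged via loguru.
from home_guardian.common.time import elapsed_time


@elapsed_time
def slow_sum(n: int) -> int:
    return sum(range(n))


slow_sum(1_000_000)  # logs something like "Elapsed time of function <function slow_sum ...>:0.03s"
```
Note that because the inner wrapper does not use `functools.wraps`, the decorated function's `__name__` and docstring are replaced by the wrapper's.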
#### File: home_guardian/opencv/threading.py
```python
from __future__ import annotations
from threading import Lock, Thread
from typing import Any
import cv2.cv2 as cv2
from cv2.cv2 import VideoCapture
from cv2.mat_wrapper import Mat
from loguru import logger
class VideoCaptureThreading:
"""
Class to capture video from a camera or a video file by Python threading.
Inspired by https://github.com/gilbertfrancois/video-capture-async/blob/master/main/gfd/py/video/capture.py
"""
def __init__(self, src: int = 0, width: int = 640, height: int = 480):
"""
Initialize the video capture threading object.
:param src: The source of the video.
:param width: The width of the video.
:param height: The height of the video.
"""
self._thread: Thread = Thread(target=self._thread_loop, args=())
self._src: int = src
self._video_capture: VideoCapture = VideoCapture(self._src)
self.set(cv2.CAP_PROP_FRAME_WIDTH, width).set(cv2.CAP_PROP_FRAME_HEIGHT, height)
# `_grabbed` is a boolean indicating if the frame is available or not
self._grabbed, self._frame = self._video_capture.read()
self._started = False
self._read_lock = Lock()
logger.warning("Initialized {}", self)
def set(self, property_id: int, value: Any) -> VideoCaptureThreading:
"""
Sets a property in the VideoCapture.
:param property_id: The property id.
:param value: The value of the property.
:return: self
"""
self._video_capture.set(property_id, value)
return self
def start(self) -> VideoCaptureThreading:
"""
Start the thread to read frames from the video stream.
:return: self
"""
if self._started:
logger.warning(
"Threaded video capturing has already been started! Cannot be started again."
)
return self
self._started = True
self._thread.start()
logger.debug("Started video capture thread. Thread: {}", self._thread)
return self
def _thread_loop(self) -> None:
"""
[Private] Loop over frames from the video stream.
"""
logger.warning("Started video capture loop. Thread: {}", self._thread)
while self._started:
grabbed, frame = self._video_capture.read()
with self._read_lock:
self._grabbed = grabbed
self._frame = frame
logger.warning("Stopped video capture loop. Thread: {}", self._thread)
def read(self) -> tuple[bool, Mat]:
"""
Read the frame from the video stream.
:return: `grabbed` is a boolean indicating if the frame is available or not
"""
with self._read_lock:
grabbed = self._grabbed
frame = self._frame.copy()
return grabbed, frame
def stop(self):
"""
Stop the thread and release video capture object.
"""
self._started = False
self._thread.join()
def __exit__(self, exec_type, exc_value, traceback):
"""
Release video capture object
"""
self._video_capture.release()
logger.warning("Released {}", self)
```
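A usage sketch for `VideoCaptureThreading` (camera index and frame count are placeholders):
```python
# Sketch: grab frames in a background thread and consume them from the main thread.
from home_guardian.opencv.threading import VideoCaptureThreading

capture = VideoCaptureThreading(src=0, width=640, height=480).start()
try:
    for _ in range(100):
        grabbed, frame = capture.read()
        if grabbed:
            pass  # hand `frame` to detection / recognition code here
finally:
    capture.stop()
```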
#### File: home_guardian/repository/trained_face_repository.py
```python
from datetime import datetime
from loguru import logger
from home_guardian.repository.model.trained_face import TrainedFace
@logger.catch
def save_or_update(username: str) -> TrainedFace:
"""
Save or update a trained face.
:param username: The username of the trained face.
:return: a new trained face.
"""
trained_face: TrainedFace = TrainedFace(username=username)
try:
trained_face.save()
except Exception as e:
logger.warning(f"Exception occurred while saving trained face. {e}")
trained_face.update({TrainedFace.modified_time: datetime.now()}).where(
TrainedFace.username == username
).execute()
trained_face = TrainedFace.get_or_none(TrainedFace.username == username)
return trained_face
def get_by_id(_id: int) -> TrainedFace:
"""
Get a trained face by id.
:param _id: The id of the trained face.
:return: a trained face.
"""
return TrainedFace.get_by_id(_id)
``` |
{
"source": "johnnymillergh/python_boilerplate",
"score": 3
} |
#### File: python_boilerplate/messaging/sending_email.py
```python
import datetime
import logging
import smtplib
from email.header import Header
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from typing import List
log = logging.getLogger("rotatingFileLogger")
# Email constants
mail_host: str = "smtp.sina.com"
mail_user: str = "johnnys_rpi_3b"
authorization_password: str = "<PASSWORD>"
sender: str = "<EMAIL>"
receivers: List[str] = ["<EMAIL>"]
def build_message(receiver: str) -> MIMEMultipart:
content: MIMEText = MIMEText(
"This is a email from Python "
+ datetime.datetime.now().strftime("%Y-%m-%d %T"),
"plain",
"utf-8",
)
message: MIMEMultipart = MIMEMultipart()
message["Subject"] = Header(
"Emergency Security Alert, at "
+ datetime.datetime.now().strftime("%Y-%m-%d %T"),
"utf-8",
)
message["From"] = Header(sender)
message["To"] = Header(receiver)
message["Cc"] = Header(sender)
message.attach(content)
return message
def send_email():
smtp: smtplib.SMTP = smtplib.SMTP(mail_host, 25)
smtp.connect(mail_host, 25)
smtp.login(mail_user, authorization_password)
for receiver in receivers:
message: MIMEMultipart = build_message(receiver)
try:
log.info(f"Sending email. receiver: {receiver}")
smtp.sendmail(sender, [receiver], message.as_string())
log.info(
f"Sent email successfully. {smtp}. receiver: {receiver}, message: {message}"
)
except smtplib.SMTPException:
log.exception("Exception occurred while sending email!")
smtp.quit()
``` |
{
"source": "JohnnyMoonlight/MacherdaachBadgeQueue-1",
"score": 2
} |
#### File: MacherdaachBadgeQueue-1/Software/main.py
```python
from controller.controller import Controller


def run():
    Controller().startup_controller()


if __name__ == '__main__':
    run()
``` |
{
"source": "johnnynetgevity/cardano-sl",
"score": 3
} |
#### File: python-api/explorer_python_api/explorer.py
```python
import requests
from requests.exceptions import ConnectionError
import logging
from datetime import datetime
CODE = {
'NOT_ACCESSIBLE': -1, # can't ping explorer
'RESPONSE_ERROR': -2, # explorer returned "Left"
'WRONG_RESPONSE': -3, # some unexpected message in response
'CALL_EXCEPTION': -4, # python exception while requesting
}
class ExplorerClient():
def __init__(self, logger, url):
self.logger = logger
self.url = url
def total_pages(self):
return self.__req_right('blocks/pages/total')
def slot(self, epoch, slot):
return self.__req_right(f'epochs/{epoch}/{slot}')
def range(self, startHash, endHash):
return self.__req_right(f'blocks/range/{startHash}/{endHash}')
def page(self):
return self.__req_right(f'blocks/pages')
def txs(self, hash):
return self.__req_right(f'blocks/txs/{hash}?limit=5000')
def tx(self, hash):
return self.__req_right(f'txs/summary/{hash}')
def __req_right(self, path):
def processor(r):
if 'Right' in r:
return {'ok': r['Right']}
if 'Left' in r:
return {'error': f'Request returned error! {r}', 'code': CODE['RESPONSE_ERROR'], 'left': r['Left']}
return {'error': f'Unexpected response format! {r}', 'code': CODE['WRONG_RESPONSE']}
return self.__req(path, processor)
def __req(self, path, result_processor):
try:
try:
return result_processor(self.__reget(f'{self.url}/api/{path}'))
except ConnectionError as e:
self.__error(f"Explorer is not accessible trying remote! {e}")
return {'error': msg, 'code': CODE['NOT_ACCESSIBLE']}
except BaseException as e:
msg = f'Explorer call has failed with an error! {e}'
self.__error(msg)
return {'error': msg, 'code': CODE['CALL_EXCEPTION']}
def __reget(self, url):
self.__debug(f'RE: {url}')
json = requests.get(url).json()
self.__debug(f'RP: {json}')
return json
def __debug(self, msg):
self.logger.debug(msg)
def __error(self, msg):
self.logger.error(msg)
``` |
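A usage sketch for `ExplorerClient` (the URL is a placeholder, not a known deployment):
```python
# Sketch: query a running explorer instance and unpack the Right/Left style response.
import logging

from explorer_python_api.explorer import ExplorerClient

logging.basicConfig(level=logging.DEBUG)
client = ExplorerClient(logging.getLogger("explorer"), "http://localhost:8100")

result = client.total_pages()
if 'ok' in result:
    print("total pages:", result['ok'])
else:
    print("error:", result['error'], "code:", result['code'])
```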
{
"source": "johnnynieves/Cisco_Network_Scripts",
"score": 2
} |
#### File: johnnynieves/Cisco_Network_Scripts/test2.py
```python
from napalm import get_network_driver
from napalm.base.exceptions import ConnectionException
from getpass import getpass
from netmiko import NetMikoAuthenticationException
from os import system
from Network_Menu import creds, get_driver_info
driver = get_network_driver('ios')
auth_error = 'Auth Error for '
cannot_connect = 'Could not connect to '
def set_configs():
driver_info = get_driver_info()
for i in range(int(driver_info[1]), int(driver_info[2])+1):
try:
ip = driver_info[0] + str(i)
credentials = creds()
device = driver(ip, credentials[0], credentials[1])
print(f'\nConnecting to {ip}')
device.open()
with open('config', 'r') as f:
config = f.readlines()
print('-' * 80)
print(device.device.send_config_set(config))
print('-' * 80)
except NetMikoAuthenticationException:
print(auth_error, ip)
print('-' * 80)
except ConnectionException:
print(cannot_connect, ip)
print('-' * 80)
if __name__ == "__main__":
set_configs()
``` |
{
"source": "johnnynode/AI-LEARNING-MATERIAL",
"score": 4
} |
#### File: Basic-Python/code/password_generator.py
```python
import random
word_file = "words.txt"
word_list = []
#fill up the word_list
with open(word_file,'r') as words:
for line in words:
# remove white space and make everything lowercase
word = line.strip().lower()
# don't include words that are too long or too short
if 3 < len(word) < 8:
word_list.append(word)
# Add your function generate_password here
# It should return a string consisting of three random words
# concatenated together without spaces
def generate_password() :
return random.choice(word_list) + random.choice(word_list) + random.choice(word_list)
# test your function
print(generate_password())
```
#### File: code/test_file/7.py
```python
def copyFun(src, target):
    # src is the source file, target is the destination file
    # open the source and destination files in binary mode
    f1 = open(src, 'rb')
    f2 = open(target, 'wb')
    # read and write in a loop
    content = f1.readline()
    while len(content) > 0:
        # print(content, end="")
        f2.write(content)
        content = f1.readline()
    # close the source and destination files
    f1.close()
    f2.close()


copyFun('img/ccc.jpg', 'img/ddd.jpg')
```
#### File: code/test_file/file.py
```python
'''
f=open('./a.txt', 'r')
content = f.readline()
while len(content) > 0:
content = f.readline()
print(content, end="")
f.close()
'''
### 读取所有的行 readlines 试用于读取小文件
'''
f=open('./a.txt', 'r')
flist = f.readlines() # 返回一个列表
for line in flist:
print(line, end="")
f.close()
'''
### 简单的文件写入操作
'''
f=open('./b.txt', 'w') # 清空写
f.write('Hello Java\n')
f.write('Hello Java\n')
f.close()
'''
### 批量写
'''
a=['Hello World\n', 'Hello Java\n', 'Hello Python\n']
f=open('./b.txt', 'w') # w是清空写,a是追加写
f.writelines(a)
f.close()
'''
### 自定义文件复制函数
def myCopy(file1, file2):
'''
file1 : 源文件
file2 : 目标文件
'''
# 1. 打开两个文件
f1 = open(file1, 'r')
f2 = open(file2, 'w')
# 2. 循环读取并写入实现复制内容
content = f1.readline()
while len(content) > 0:
f2.write(content)
content = f1.readline()
# 3. 关闭两个文件
f1.close()
f2.close()
# myCopy('./a.txt', './a_copy.txt')
### 关于图片,声音,视频,可执行程序等二进制文件读取
'''
需要注意的是二进制文件读取时,模式要选择相应的b 如 rb
文档内容一般分为2种格式:
1. 字符(一般不需要处理)
2. 字节(二进制)
二进制的需要特殊处理:
f1 = open(file1, 'rb')
f2 = open(file2, 'wb')
此处不再举例
'''
### 目录操作
'''
import os
os.getcwd() # 获取当前工作目录
os.chdir('your dir') # 跳转到某个目录
os.listdir() # 列出当前目录下的文件 返回一个文件列表
举例:
a = os.listdir()
for i in a:
print(i)
os.mkdir('bb') # 创建一个目录
os.rename('bb', 'cc') # 把 bb 文件/文件夹 改名为 cc
os.rmdir('cc') # 删除 cc 的文件夹 适合 空文件
os.rmdir('aa') # 如果aa目录中有文件,则会报错
os.stat('file.py') # 返回一个文件对象
举例:
info=os.stat('file.py')
info.st_size # 获取文件大小
info.其他属性 # 获取其他属性信息
系统命令
os.getenv('PATH') # 获取环境变量
os.putenv()
os.exit() 退出当前执行命令,直接关闭当前操作
os.system() 执行系统命令
'''
### 当前os模块的值
'''
os.curdir # 获取当前目录
os.name # 获取当前是什么系统
os.sep # 获取当前系统的分隔符 windows下是 \\ linux下是 / 常用,便于程序的移植性
os.extsep # 获取当前系统中文件名和后缀之间的分隔符号,所有系统都是 不常用
os.linesep # 获取当前系统的换行符号 不常用
### os.path 模块
os.path.abspath('相对路径') # 将相对路径转换为绝对路径
os.path.basename('路径') # 获取路径中文件夹或文件名称
os.path.dirname('路径') # 获取路径中的路径部分
os.path.join('路径1','路径2') # 将2个路径合成1个路径
os.path.split('路径') # 将一个领切割成文件夹和文件名部分
os.path.splitext('文件名称') # 将一个文佳宁切成名字和后缀两个部分 返回值 元组(名称,后缀)
os.path.getsize('路径') # 获取一个文件的大小 返回值 整数 只能是文件大小,不是目录大小 获取目录大小需要遍历
os.path.isfile('路径') # 检测一个路径是否是一个文件 返回值 布尔
os.path.exists('路径') # 检测文件是否存在 返回值 布尔
'''
### 复制文件夹的函数
'''
import os
def myCopy(file1, file2):
'''
file1 : 源文件
file2 : 目标文件
'''
# 1. 打开两个文件
f1 = open(file1, 'r')
f2 = open(file2, 'w')
# 2. 循环读取并写入实现复制内容
content = f1.readline()
while len(content) > 0:
f2.write(content)
content = f1.readline()
# 3. 关闭两个文件
f1.close()
f2.close()
def copyDir(dir1, dir2):
# 获取被复制目录中的所有文件信息
dlist = os.listdir(dir1)
# 创建新的目录
os.mkdir(dir2)
# 遍历所有文件,并执行文件复制
for f in dlist:
# 为遍历的文件添加目录路径
file1 = os.path.join(dir1, f) # 源
file2 = os.path.join(dir2, f) # 目标
# 判断是否是文件
if os.path.isfile(file1):
myCopy(file1, file2) # 调用自定义文件复制函数来复制文件
## 判断是否是目录
if os.path.isdir(file1):
copyDir(file1, file2) # 递归调用自己,来实现子目录的复制
'''
### todo 使用文件和目录操作,定义一个递归统计目录大小的函数
'''
^_^
'''
```
#### File: Basic-Python/code/test_function.py
```python
'''
def bb(m1, m2):
print(m1 + m2)
bb()
'''
### 带有默认值的函数
'''
def cc(name='jack', age=20, sex='man'):
print(name)
print(age)
print(sex)
cc() # 不传递参数 用默认值
cc('John') # 只传递一个
cc('Lily', 18, 'female') 传递参数,用参数
'''
### 关键字参数 可以改变传递顺序
'''
def dd(name='jack', age=20, sex='man'):
print(name)
print(age)
print(sex)
# 可以有效防止参数传错
dd(sex="female", age=10, name="JJ") # 参数不通过参数的位置来决定,通过参数对应的关键字属性决定,不用考虑传递顺序
'''
### 收集参数
#### 非关键字收集
'''
def demo(*arg):
print(arg) # 以元组的方式 得到参数
sum=0
for i in arg:
sum+=i
print('final sum: ', sum)
demo(10, 20, 30)
'''
#### 关键字收集 注意用 2颗*
'''
def demo(**arg):
print(arg)
demo(name="jack", age=10) # 调用后输出的是字典参数 输出:{'name':'jack', 'age':10}
'''
### 混合模式
'''
def demo(m, **arg):
print(m)
print(arg)
demo(10, name="jack", age=10) # 调用后输出的是字典参数 输出:10, {'name':'jack', 'age':10}
'''
### 返回值的使用
#### 定义一个计算指定数值累加的函数
'''
def sum(m):
# 在内部写注释
total=0
for i in range(0, m+1):
total += i
return total
print(sum(100)) # 5050
'''
#### 两种方式查看文档函数
'''
help(sum) # 在终端下输出
sum.__doc__ # 在文档输出
'''
#### 局部变量和全局变量
'''
1. 定义在函数内部的变量拥有一个局部作用域,定义在函数外的拥有全局作用于
2. 局部变量只能再起被声明的函数内部访问,而全局变量可以在整个程序范围内访问。
3. 调用函数时,所有在函数内声明的变量名称都精被加入到作用域中。
'''
##### 函数外定义的称全局变量
'''
name = 'zhagnsan'
def fun():
name='lisi'
print(name) # 输出的是 lisi 局部变量
fun()
print('函数外输出全局变量: name: ', name) # 函数外输出全局变量: name: zhagnsan
'''
##### 使用 global 使用全局变量
'''
name = 'zhagnsan'
def fun():
global name
print(name) # 输出的是 zhangsan
name='lisi' # 此处修改了全局变量
fun()
print('函数外输出全局变量: name: ', name) # 函数外输出全局变量: name: lisi
'''
### 匿名函数 lambda 表达式
'''
1. 匿名函数:即不再使用def语句这样标准的形式定义一个函数
2. python 使用 lambda 来创建匿名函数
3. lambda 只是一个表达式,函数体比def简单很多
4. lambda 主体是一个表达式,而不是代码块,仅仅能在lambda表达式中封装有限的逻辑进去。
5. lambda 函数拥有自己的命名空间,且不能访问自己参数列表之外或全局命名空间里的参数。
6. 虽然lambda函数看起来只能写一行,却不等同于c或C++的内联函数,后者的目的是调用小函数时不占用栈内存从而增加运行效率。
7. 语法:lambda 函数的语法只包含一个语句,如:lambda [arg1, [arg2, ...argn]]:expression
'''
'''
sum=lambda v1, v2: v1+v2 # 这个方式中 冒号前面的是参数,冒号后面的是最终return出去的
print(sum(1,2)) # 3
'''
### 一些内置的函数
import math
import random
print(abs(10))  # 10
print(abs(-10))  # 10
print(max([10, 20, 30]))  # 30
print(min(2, 4, 6))  # 2
print(round(4.56789))  # 5
print(round(4.56789, 2))  # 4.57, keep 2 decimal places
print(math.ceil(4.0001))  # 5, ceiling
print(math.floor(4.99999))  # 4, floor
print(random.random())  # e.g. 0.2823..., a random float in [0, 1)
print(random.choice([10, 20, 30, 40]))  # pick a random value from the list
print(random.randrange(10))  # a random integer from 0 to 9
print(random.randrange(5, 10))  # a random integer from 5 to 9
print(random.randrange(0, 11, 2))  # a random even number from 0 to 10
print(random.shuffle([10, 20, 30, 40]))  # shuffles the list in place and returns None
print(math.sin(90))  # trigonometric function (argument in radians): 0.8939966636005579
print(math.pi)  # pi: 3.141592653589793
print(math.e)  # Euler's number: 2.718281828459045
```
#### File: code/test_magic/3.py
```python
class Person:
    name = 'zhangsan'
    age = 20


p = Person()
print(p)  # <__main__.Person object at 0x10073e668>
print('⭐️ ' * 20)


class Stu:
    name = 'zhangsan'
    age = 20

    def __str__(self):
        return "name: %s; age: %d" % (self.name, self.age)


s = Stu()
print(s)  # name: zhangsan; age: 20
```
#### File: code/test_magic/4.py
```python
class Demo:
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __str__(self):
        return "Demo(%d, %d)" % (self.x, self.y)

    def __add__(self, other):
        return Demo(self.x + other.x, self.y + other.y)  # return a new instance


d1 = Demo(5, 8)
d2 = Demo(9, -4)
print(d1 + d2)  # Demo(14, 4): adding two objects invokes the __add__ magic method
```
#### File: code/test_property/3.py
```python
class Rectangle:
    def __init__(self):
        self.width = 0
        self.height = 0

    def setSize(self, size):
        self.width, self.height = size  # note: size can be a tuple, list or set here

    def getSize(self):
        return self.width, self.height


r = Rectangle()
r.setSize({100, 200})  # caution: a set is unordered, so which value becomes width is not guaranteed
print(r.width, r.height)  # e.g. 200 100
print(r.getSize())  # e.g. (200, 100)
```
#### File: Projects/ImageClassifier/predict.py
```python
import matplotlib.pyplot as plt
import torch
from torch import nn
from torch import optim
# 训练相关
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import DataLoader
import torchvision
from torchvision import datasets, transforms
import torchvision.models as models
import json
from tqdm import tqdm
import numpy as np
from PIL import Image
import copy
from collections import OrderedDict
# 命令行相关
import argparse
# input checkpoint
parser = argparse.ArgumentParser()
# positional argument: input (path to the image to classify)
parser.add_argument("input")
# positional argument: checkpoint (path to the saved model checkpoint)
parser.add_argument("checkpoint")
# 设置可选参数: --top_k
parser.add_argument("-t", "--top_k", type=int)
# 设置可选参数: --category_names
parser.add_argument("-cn", "--category_names")
# 设置可选参数: --gpu
parser.add_argument("-g", "--gpu", action="store_true")
# 获取参数
args = parser.parse_args()
# declare variables
input = None  # path to the input image
checkpoint = None  # path to the saved checkpoint
top_k = None  # number of top classes to return
category_names = None  # path to the category-to-name mapping file
gpu = None  # whether to run inference on the GPU
# 通过参数更新数据
if args.input:
input = args.input
#print('args.input: ' , args.input)
if args.checkpoint:
checkpoint = args.checkpoint
#print('args.checkpoint: ', args.checkpoint)
if args.top_k:
top_k = args.top_k
#print('arags.top_k: ', args.top_k)
if args.category_names:
category_names = args.category_names
#print('args.category_names', args.category_names)
if args.gpu:
gpu = args.gpu
#print('args.gpu', args.gpu)
# 开始加载检查点
def load_ckp(filepath):
ckp = torch.load(filepath)
model = ckp['model']
class_to_idx = ckp['class_to_idx']
return model, class_to_idx
model, class_to_idx = load_ckp(checkpoint)
# 图像处理
def process_image(image):
im = Image.open(image)
w, h = im.size
if w < h:
pair = (256, int(256/w * h))
else:
pair = (int(256/h * w), 256)
im = im.resize(pair)
# 重新获取 w,h
w, h = im.size
# 你需要从图像的中心裁剪出 224x224 的部分
w_crop = (w - 224) / 2
h_crop = (h - 224) / 2
# (left, upper, right, lower)
box = (w_crop, h_crop, w - w_crop, h - h_crop)
# print('box', box)
#裁剪
region = im.crop(box)
# print(region.size)
# 图片转np
np_image = np.array(region)
# 图片除255进行归一化 标准化
ave = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
np_image_norm = (np_image / 255 - ave) / std
#print(np_image_norm)
res = np_image_norm.transpose(2, 0, 1)
return torch.from_numpy(res)
#显示图片功能
def imshow(image, ax=None, title=None):
if ax is None:
fig, ax = plt.subplots()
image = image.numpy().transpose((1, 2, 0))
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
image = std * image + mean
image = np.clip(image, 0, 1)
ax.imshow(image)
return ax
# 显示图片
img_tensor = process_image(input)
imshow(img_tensor)
# 预测图片函数
def predict(image_path, model, topk=5):
model.eval()
# 处理图片
img_tensor = process_image(image_path)
# 加一维数据,让它变成一个4-D的Tensor
fourDTensor = img_tensor.unsqueeze(0)
# .type(torch.FloatTensor)
fourDTensor = fourDTensor.type(torch.cuda.FloatTensor)
# target = Variable(fourDTensor)
target = Variable(fourDTensor.cuda(), volatile=True)
output = model(target)
# pre = torch.exp(output).data
pre = torch.exp(output)
probs, index = pre.topk(topk)
# 再次处理
probs = probs.cpu().detach().numpy().tolist()[0]
index = index.cpu().detach().numpy().tolist()[0]
res_index = []
for i in index:
res_index.append(class_to_idx[i])
return probs, res_index
# 标签映射
with open(category_names, 'r') as f:
cat_to_name = json.load(f)
# 预测图片
probs, classes = predict(input, model, top_k)
names = [cat_to_name[item] for item in classes]
print(probs)
print(names)
``` |
{
"source": "johnnynode/d_shop",
"score": 2
} |
#### File: admin/views/orders.py
```python
from django.shortcuts import render
from django.http import HttpResponse
from django.db.models import Q
from django.core.paginator import Paginator
from common.models import Goods,Users,Orders,Detail
# Create your views here.
def index(request,pIndex=1):
'''浏览信息'''
#获取订单信息
mod = Orders.objects
mywhere=[]
# 获取、判断并封装关keyword键搜索
kw = request.GET.get("keyword",None)
if kw:
# match orders whose recipient (linkman) or address contains the keyword
list = mod.filter(Q(linkman__contains=kw) | Q(address__contains=kw))
mywhere.append("keyword="+kw)
else:
list = mod.filter()
# 获取、判断并封装订单状态state搜索条件
state = request.GET.get('state','')
if state != '':
list = list.filter(state=state)
mywhere.append("state="+state)
#执行分页处理
pIndex = int(pIndex)
page = Paginator(list,5) #以5条每页创建分页对象
maxpages = page.num_pages #最大页数
#判断页数是否越界
if pIndex > maxpages:
pIndex = maxpages
if pIndex < 1:
pIndex = 1
list2 = page.page(pIndex) #当前页数据
plist = page.page_range #页码数列表
# 遍历订单信息并追加 下订单人姓名信息
for od in list2:
user = Users.objects.only('name').get(id=od.uid)
od.name = user.name
#封装信息加载模板输出
context = {"orderslist":list2,'plist':plist,'pIndex':pIndex,'maxpages':maxpages,'mywhere':mywhere}
return render(request,"admin/orders/index.html",context)
def detail(request,oid):
''' 订单详情信息 '''
try:
# 加载订单信息
orders = Orders.objects.get(id=oid)
if orders != None:
user = Users.objects.only('name').get(id=orders.uid)
orders.name = user.name
# 加载订单详情
dlist = Detail.objects.filter(orderid=oid)
# 遍历每个商品详情,从Goods中获取对应的图片
for og in dlist:
og.picname = Goods.objects.only('picname').get(id=og.goodsid).picname
# 放置模板变量,加载模板并输出
context = {'orders':orders,'detaillist':dlist}
return render(request,"admin/orders/detail.html",context)
except Exception as err:
print(err)
context = {'info':'没有找到要查看的信息!'}
return render(request,"admin/info.html",context)
def state(request):
''' 修改订单状态 '''
try:
oid = request.GET.get("oid",'0')
ob = Orders.objects.get(id=oid)
ob.state = request.GET['state']
ob.save()
context = {'info':'修改成功!'}
except Exception as err:
print(err)
context = {'info':'修改失败!'}
return render(request,"admin/info.html",context)
```
#### File: admin/views/type.py
```python
from django.shortcuts import render
from django.http import HttpResponse
from django.shortcuts import redirect
from django.core.urlresolvers import reverse
from common.models import Types
# 浏览商品类别信息
def index(request, pIndex=1):
# 执行数据查询,并放置到模板中
list = Types.objects.extra(select = {'_has':'concat(path,id)'}).order_by('_has')
# 遍历查询结果,为每个结果对象追加一个pname属性,目的用于缩进标题
for ob in list:
ob.pname ='. . . '*(ob.path.count(',')-1)
# print(list[0].__dict__)
#封装信息加载模板输出
context = {"typeslist":list}
return render(request,"admin/type/index.html",context)
# 商品类别信息添加表单
def add(request,tid):
# 获取父类别信息,若没有则默认为根类别信息
if tid == '0':
context = {'pid':0,'path':'0,','name':'根类别'}
else:
ob = Types.objects.get(id=tid)
context = {'pid':ob.id,'path':ob.path+str(ob.id)+',','name':ob.name}
return render(request,'admin/type/add.html',context)
#执行商品类别信息添加
def insert(request):
try:
ob = Types()
ob.name = request.POST['name']
ob.pid = request.POST['pid']
ob.path = request.POST['path']
ob.save()
context = {'info':'添加成功!'}
except Exception as err:
print(err)
context = {'info':'添加失败!'}
return render(request,"admin/info.html",context)
# 执行商品类别信息删除
def delete(request,tid):
try:
# 获取被删除商品的子类别信息量,若有数据,就禁止删除当前类别
row = Types.objects.filter(pid=tid).count()
if row > 0:
context = {'info':'删除失败:此类别下还有子类别!'}
return render(request,"admin/info.html",context)
ob = Types.objects.get(id=tid)
ob.delete()
context = {'info':'删除成功!'}
except Exception as err:
print(err)
context = {'info':'删除失败!'}
return render(request,"admin/info.html",context)
# 打开商品类别信息编辑表单
def edit(request,tid):
try:
ob = Types.objects.get(id=tid)
context = {'type':ob}
return render(request,"admin/type/edit.html",context)
except Exception as err:
print(err)
context = {'info':'没有找到要修改的信息!'}
return render(request,"admin/info.html",context)
# 执行商品类别信息编辑
def update(request,tid):
try:
ob = Types.objects.get(id=tid)
ob.name = request.POST['name']
ob.save()
context = {'info':'修改成功!'}
except Exception as err:
print(err)
context = {'info':'修改失败!'}
return render(request,"admin/info.html",context)
```
#### File: admin/views/users.py
```python
from django.shortcuts import render
from django.http import HttpResponse
from django.db.models import Q
from django.core.paginator import Paginator
from datetime import datetime
from common.models import Users
# Create your views here.
def index(request,pIndex=1):
'''浏览信息'''
umod = Users.objects
mywhere=[]
# 获取、判断并封装关keyword键搜索
kw = request.GET.get("keyword",None)
if kw:
# 查询账户或真实姓名中只要含有关键字的都可以
list = umod.filter(Q(username__contains=kw) | Q(name__contains=kw))
mywhere.append("keyword="+kw)
else:
list = umod.filter()
# 获取、判断并封装性别sex搜索条件
sex = request.GET.get('sex','')
if sex != '':
list = list.filter(sex=sex)
mywhere.append("sex="+sex)
#执行分页处理
pIndex = int(pIndex)
page = Paginator(list,5) #以5条每页创建分页对象
maxpages = page.num_pages #最大页数
#判断页数是否越界
if pIndex > maxpages:
pIndex = maxpages
if pIndex < 1:
pIndex = 1
list2 = page.page(pIndex) #当前页数据
plist = page.page_range #页码数列表
#封装信息加载模板输出
context = {"userslist":list2,'plist':plist,'pIndex':pIndex,'maxpages':maxpages,'mywhere':mywhere}
return render(request,"admin/users/index.html",context)
def add(request):
'''加载添加页面'''
return render(request,"admin/users/add.html")
def insert(request):
'''执行添加'''
try:
ob = Users()
ob.username = request.POST['username']
ob.name = request.POST['name']
#获取密码并md5
import hashlib
m = hashlib.md5()
m.update(bytes(request.POST['password'],encoding="utf8"))
ob.password = m.hexdigest()
ob.sex = request.POST['sex']
ob.address = request.POST['address']
ob.code = request.POST['code']
ob.phone = request.POST['phone']
ob.email = request.POST['email']
ob.state = 1
ob.addtime = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
ob.save()
context={"info":"添加成功!"}
except Exception as err:
print(err)
context={"info":"添加失败"}
return render(request,"admin/info.html",context)
def delete(request,uid):
'''删除信息'''
try:
ob = Users.objects.get(id=uid)
ob.delete()
context={"info":"删除成功!"}
except Exception as err:
print(err)
context={"info":"删除失败"}
return render(request,"admin/info.html",context)
def edit(request,uid):
'''加载编辑信息页面'''
try:
ob = Users.objects.get(id=uid)
context={"user":ob}
return render(request,"admin/users/edit.html",context)
except Exception as err:
context={"info":"没有找到要修改的信息!"}
return render(request,"admin/info.html",context)
def update(request,uid):
'''执行编辑信息'''
try:
ob = Users.objects.get(id=uid)
ob.name = request.POST['name']
ob.sex = request.POST['sex']
ob.address = request.POST['address']
ob.code = request.POST['code']
ob.phone = request.POST['phone']
ob.email = request.POST['email']
ob.state = request.POST['state']
ob.save()
context={"info":"修改成功!"}
except Exception as err:
print(err)
context={"info":"修改失败"}
return render(request,"admin/info.html",context)
def resetpass(request,uid):
'''加载重置会员密码信息页面'''
try:
ob = Users.objects.get(id=uid)
context={"user":ob}
return render(request,"admin/users/resetpass.html",context)
except Exception as err:
context={"info":"没有找到要修改的信息!"}
return render(request,"admin/info.html",context)
def doresetpass(request,uid):
'''执行编辑信息'''
try:
ob = Users.objects.get(id=uid)
#获取密码并md5
import hashlib
m = hashlib.md5()
m.update(bytes(request.POST['password'],encoding="utf8"))
ob.password = m.hexdigest()
ob.save()
context={"info":"密码重置成功!"}
except Exception as err:
print(err)
context={"info":"密码重置失败"}
return render(request,"admin/info.html",context)
``` |
{
"source": "johnnynode/python-spider",
"score": 3
} |
#### File: contents/code/pro5.py
```python
from requests.exceptions import RequestException
from lxml import etree
import requests
import time,json
def getPage(url):
'''爬取指定url页面信息'''
try:
#定义请求头信息
headers = {
'User-Agent':'User-Agent:Mozilla/5.0(WindowsNT6.1;rv:2.0.1)Gecko/20100101Firefox/4.0.1'
}
# 执行爬取
res = requests.get(url,headers=headers)
#判断响应状态,并响应爬取内容
if res.status_code == 200:
return res.text
else:
return None
except RequestException:
return None
def parsePage(content):
'''解析爬取网页中的内容,并返回字段结果'''
# =======使用xpath解析====================
# 解析HTML文档,返回根节点对象
html = etree.HTML(content)
#获取网页中所有标签并遍历输出标签名
items = html.xpath('//div[@class="item"]')
#遍历封装数据并返回
for item in items:
yield {
'index':item.xpath('.//div/em[@class=""]/text()')[0],
'image':item.xpath('.//img[@width="100"]/@src')[0],
'title':item.xpath('.//span[@class="title"]/text()')[0],
'actor':item.xpath('.//p[@class=""]/text()')[0],
'score':item.xpath('.//span[@class="rating_num"]/text()'),
#'time':item[4].strip()[5:],
}
def writeFile(content):
'''执行文件追加写操作'''
with open("./result.txt",'a',encoding='utf-8') as f:
f.write(json.dumps(content, ensure_ascii=False) + "\n")
#json.dumps 序列化时对中文默认使用的ascii编码.想输出真正的中文需要指定 ensure_ascii=False
def main(offset):
''' 主程序函数,负责调度执行爬虫处理 '''
url = 'https://movie.douban.com/top250?start=' + str(offset)
#print(url)
html = getPage(url)
#判断是否爬取到数据,并调用解析函数
if html:
for item in parsePage(html):
writeFile(item)
# 判断当前执行是否为主程序运行,并遍历调用主函数爬取数据
if __name__ == '__main__':
for i in range(10):
main(offset=i*25)
time.sleep(1)
```
#### File: fang_5i5j/spiders/fang.py
```python
import scrapy
from fang_5i5j.items import FangItem
from scrapy_redis.spiders import RedisSpider
class FangSpider(RedisSpider):
name = 'fang'
#allowed_domains = ['fang.5i5j.com']
#start_urls = ['https://fang.5i5j.com/bj/loupan/']
redis_key = 'fangspider:start_urls'
def __init__(self, *args, **kwargs):
# Dynamically define the allowed domains list.
domain = kwargs.pop('domain', '')
self.allowed_domains = filter(None, domain.split(','))
super(FangSpider, self).__init__(*args, **kwargs)
def parse(self, response):
#print(response.status)
hlist = response.css("li.houst_ctn")
for vo in hlist:
item = FangItem()
item['title'] = vo.css("span.house_name::text").extract_first()
# print(item)
yield item
#pass
``` |
{
"source": "johnnyp2587/fx-drqn",
"score": 3
} |
#### File: johnnyp2587/fx-drqn/process_data.py
```python
import numpy as np
import pandas as pd
import datetime
def gen_cols(Pad, cur, lag):
currency = list(np.sort(Pad['currency pair'].unique()))
tmp = Pad[Pad['currency pair'] == cur].sort_values(by=['timestamp'])
for i in range(1,lag+1):
colname1 = 'bid_lag_' + str(i)
colname2 = 'ask_lag_' + str(i)
tmp[colname1] = np.log(tmp['bid price']) - np.log(tmp['bid price'].shift(i))
tmp[colname2] = np.log(tmp['ask price']) - np.log(tmp['ask price'].shift(i))
for ccy in currency:
if ccy == cur:
pass
else:
_tmp = Pad[Pad['currency pair'] == ccy].sort_values(by=['timestamp'])
mid = pd.DataFrame(np.mean(np.asarray([_tmp['bid price'].values,_tmp['ask price'].values]), axis=0))
for i in range(1,lag+1):
colname3 = ccy + '_lag_' + str(i)
tmp[colname3] = np.log(mid) - np.log(mid.shift(i))
tmp['date'] = tmp['timestamp'].astype(str).str[0:10]
tmp['dow'] = pd.to_datetime(tmp['date']).dt.dayofweek
tmp['hh'] = tmp['timestamp'].astype(str).str[11:13]
tmp['mm'] = tmp['timestamp'].astype(str).str[14:16]
tmp['ss'] = tmp['timestamp'].astype(str).str[17:19]
tmp['time_1'] = np.sin(np.pi*tmp['dow'].values/7)
tmp['time_2'] = np.sin(np.pi*tmp['hh'].astype('int64').values/24)
tmp['time_3'] = np.sin(np.pi*tmp['mm'].astype('int64').values/60)
tmp['time_4'] = np.sin(np.pi*tmp['ss'].astype('int64').values/60)
tmp = tmp.drop(['date', 'dow','hh','mm','ss'], axis=1)
tmp = tmp.reset_index(drop=True)
tmp = tmp[lag:]
return tmp
def CreateFeature(cur, lag, week_num):
date_list = ['0201','0203','0204','0205',
'0206','0207','0208','0210',
'0211','0212','0213','0214',
'0215','0217','0218','0219',
'0220','0221','0222','0224',
'0225','0226','0227','0228','0301']
train_week_1 = date_list[0:4]
train_week_2 = date_list[4:8]
train_week_3 = date_list[8:12]
train_week_4 = date_list[12:16]
train_week_5 = date_list[16:20]
eval_week_1 = date_list[4:6]
eval_week_2 = date_list[8:10]
eval_week_3 = date_list[12:14]
eval_week_4 = date_list[16:18]
eval_week_5 = date_list[20:22]
if week_num == 1:
train_week = train_week_1
eval_week = eval_week_1
elif week_num == 2:
train_week = train_week_2
eval_week = eval_week_2
elif week_num == 3:
train_week = train_week_3
eval_week = eval_week_3
elif week_num == 4:
train_week = train_week_4
eval_week = eval_week_4
elif week_num == 5:
train_week = train_week_5
eval_week = eval_week_5
Pad_train = None
Pad_eval = None
for train_date in train_week:
filename = '../pad/pad-' + train_date + '.csv'
tmp = pd.read_csv(filename)
if Pad_train is not None:
Pad_train = Pad_train.append(tmp)
else:
Pad_train = tmp
final_train = gen_cols(Pad_train,cur,lag)
trainname = './data/train_' + cur + '_lag_' + str(lag) + '_week' + str(week_num) + '.csv'
final_train.to_csv(trainname,index=False)
for eval_date in eval_week:
filename = '../pad/pad-' + eval_date + '.csv'
tmp = pd.read_csv(filename)
if Pad_eval is not None:
Pad_eval = Pad_eval.append(tmp)
else:
Pad_eval = tmp
final_eval = gen_cols(Pad_eval,cur,lag)
evalname = './data/eval_' + cur + '_lag_' + str(lag) + '_week' + str(week_num) + '.csv'
final_eval.to_csv(evalname,index=False)
if __name__=='__main__':
CreateFeature('EURUSD', 16, 1)
```
#### File: johnnyp2587/fx-drqn/utils.py
```python
import numpy as np
import pandas as pd
from tqdm import tqdm
np.random.seed(1)
from itertools import count
Pad = pd.read_csv('PadData_v2.csv')
# Default
T = 3617
m = 16
to_draw = np.sort(Pad['timestamp'].unique())
ccy = np.sort(Pad['currency pair'].unique())
min_history = 1000 # min episode length
def generate_episode(n,cur):
_max = to_draw.shape[0]
_end = min(n+T, _max)
timeframe = to_draw[n:_end]
other_bid = np.zeros((timeframe.shape[0],ccy.shape[0]-1))
other_ask = np.zeros((timeframe.shape[0],ccy.shape[0]-1))
i = 0
for elem in ccy:
tmp = Pad[Pad['currency pair'] == elem]
if elem == cur:
target_bid = tmp[tmp.timestamp.isin(timeframe)]['bid price'].values
target_ask = tmp[tmp.timestamp.isin(timeframe)]['ask price'].values
else:
other_bid[:,i] = tmp[tmp.timestamp.isin(timeframe)]['bid price'].values
other_ask[:,i] = tmp[tmp.timestamp.isin(timeframe)]['ask price'].values
i += 1
return target_bid, target_ask, other_bid, other_ask
def features(price_path,m):
features = np.zeros((price_path.shape[0]-m,m))
for i in range(m):
features[:,i] = (np.log(price_path) - np.log(np.roll(price_path, i+1)))[m:]
return features
def get_features(target_bid, target_ask, other_bid, other_ask, m):
feature_span = features(target_bid,m)
feature_span = np.append(feature_span, features(target_ask,m), axis = 1)
for i in range(other_bid.shape[1]):
feature_span = np.append(feature_span, features(other_bid[:,i],m), axis = 1)
for j in range(other_ask.shape[1]):
feature_span = np.append(feature_span, features(other_ask[:,j],m), axis = 1)
return feature_span
def draw_episode(m, cur, min_history):
'''
Input:
m, number of lag returns z_1,...z_m
cur, currency pair that we target to trade
min_history, min length of a valid episode
'''
n = np.random.randint(to_draw.shape[0] - min_history)
target_bid, target_ask, other_bid, other_ask = generate_episode(n,cur)
feature_span = get_features(target_bid, target_ask, other_bid, other_ask, m)
normalized = (feature_span-feature_span.mean())/feature_span.std()
return target_bid, target_ask, normalized
def draw_train_episode(m, cur, min_history):
'''
Input:
m, number of lag returns z_1,...z_m
cur, currency pair that we target to trade
min_history, min length of a valid episode
'''
to_draw_train = to_draw[:int(to_draw.shape[0]*0.6)]
n = np.random.randint(to_draw_train.shape[0] - min_history)
target_bid, target_ask, other_bid, other_ask = generate_episode(n,cur)
feature_span = get_features(target_bid, target_ask, other_bid, other_ask, m)
normalized = (feature_span-feature_span.mean())/feature_span.std()
return target_bid, target_ask, normalized
``` |
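A usage sketch for drawing a training episode; note that importing this module reads `PadData_v2.csv` at import time, and the currency pair and lag below follow the defaults used elsewhere in the repository:
```python
# Sketch: one normalized training episode for EURUSD with 16 lagged returns per series.
from utils import draw_train_episode

target_bid, target_ask, features = draw_train_episode(m=16, cur='EURUSD', min_history=1000)
print(target_bid.shape, target_ask.shape, features.shape)
# features has one row per timestep after the first m, and 2 * m * (number of pairs) columns
```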
{
"source": "johnnyp2587/lstm-fx",
"score": 3
} |
#### File: lstm-fx/helpers/oanda_api_helpers.py
```python
import oandapyV20
import oandapyV20.endpoints.orders as orders
import oandapyV20.endpoints.trades as trades
import oandapyV20.endpoints.accounts as accounts
import oandapyV20.endpoints.positions as positions
from oandapyV20.contrib.requests import MarketOrderRequest
import json
# TODO: make sure send_request checks if order is through on weekends and no order_book is created
class TradingSession:
# initiate objects
def __init__(self, accountID, access_token):
self.accountID = accountID
self.access_token = access_token
self.api = oandapyV20.API(access_token=access_token, environment="practice")
self.order_book = self.oanda_order_book()
# initiate methods
def send_request(self, request):
"""
Sends request to oanda API.
Returns oanda's response if success, 1 if error.
"""
try:
rv = self.api.request(request)
# print(json.dumps(rv, indent=2))
return rv
except oandapyV20.exceptions.V20Error as err:
print(request.status_code, err)
return 1
def open_order(self, instrument, units):
# check if position is already open
if units < 0:
if self.order_book[instrument]['order_type'] == -1:
print('Short: {} (holding)'.format(instrument))
return 1
elif units > 0:
if self.order_book[instrument]['order_type'] == 1:
print('Long: {} (holding)'.format(instrument))
return 1
else:
print('Units specified: 0')
return 1
# define parameters, create and send a request
mkt_order = MarketOrderRequest(instrument=instrument, units=units)
r = orders.OrderCreate(self.accountID, data=mkt_order.data)
request_data = self.send_request(r)
# check if request was fulfilled and save its ID
if request_data != 1:
instrument = request_data['orderCreateTransaction']['instrument']
self.order_book[instrument]['tradeID'] = request_data['lastTransactionID']
self.order_book[instrument]['order_type'] = -1 if units < 0 else 1
print('{}: {}'.format('Long' if units > 0 else 'Short', instrument))
return 0
else:
return 1
def close_order(self, instrument):
# check if position exist
if self.order_book[instrument]['order_type'] is None:
print('Position {} does not exist'.format(instrument))
return 1
# create and send a request
r = trades.TradeClose(accountID=self.accountID, tradeID=self.order_book[instrument]['tradeID'])
request_data = self.send_request(r)
# check if request was fulfilled and clear it
if request_data != 1:
instrument = request_data['orderCreateTransaction']['instrument']
self.order_book[instrument]['order_type'] = None
self.order_book[instrument]['tradeID'] = None
print('Closed: {}'.format(instrument))
return 0
else:
return 1
def check_open_positions(self):
r = positions.OpenPositions(self.accountID)
return self.send_request(r)
def check_account_summary(self):
r = accounts.AccountSummary(self.accountID)
return self.send_request(r)
def oanda_order_book(self):
"""Synchronize open positions with this object's order_book"""
order_book_oanda = self.check_open_positions()
order_book = {'EUR_USD': {'order_type': None, 'tradeID': None},
'AUD_JPY': {'order_type': None, 'tradeID': None}}
for pos in order_book_oanda['positions']:
try:
trade_id = pos['long']['tradeIDs']
order_type = 1
except KeyError:
trade_id = pos['short']['tradeIDs']
order_type = -1
order_book[pos['instrument']]['tradeID'] = trade_id
order_book[pos['instrument']]['order_type'] = order_type
return order_book
def sync_with_oanda(self):
self.order_book = self.oanda_order_book()
def close_all_open_positions(self):
"""Close all opened positions"""
# check oanda for open positions
try:
open_positions = self.check_open_positions()['positions'][0]
except IndexError:
self.order_book = self.oanda_order_book()
print('No opened positions')
return 0
# get ID's of open positions
trade_ids = []
try:
[trade_ids.append(x) for x in open_positions['short']['tradeIDs']]
except KeyError:
pass
try:
[trade_ids.append(x) for x in open_positions['long']['tradeIDs']]
except KeyError:
pass
# close orders by ID
[close_order_manually(self.accountID, self.access_token, x) for x in trade_ids]
self.order_book = self.oanda_order_book()
print('All positions closed')
return 0
def close_order_manually(accountID, access_token, tradeID):
"""
Closes order manually using tradeID.
"""
api = oandapyV20.API(access_token=access_token, environment="practice")
request = trades.TradeClose(accountID, tradeID)
rv = api.request(request)
print(json.dumps(rv, indent=2))
return 0
``` |
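A usage sketch for `TradingSession` (account ID, token and units are placeholders for a practice account):
```python
# Sketch: open a small practice position, inspect the account, then flatten everything.
from helpers.oanda_api_helpers import TradingSession

session = TradingSession(accountID="101-001-0000000-001", access_token="<token>")
session.open_order("EUR_USD", units=100)  # long 100 units
print(session.check_account_summary())
session.close_order("EUR_USD")
session.close_all_open_positions()
```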
{
"source": "johnnyp2587/transfer-learning",
"score": 2
} |
#### File: johnnyp2587/transfer-learning/utils.py
```python
import pandas as pd
import numpy as np
import torch
from empyrical import sharpe_ratio, max_drawdown, annual_return, annual_volatility
from empyrical import sortino_ratio, downside_risk, value_at_risk, tail_ratio
from scipy.stats import skew, kurtosis
def calmar_ratio(x):
return annual_return(x).values/-max_drawdown(x)
def compute_performance_metrics(df_returns):
'''
:param df_returns:
:return:
'''
# metrics to compute
pf_metrics = [sharpe_ratio, calmar_ratio, max_drawdown, annual_return, annual_volatility,
sortino_ratio, downside_risk, value_at_risk, tail_ratio, skew, kurtosis]
pf_metrics_labels = ["SR", "CR", "MDD", "ANN_RET", "ANN_VOL", "SortR", "DownRisk", "VaR", "TailR", "Skew", "Kurt"]
# compute performance metric
df_metrics = pd.DataFrame(index=range(df_returns.shape[1]), columns=pf_metrics_labels)
for (pf, pf_label) in zip(pf_metrics, pf_metrics_labels):
df_metrics[pf_label] = np.array(pf(df_returns))
df_metrics.index = df_returns.columns
return df_metrics
def get_data(data_config, problem_config, model_config):
'''
:return:
'''
Xtrain_tasks, Xval_tasks, Xtest_tasks = {}, {}, {}
for region in data_config["region"]:
# pre-allocation
region_task_paths = [t + "_all_assets_data.pkl.gz" for t in data_config[region]]
Xtrain_tasks[region], Xval_tasks[region], Xtest_tasks[region] = {}, {}, {}
for (tk_path, tk) in zip(region_task_paths, data_config[region]):
# get data
df = pd.read_pickle(data_config["data_path"] + tk_path)
df_train = df.iloc[:-(problem_config["val_period"] + problem_config["holdout_period"])]
if problem_config["val_period"] != 0:
df_val = df.iloc[-(problem_config["val_period"] + problem_config[
"holdout_period"]):-problem_config["holdout_period"]]
else:
df_val = df.iloc[:-(problem_config["val_period"] + problem_config["holdout_period"])]
df_test = df.iloc[-problem_config["holdout_period"]:]
# transform in tensor
Xtrain_tasks[region][tk] = torch.from_numpy(df_train.values).to(model_config["device"])
Xval_tasks[region][tk] = torch.from_numpy(df_val.values).to(model_config["device"])
Xtest_tasks[region][tk] = torch.from_numpy(df_test.values).to(model_config["device"])
print(region, tk, Xtrain_tasks[region][tk].size())
return Xtrain_tasks, Xval_tasks, Xtest_tasks
def calc_tcosts(signal):
slip = 0.0005 * 0.00
bp = 0.0020 * 0.00
tc = (torch.abs(signal[:, 1:, :] - signal[:, :-1, :]) * (bp + slip))
tc = torch.cat([torch.zeros(signal.size(0), 1, signal.size(2)).double(), tc], dim=1)
return tc
``` |
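A usage sketch for `compute_performance_metrics` on synthetic daily returns (the strategy names and distribution parameters are illustrative):
```python
# Sketch: one column of returns per strategy; the result has one row of metrics per column.
import numpy as np
import pandas as pd

from utils import compute_performance_metrics

rng = np.random.default_rng(0)
returns = pd.DataFrame(
    rng.normal(0.0005, 0.01, size=(252, 2)),
    columns=["strategy_a", "strategy_b"],
)
print(compute_performance_metrics(returns))
```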
{
"source": "JohnnyP88/penndjangosaml2",
"score": 2
} |
#### File: penndjangosaml2/penndjangosaml2/models.py
```python
from django.db import models
class LongGroupName(models.Model):
    group_name = models.TextField(blank=False, null=False)
    count = models.IntegerField(blank=False, null=False, default=0)
    create_date = models.DateTimeField(auto_now=True)
    last_update = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return "{} count: {} last updated: {}".format(self.group_name, self.count, self.last_update)
``` |
{
"source": "johnnypackard/python-intro",
"score": 3
} |
#### File: python-intro/hello_flask/app.py
```python
from flask import Flask

app = Flask("johnny")


@app.route("/")
def home():
    return 'Hello, Curtis, you are lovely.'
``` |
{
"source": "johnny-pancake/PerpetualPoolsKeeperBot",
"score": 2
} |
#### File: PerpetualPoolsKeeperBot/src/pool_factory.py
```python
import time
from contract_utilities import fetch_build
from web3 import Web3
from eth_account import Account

ABI_FILE = "./artifacts/PoolFactory.json"


class PoolFactory(object):
    def __init__(self, w3, address):
        self.w3 = w3
        json = fetch_build(ABI_FILE)
        self.abi = json[0]
        self.contract = w3.eth.contract(address=address, abi=self.abi)
        self.address = address

    def numPools(self):
        return self.contract.functions.numPools().call()

    def pools(self, index):
        return self.contract.functions.pools(index).call()

    def getPools(self):
        pools = []
        for i in range(self.numPools()):
            pools.append(self.pools(i))
        return pools
``` |
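A usage sketch for `PoolFactory` (the RPC endpoint and factory address are placeholders, not a verified deployment):
```python
# Sketch: connect to a node and enumerate the pools registered with the factory.
from web3 import Web3

from pool_factory import PoolFactory

w3 = Web3(Web3.HTTPProvider("https://example-rpc.invalid"))
factory = PoolFactory(w3, address="0x0000000000000000000000000000000000000000")
print(factory.numPools())
print(factory.getPools())
```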
{
"source": "JohnnyPeng18/cauldron",
"score": 3
} |
#### File: cli/commands/connect.py
```python
import typing
from argparse import ArgumentParser
import requests
from requests import exceptions as request_exceptions
from cauldron import cli
from cauldron import environ
from cauldron.environ import Response
NAME = 'connect'
DESCRIPTION = """
Connect to a remote cauldron server and drive that server from this shell.
"""
def populate(
parser: ArgumentParser,
raw_args: typing.List[str],
assigned_args: dict
):
"""
:param parser:
:param raw_args:
:param assigned_args:
:return:
"""
parser.add_argument(
'url',
type=str,
default=None,
help=cli.reformat(
'The URL of the remote cauldron server including port'
)
)
parser.add_argument(
'-f', '--force',
dest='force',
default=False,
action='store_true',
help=cli.reformat(
"""
When this option is included, the connection will be established
without communicating with the remote cauldron instead to validate
the connection. This should only be used in cases where you are
absolutely confident that the connection is valid and accessible.
"""
)
)
def check_connection(url: str, force: bool) -> Response:
"""..."""
response = Response()
if force:
return response
ping = '{}/'.format(url)
response.notify(
kind='STARTING',
code='CONNECTING',
message='Establishing remote kernel connection to: {}'.format(url)
).console(
whitespace_top=1
)
try:
result = requests.get(ping)
if result.status_code != 200:
raise request_exceptions.ConnectionError()
return response.notify(
kind='CONNECTED',
code='REMOTE_CONNECTION_ESTABLISHED',
message='Remote connection established.'
).console(whitespace=1).response
except request_exceptions.InvalidURL as error:
return response.fail(
code='INVALID_URL',
message='Invalid connection URL. Unable to establish connection',
error=error
).console(
whitespace=1
).response
except request_exceptions.ConnectionError as error:
return response.fail(
code='CONNECTION_ERROR',
message='Unable to connect to remote cauldron host',
error=error
).console(
whitespace=1
).response
except Exception as error:
return response.fail(
code='CONNECT_COMMAND_ERROR',
message='Failed to connect to the remote cauldron host',
error=error
).console(
whitespace=1
).response
def _clean_url(url: str) -> str:
"""..."""
return '{}{}{}'.format(
'' if url.startswith('http') else 'http://',
'127.0.0.1' if url.startswith(':') else '',
url.strip().rstrip('/')
)
def execute(
context: cli.CommandContext,
url: str = None,
force: bool = False
) -> Response:
"""..."""
url_clean = _clean_url(url)
context.response.consume(check_connection(url_clean, force))
if context.response.failed:
return context.response
environ.remote_connection.url = url_clean
environ.remote_connection.active = True
return context.response.update(
url=url_clean,
remote_connection=environ.remote_connection
).notify(
kind='SUCCESS',
code='CONNECTED',
message='Connected to "{}"'.format(url_clean)
).console(
whitespace_bottom=1
).response
def autocomplete(segment: str, line: str, parts: typing.List[str]):
"""..."""
return []
```
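For reference, a behavior sketch of `_clean_url` showing how connection URLs are normalized before the health check:
```python
# Sketch: _clean_url prepends a scheme and host when missing and strips trailing slashes.
from cauldron.cli.commands.connect import _clean_url

assert _clean_url(":5010") == "http://127.0.0.1:5010"
assert _clean_url("localhost:5010/") == "http://localhost:5010"
assert _clean_url("https://example.com/") == "https://example.com"
```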
#### File: commands/listing/discovery.py
```python
import os
from cauldron import environ
from cauldron.environ import Response
from cauldron.session.projects import specio
def get_known_root_paths():
"""..."""
aliases = environ.configs.fetch('folder_aliases', {})
root_paths = list(set(
[os.path.dirname(p) for p in environ.configs.fetch('recent_paths', [])]
+ [a['path'] for a in aliases.values()]
))
index = 0
while index < len(root_paths):
path = root_paths[index]
children_paths = [
p
for i, p in enumerate(root_paths)
if index != i and p.startswith(path)
]
for p in children_paths:
root_paths.remove(p)
index += 1
return list(sorted(root_paths))
def echo_known_projects(response: Response) -> Response:
"""..."""
environ.configs.load()
project_specs = specio.ProjectSpecsReader()
for root in get_known_root_paths():
project_specs.add_recursive(root, root_path=root)
spec_groups = project_specs.group_by('root_path')
results = '\n\n'.join([
'{}\n{}'.format(root, specio.to_display_list(specs))
for root, specs in spec_groups.items()
])
return (
response
.update(
specs=project_specs.specs,
spec_groups=spec_groups
)
.notify(
kind='FOUND',
code='FOUND',
message='The following projects:\n\n{}'.format(results)
)
.console(whitespace=1)
).response
```
#### File: commands/listing/_lister.py
```python
from cauldron import cli
from cauldron import environ
from cauldron.cli.commands.listing import _utils
from cauldron.session.projects import specio
def execute_list(context: cli.CommandContext) -> environ.Response:
"""
Executes the list action for the recent command according to the
specified context object for the currently invoked command and
returns a response object containing the listed projects.
"""
projects = _utils.get_recent_projects()
if projects.specs:
display = 'Recent Projects:\n\n{}'.format(
specio.to_display_list(projects.specs)
)
else:
display = 'No recent projects found.'
return (
context.response
.update(projects=projects.specs)
.notify(
kind='RESULT',
code='PROJECT_HISTORY',
message=display
)
.console(whitespace=1)
.response
)
```
#### File: commands/steps/__init__.py
```python
import typing
from argparse import ArgumentParser
import cauldron
from cauldron import cli
from cauldron.cli import sync
from cauldron.cli.commands import sync as sync_command
from cauldron.cli.commands.open import opener as project_opener
from cauldron.cli.commands.steps import actions
from cauldron.cli.commands.steps import removal
from cauldron.cli.commands.steps import selection
from cauldron.cli.interaction import autocompletion
from cauldron.environ import Response
from cauldron.session import projects
NAME = 'steps'
DESCRIPTION = """
Carry out an action on one or more steps within the currently opened
project. The available actions are:
* [add]: Creates a new step
* [list]: Lists the steps within the currently opened project
* [modify]: Modifies an existing step
* [remove]: Removes an existing step from the project
* [unmute]: Enables a step within the active project
* [mute]: Disables a step within the active project
"""
def populate(
parser: ArgumentParser,
raw_args: typing.List[str],
assigned_args: dict
):
"""..."""
if len(raw_args) < 1:
assigned_args['action'] = 'list'
return
action = raw_args.pop(0).lower()
assigned_args['action'] = action
if action == 'add':
parser.add_argument(
'step_name',
type=str,
nargs='?',
help=cli.reformat(
"""
The name of the step you want to create
"""
)
)
elif action not in ['list', 'clean']:
parser.add_argument(
'step_name',
type=str,
help=cli.reformat(
"""
The name of the step on which to carry out the steps action
"""
)
)
if action in ['mute', 'unmute', 'select']:
return
if action in ['add', 'modify']:
parser.add_argument(
'-p', '--position',
dest='position',
type=str,
default=None,
help=cli.reformat(
"""
Specifies the index where the step will be inserted, or the
name of the step after which this new step will be inserted.
"""
)
)
parser.add_argument(
'-t', '--title',
dest='title',
type=str,
default=None,
help=cli.reformat(
"""
This specifies the title for the step that will be added or
modified
"""
)
)
if action == 'modify':
parser.add_argument(
'-n', '--name',
dest='new_name',
type=str,
default=None,
help=cli.reformat(
"""
This new name for the step when modifying an existing one
"""
)
)
if action == 'remove':
parser.add_argument(
'-k', '--keep',
dest='keep',
default=False,
action='store_true',
help=cli.reformat(
"""
Whether or not to keep the source file when removing a step
from a project
"""
)
)
def execute_remote(
context: cli.CommandContext,
action: str = None,
step_name: str = None,
position: str = None,
title: str = None,
new_name: str = None,
keep: bool = False,
) -> Response:
"""..."""
# modification_start_timestamp = time.time()
if action in ['list', 'clean', 'select']:
thread = sync.send_remote_command(
command=context.name,
raw_args=context.raw_args,
asynchronous=False
)
thread.join()
response = thread.responses[0]
return context.response.consume(response)
status_response = sync.comm.send_request(
endpoint='/sync-status',
remote_connection=context.remote_connection
)
if status_response.failed:
return context.response.consume(status_response)
source_directory = status_response.data['remote_source_directory']
if not project_opener.project_exists(context.response, source_directory):
return context.response
context.response.consume(execute(
context=context,
action=action,
step_name=step_name,
position=position,
title=title,
new_name=new_name,
keep=keep,
project=projects.Project(source_directory)
))
if context.response.failed:
return context.response
sync_response = sync_command.execute(cli.make_command_context(
name=sync_command.NAME,
remote_connection=context.remote_connection
))
sync_response.join()
return context.response.consume(sync_response)
def execute(
context: cli.CommandContext,
action: str = None,
step_name: str = None,
position: str = None,
title: str = None,
new_name: str = None,
keep: bool = False,
project: 'projects.Project' = None
) -> Response:
"""..."""
response = context.response
project = (
project
if project else
cauldron.project.get_internal_project()
)
if not project:
return response.fail(
code='NO_OPEN_PROJECT',
message='No project is open. Step commands require an open project'
).console(
whitespace=1
).response
if not action or action == 'list':
actions.echo_steps(response, project)
return response
if action == 'clean':
return actions.clean_steps(response, project)
if action == 'add' and not step_name:
step_name = ''
elif not step_name:
return response.fail(
code='NO_STEP_NAME',
message='A step name is required for this command'
).console(
whitespace=1
).response
step_name = step_name.strip('"')
if action == 'add':
return actions.create_step(
response=response,
project=project,
name=step_name,
position=position,
title=title.strip('"') if title else title
)
if action == 'modify':
return actions.modify_step(
response=response,
project=project,
name=step_name,
new_name=new_name,
title=title,
position=position
)
if action == 'remove':
return removal.remove_step(
response=response,
project=project,
name=step_name,
keep_file=keep
)
if action == 'unmute':
actions.toggle_muting(
response=response,
project=project,
step_name=step_name,
value=False
)
return response
if action == 'mute':
actions.toggle_muting(
response=response,
project=project,
step_name=step_name,
value=True
)
return response
if action == 'select':
return selection.select_step(
response=response,
project=project,
step_name=step_name
)
def autocomplete(segment: str, line: str, parts: typing.List[str]):
"""
:param segment:
:param line:
:param parts:
:return:
"""
action_names = [
'add',
'list',
'remove',
'modify',
'select',
'unmute',
'mute',
'clean'
]
if len(parts) < 2:
return autocompletion.matches(segment, parts[0], action_names)
action = parts[0]
if action == 'list':
return []
project = cauldron.project.internal_project
if len(parts) < 3 or parts[-1].startswith(('--position=', '-p ')):
prefix = parts[-1]
for remove in ['--position=', '-p ']:
if prefix.startswith(remove):
prefix = prefix[len(remove):]
break
prefix = prefix.strip().strip('"')
step_names = [x.definition.name for x in project.steps]
return autocompletion.match_in_path_list(
segment,
prefix,
step_names
)
if parts[-1].startswith('-'):
if action == 'list':
return []
shorts = []
longs = []
if action == 'remove':
shorts.append('k')
longs.append('keep')
else:
shorts += ['p', 't']
longs += ['position=', 'title=']
if action == 'modify':
shorts.append('n')
longs.append('name=')
return autocompletion.match_flags(
segment=segment,
value=parts[-1],
shorts=shorts,
longs=longs
)
return []
```
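The `populate` function above consumes the first token of `raw_args` to pick the action before registering the remaining arguments. A minimal sketch of that flow, assuming the Cauldron package is importable; the step name and title below are placeholder values:

```python
from argparse import ArgumentParser

from cauldron.cli.commands import steps

# Hypothetical invocation of the dynamic parser population for the
# "add" action; 'analysis.py' and 'Analysis' are placeholder values.
parser = ArgumentParser(prog='steps')
assigned = {}
raw_args = ['add', 'analysis.py', '--title', 'Analysis']

# populate() pops the action token off raw_args and registers the
# remaining arguments on the parser for that action.
steps.populate(parser, raw_args, assigned)
args = parser.parse_args(raw_args)

print(assigned['action'])  # add
print(args.step_name)      # analysis.py
print(args.title)          # Analysis
```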
#### File: commands/sync/__init__.py
```python
from cauldron import cli
from cauldron.cli import sync
from cauldron.cli.commands.sync.syncer import do_synchronize
from cauldron.environ.response import Response
from cauldron.session.projects import specio
NAME = 'sync'
DESCRIPTION = """
Synchronizes the remote cauldron connection with the most recent versions
of the locally stored project files.
"""
def _on_failure(
context: cli.CommandContext,
code: str,
message: str
) -> Response:
"""Convenience function for handling failures."""
return (
context.response
.fail(code=code, message=message)
.console(whitespace=1)
.response
)
def execute(context: cli.CommandContext) -> Response:
"""Runs the sync command."""
if not context.remote_connection.active:
return _on_failure(
context,
code='NO_REMOTE_CONNECTION',
message='No active remote connection. Nothing to sync.'
)
status_response = sync.comm.send_request(
endpoint='/sync-status',
method='GET',
remote_connection=context.remote_connection
)
source_directory = status_response.data.get('remote_source_directory')
if status_response.failed or not source_directory:
status_response.log_notifications()
return context.response.consume(status_response)
project_spec = specio.get_project_info(source_directory)
if project_spec is None:
return _on_failure(
context,
code='NO_PROJECT',
message='No project exists locally at: {}'.format(source_directory)
)
return context.response.consume(do_synchronize(
remote_connection=context.remote_connection,
source_directory=source_directory,
newer_than=context.remote_connection.sync_timestamp,
library_folders=project_spec.get('library_folders', ['libs'])
))
```
#### File: server/routes/ui_statuses.py
```python
import flask
from cauldron.cli.server import run as server_runner
from cauldron.ui import arguments
from cauldron.ui import statuses
@server_runner.APPLICATION.route('/ui-status', methods=['POST'])
def ui_status():
args = arguments.from_request()
last_timestamp = args.get('last_timestamp', 0)
force = args.get('force', False)
results = statuses.get_status(last_timestamp, force)
return flask.jsonify(results)
```
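A hypothetical client call against this route, assuming a kernel is serving on the default port 5010 and that `arguments.from_request` reads the posted JSON body:

```python
import requests

# Hypothetical request against the /ui-status route shown above,
# assuming a Cauldron kernel is listening on localhost:5010.
payload = {'last_timestamp': 0, 'force': False}
response = requests.post('http://localhost:5010/ui-status', json=payload)
print(response.status_code)
print(response.json())
```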
#### File: cauldron/environ/__init__.py
```python
import time
import typing
import sys
from collections import namedtuple
from datetime import datetime
from datetime import timedelta
from cauldron.cli import threads
from cauldron.environ import modes
from cauldron.environ import paths
from cauldron.environ import systems
from cauldron.environ.configuration import Configuration
from cauldron.environ.logger import blanks as log_blanks
from cauldron.environ.logger import header as log_header
from cauldron.environ.logger import log
from cauldron.environ.logger import raw as log_raw
from cauldron.environ.response import Response
VersionInfo = namedtuple('VersionInfo', ['major', 'minor', 'micro'])
class RemoteConnection:
"""Contains remote execution status information."""
def __init__(self, active: bool = False, url: str = None):
self.active = active # type: bool
self.url = url # type: typing.Optional[str]
self.local_project_directory = None # type: typing.Optional[str]
self._sync_timestamp = 0 # type: int
self._sync_active = False # type: bool
@property
def sync_timestamp(self) -> float:
"""Last time the sync action to the remote source started."""
return max(0, self._sync_timestamp - 2)
def serialize(self) -> dict:
return {
'active': self.active,
'url': self.url,
'sync': {
'timestamp': self._sync_timestamp,
'active': self._sync_active,
}
}
def reset_sync_time(self):
"""
Reverts the sync time to 0, which is needed when a sync state
needs to be rewound, e.g. when closing a project.
"""
self._sync_timestamp = 0
def sync_starting(self):
"""..."""
self._sync_active = True
self._sync_timestamp = time.time()
def sync_ending(self):
"""..."""
self._sync_active = False
remote_connection = RemoteConnection()
start_time = datetime.utcnow()
configs = Configuration().put(
persists=False,
directory=paths.INITIAL_DIRECTORY,
)
package_settings = systems.get_package_data()
version = package_settings.get('version', '0.0.0')
notebook_version = package_settings.get('notebookVersion', 'v0')
python_version = '{}.{}.{}'.format(
sys.version_info.major,
sys.version_info.minor,
sys.version_info.micro,
)
version_info = VersionInfo(*[int(x) for x in version.split('.')])
abort_thread = threads.abort_thread
#: Holds information about open viewer (reader) files
#: and is None when no such file has been opened for
#: viewing.
view = None # type: typing.Optional[dict]
def run_time() -> timedelta:
"""..."""
delta = start_time if start_time else datetime.utcnow()
return datetime.utcnow() - delta
```
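A short sketch of the sync bookkeeping on `RemoteConnection`, using only the methods defined above; the URL is a placeholder:

```python
from cauldron import environ

# Walk the sync life cycle on RemoteConnection; the URL is a placeholder.
connection = environ.RemoteConnection(active=True, url='localhost:5010')

connection.sync_starting()
print(connection.serialize()['sync']['active'])  # True while a sync runs

connection.sync_ending()
# sync_timestamp is reported with a two-second cushion subtracted.
print(connection.sync_timestamp)

connection.reset_sync_time()
print(connection.sync_timestamp)  # back to 0, e.g. after closing a project
```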
#### File: cauldron/cauldron/__init__.py
```python
from cauldron import environ as _environ
from cauldron import plotting # noqa
from cauldron import session as _session
from cauldron.session import display as _display
from cauldron.session import spark as _spark
from cauldron.session.caching import SharedCache as _SharedCache
from cauldron.session.definitions import ExecutionResult
from cauldron.session.reloading import refresh as _refresh
# Version Information in commonly viewed formats
__version__ = _environ.version # type: str
version = _environ.version # type: str
version_info = _environ.version_info # type: _environ.VersionInfo
project = _session.project # type: _session.ExposedProject
step = _session.step # type: _session.ExposedStep
shared = _session.project.shared # type: _SharedCache
display = _display
refresh = _refresh
mode = _environ.modes.ExposedModes
spark = _spark
def get_environment_info() -> dict:
"""
Information about Cauldron and its Python interpreter.
:return:
A dictionary containing information about Cauldron and its
Python environment. This information is useful when providing feedback
and bug reports.
"""
data = _environ.systems.get_system_data()
data['cauldron'] = _environ.package_settings.copy()
return data
def run_shell():
""" Starts the cauldron shell environment for console based interaction."""
from cauldron.cli.shell import CauldronShell
CauldronShell().cmdloop()
def run_server(port=5010, debug=False, **kwargs):
"""
Run the cauldron http server used to interact with cauldron from a remote
host.
:param port:
The port on which to bind the cauldron server.
:param debug:
Whether or not the server should be run in debug mode. If true, the
server will echo debugging information during operation.
:param kwargs:
Custom properties to alter the way the server runs.
"""
from cauldron.cli.server import run
run.execute(port=port, debug=debug, **kwargs)
def run_project(
project_directory: str,
output_directory: str = None,
logging_path: str = None,
reader_path: str = None,
reload_project_libraries: bool = False,
forget_project: bool = False,
**kwargs
) -> ExecutionResult:
"""
Runs a project as a single command directly within the current Python
interpreter.
:param project_directory:
The fully-qualified path to the directory where the Cauldron project is
located
:param output_directory:
The fully-qualified path to the directory where the results will be
written. All of the results files will be written within this
directory. If the directory does not exist, it will be created.
:param logging_path:
The fully-qualified path to a file that will be used for logging. If a
directory is specified instead of a file, a file will be created using
the default filename of cauldron_run.log. If a file already exists at
that location it will be removed and a new file created in its place.
:param reader_path:
Specifies a path where a reader file will be saved after the project
has finished running. If no path is specified, no reader file will be
saved. If the path is a directory, a reader file will be saved in that
directory with the project name as the file name.
:param reload_project_libraries:
Whether or not to reload all project libraries prior to execution of
the project. By default this is False, but can be enabled in cases
where refreshing the project libraries before execution is needed.
:param kwargs:
Any variables to be available in the cauldron.shared object during
execution of the project can be specified here as keyword arguments.
:return:
A response object that contains information about the run process
and the shared data from the final state of the project.
"""
from cauldron.cli import batcher
return batcher.run_project(
project_directory=project_directory,
output_directory=output_directory,
log_path=logging_path,
reader_path=reader_path,
reload_project_libraries=reload_project_libraries,
forget_project=forget_project,
shared_data=kwargs
)
```
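A minimal sketch of the batched entry point described above; the directory paths are placeholders and assume a valid project exists at the given location:

```python
import cauldron

# Hypothetical in-process run; both directory paths are placeholders.
result = cauldron.run_project(
    project_directory='/path/to/my-project',
    output_directory='/tmp/my-project-results',
    alpha=0.05,  # exposed to the steps through cauldron.shared
)

if result.success:
    # The final shared cache from the project is available afterwards.
    print(result.shared.fetch('alpha'))
else:
    result.response.log_notifications()
```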
#### File: cauldron/render/__init__.py
```python
import json as json_internal
import math
import os
import random
import re
import textwrap
import typing
from datetime import datetime
from datetime import timedelta
from cauldron import environ
from cauldron import templating
from cauldron.render import encoding
from cauldron.render import inspection
from cauldron.render import syntax_highlighting
from cauldron.render import utils as render_utils
def elapsed_time(seconds: float) -> str:
"""Displays the elapsed time since the current step started running."""
environ.abort_thread()
parts = (
'{}'.format(timedelta(seconds=seconds))
.rsplit('.', 1)
)
hours, minutes, seconds = parts[0].split(':')
return templating.render_template(
'elapsed_time.html',
hours=hours.zfill(2),
minutes=minutes.zfill(2),
seconds=seconds.zfill(2),
microseconds=parts[-1] if len(parts) > 1 else ''
)
def list_grid(
source: list,
expand_full: bool = False,
column_count: int = 2,
row_spacing: float = 1
):
"""
:param source:
:param expand_full:
:param column_count:
:param row_spacing:
:return:
"""
environ.abort_thread()
max_width = 1400 if expand_full else 900
column_width = '{}px'.format(
max(50, int(math.floor(max_width / column_count)))
)
return templating.render_template(
'list_grid.html',
items=['{}'.format(x) for x in source],
css_modifier='full' if expand_full else 'limited',
column_width=column_width,
row_spacing=row_spacing
)
def listing(
source: list,
ordered: bool = False,
expand_full: bool = False
) -> str:
"""
:param source:
:param ordered:
:param expand_full:
:return:
"""
environ.abort_thread()
return templating.render_template(
'listing.html',
type='ol' if ordered else 'ul',
items=['{}'.format(x) for x in source],
css_modifier='full' if expand_full else 'limited'
)
def inspect(source: dict) -> str:
"""
:param source:
:return:
"""
environ.abort_thread()
out = inspection.inspect_data(source=source)
return inspection.render_tree(out)
def code_file(
path: str,
language: str = None,
mime_type: str = None,
is_code_block: bool = False
) -> str:
"""
:param path:
:param language:
:param mime_type:
:param is_code_block:
:return:
"""
environ.abort_thread()
path = environ.paths.clean(path)
if not os.path.exists(path):
return 'File does not exist: {}'.format(path)
source = None
for encoding in ['utf-8', 'mac_roman', 'cp1250']:
try:
with open(path, 'r', encoding=encoding) as f:
source = f.read()
break
except Exception:
pass
if source is None:
return ''
return code(
source=source,
language=language,
filename=path,
mime_type=mime_type,
is_code_block=is_code_block
)
def code(
source: str,
language: str = None,
filename: str = None,
mime_type: str = None,
is_code_block: bool = False
) -> str:
"""
:param source:
:param language:
:param filename:
:param mime_type:
:param is_code_block:
:return:
"""
environ.abort_thread()
if not source:
return ''
cleaned = textwrap.dedent(source.strip('\n'))
return syntax_highlighting.as_html(
source=cleaned,
language=language,
filename=filename,
mime_type=mime_type,
is_code_block=is_code_block
)
def code_block(
block: str = None,
path: str = None,
language: str = None,
title: str = None,
caption: str = None
) -> str:
"""
:param block:
:param path:
:param language:
:param title:
:param caption:
:return:
"""
environ.abort_thread()
code_dom = (
code_file(path, language=language, is_code_block=True)
if path else
code(block, language=language, is_code_block=True)
)
return templating.render_template(
'code-block.html',
code=code_dom,
title=title,
caption=caption
)
def header(contents: str, level: int = 1, expand_full: bool = False) -> str:
"""
:param level:
:param contents:
:param expand_full:
:return:
"""
environ.abort_thread()
classes = [
'cd-Header',
'cd-Header--{}'.format('full' if expand_full else 'limited')
]
return templating.render(
"""
<h{{ level }} class="{{ classes }}">{{ contents }}</h{{ level }}>
""",
level=level,
contents=contents,
classes=' '.join(classes)
)
def image(
rendered_path: str,
width: int = None,
height: int = None,
justify: str = None
) -> str:
"""Renders an image block"""
environ.abort_thread()
return templating.render_template(
'image.html',
path=rendered_path,
width=width,
height=height,
justification=(justify or 'left').lower()
)
def json(**kwargs) -> str:
"""
Adds the specified data to the output display window with the
specified key. This allows the user to make arbitrary
JSON-compatible data available to the display for runtime use.
:param kwargs:
Each keyword argument is added to the CD.data object with the
specified key and value.
"""
environ.abort_thread()
return templating.render_template(
'json_include.html',
data=json_internal.dumps(kwargs, cls=encoding.ComplexJsonEncoder)
)
def html(content: str) -> str:
"""
A string containing a valid HTML snippet.
:param content:
The HTML string rendered for display.
"""
environ.abort_thread()
return templating.render(
'<div class="box">{{content}}</div>',
content=content
)
def plotly(
data: list = None,
layout: dict = None,
scale: float = 0.5,
figure: dict = None,
static: bool = False
) -> str:
"""
Creates a Plotly plot in the display with the specified data and
layout.
:param data:
The Plotly trace data to be plotted.
:param layout:
The layout data used for the plot.
:param scale:
The display scale with units of fractional screen height. A value
of 0.5 constrains the output to a maximum height equal to half the
height of the browser window when viewed. Values below 1.0 are usually
recommended so the entire output can be viewed without scrolling.
:param figure:
In cases where you need to create a figure instead of separate data
and layout information, you can pass the figure here and leave the
data and layout values as None.
:param static:
If true, the plot will be created without interactivity.
This is useful if you have a lot of plots in your notebook.
"""
environ.abort_thread()
try:
import plotly as plotly_lib
except ImportError:
plotly_lib = None
if plotly_lib is None:
return templating.render_template(
template_name='import-error.html',
library_name='Plotly'
)
source = figure if figure else {'data': data, 'layout': layout}
dom = plotly_lib.offline.plot(
figure_or_data=source,
output_type='div',
include_plotlyjs=False,
config={'staticPlot': static, 'showLink': False}
)
found = re.search(r'id="(?P<id>[^"]+)"', dom)
dom_id = found.group('id')
# Plotly < 4.0 requires manually inserting the static value.
if static and dom.find('"staticPlot": ') < 0: # pragma: no cover
insert_index = dom.index('"showLink":')
dom = ''.join([
dom[:insert_index],
'"staticPlot": {}, '.format('true' if static else 'false'),
dom[insert_index:]
])
return templating.render_template(
'plotly-component.html',
dom=dom,
scale=scale,
min_height=round(100.0 * scale),
id=dom_id
)
def table(
data_frame,
scale: float = 0.7,
include_index: bool = False,
max_rows: int = 500,
sample_rows: typing.Optional[int] = None,
formats: typing.Union[
str,
typing.Callable[[typing.Any], str],
typing.Dict[
str,
typing.Union[str, typing.Callable[[typing.Any], str]]
]
] = None
) -> str:
"""
:param data_frame:
:param scale:
:param include_index:
:param max_rows:
:param sample_rows:
:param formats:
"""
environ.abort_thread()
table_id = 'table-{}-{}'.format(
datetime.utcnow().strftime('%H-%M-%S-%f'),
random.randint(0, 10 ** 8)
)
df_source = (
data_frame.to_frame()
if hasattr(data_frame, 'to_frame') else
data_frame
)
df_source = (
df_source.sample(n=sample_rows)
if sample_rows and sample_rows > 0 else
df_source
)
df_source = (
df_source.head(max_rows)
if len(df_source) > max_rows else
df_source
)
if formats and not hasattr(formats, 'items'):
formats = {name: formats for name in df_source.columns}
if include_index:
df_source = df_source.reset_index()
df_source = df_source.assign(**{
name: df_source[name].map(
getattr(format_definition, 'format', format_definition)
)
for name, format_definition in (formats or {}).items()
if name in df_source
})
column_headers = ['"{}"'.format(x) for x in df_source.columns.tolist()]
data = df_source.values.tolist()
json_data = json_internal.dumps(data, cls=encoding.ComplexJsonEncoder)
return templating.render_template(
'table.html',
id=table_id,
scale=min(0.95, max(0.05, scale)),
data=json_data,
column_headers=', '.join(column_headers)
)
def whitespace(lines: float = 1.0) -> str:
"""
:param lines:
:return:
"""
environ.abort_thread()
pixels = round(12 * lines)
return '<div style="height:{}px"> </div>'.format(pixels)
def jinja(path: str, **kwargs) -> str:
"""
:param path:
:param kwargs:
:return:
"""
environ.abort_thread()
return templating.render_file(path, **kwargs)
def svg(svg_data: str) -> str:
"""
:param svg_data:
:return:
"""
environ.abort_thread()
return templating.render(
'<div class="svg-box">{{ svg }}</div>',
svg=svg_data
)
def status(
data: dict,
values: bool = True,
types: bool = True
) -> str:
"""
:param data:
:param values:
:param types:
:return:
"""
environ.abort_thread()
out = []
keys = list(data.keys())
keys.sort()
for key in keys:
value = data[key]
value_type = getattr(
value,
'__class__',
{'__name__': 'Unknown'}
).__name__
if hasattr(value, 'head'):
try:
value = value.head(5)
except Exception:
pass
elif isinstance(value, dict):
temp_value = []
for k, v in value.items():
temp_value.append('{}: {}'.format(k, v))
value = '\n'.join(temp_value)
elif isinstance(value, (list, tuple)):
value = '\n'.join(['{}'.format(v) for v in value])
value = '<pre>{}</pre>'.format(
render_utils.html_escape('{}'.format(value))[:600]
)
out.append(templating.render_template(
'status-variable.template.html',
name=key,
values=values,
types=types,
type=value_type,
value=value
))
return ''.join(out)
```
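Most helpers in this module return HTML fragments as plain strings, so they can be exercised directly. A small sketch, assuming Cauldron and its bundled templates are installed:

```python
from cauldron import render

# whitespace() converts "lines" into a 12px-per-line spacer div.
print(render.whitespace(2.0))   # <div style="height:24px"> </div>

# html() wraps an arbitrary snippet in the standard box container.
print(render.html('<b>status:</b> ok'))

# elapsed_time() renders hours/minutes/seconds from a float of seconds.
print(render.elapsed_time(3725.5))
```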
#### File: cauldron/session/definitions.py
```python
import os
import typing
import warnings
from cauldron import environ
from cauldron.session import projects
from cauldron.session import caching
class ExecutionResult:
"""Data Structure for data returned by batched project runs."""
def __init__(
self,
command_response: 'environ.Response',
project_data: 'caching.SharedCache'
):
self._response = command_response
self._shared = project_data
@property
def success(self) -> bool:
"""Whether or not the project execution succeeded."""
return self._response.success
@property
def failed(self) -> bool:
"""Whether or not the execution failed."""
return self._response.failed
@property
def response(self) -> 'environ.Response':
"""Command execution response"""
return self._response
@property
def shared(self) -> 'caching.SharedCache':
"""Shared data from the final execution state of the project."""
return self._shared
class FileDefinition(object):
"""..."""
def __init__(
self,
data: typing.Union[dict, str] = None,
project: 'projects.Project' = None,
project_folder: typing.Union[typing.Callable, str] = None
):
"""
:param data:
:param project:
"""
self.project = project
self.project_folder = project_folder
if isinstance(data, str):
self.data = {'name': data}
elif data is None:
self.data = {}
else:
self.data = data
@property
def slug(self):
folder = self.folder
if not folder:
return self.name
return os.path.join(folder, self.name)
@property
def name(self):
if 'name' not in self.data or not self.data['name']:
return 'invalid-file-name'
return self.data.get('name')
@name.setter
def name(self, value: str):
if value is None:
self.remove('name')
return
self.data['name'] = value
@property
def folder(self) -> typing.Union[str, None]:
"""
The folder, relative to the project source_directory, where the file
resides
:return:
"""
if 'folder' in self.data:
return self.data.get('folder')
elif self.project_folder:
if callable(self.project_folder):
return self.project_folder()
else:
return self.project_folder
return None
@folder.setter
def folder(self, value: str):
if value is None:
self.remove('folder')
return
self.data['folder'] = value
@property
def title(self) -> str:
return self.data.get('title', self.name)
@title.setter
def title(self, value: str):
if value is None:
self.remove('title')
return
self.data['title'] = value
def remove(self, key):
"""
:param key:
:return:
"""
if key in self.data:
del self.data[key]
def get(self, key, default_value=None):
"""
:param key:
:param default_value:
:return:
"""
if hasattr(self, key):
warnings.warn(
message='FileDefinition has a "{}" attribute'.format(key),
category=DeprecationWarning
)
return self.data.get(key, default_value)
def serialize(self) -> typing.Union[dict, str]:
"""
:return:
"""
out = dict()
for k, v in self.data.items():
if v is not None:
out[k] = v
keys = list(out.keys())
if len(keys) == 1 and keys[0] == 'name':
return self.name
return out
```
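`FileDefinition` normalizes step entries from `cauldron.json`, which may be either bare filename strings or dictionaries. A short sketch of that round trip, grounded in the behavior shown above:

```python
from cauldron.session.definitions import FileDefinition

# A bare string is promoted to a {'name': ...} payload.
definition = FileDefinition('S01-ingest.py')
print(definition.name)         # S01-ingest.py
print(definition.slug)         # same as name because no folder is set

definition.title = 'Ingest'
print(definition.serialize())  # {'name': 'S01-ingest.py', 'title': 'Ingest'}

# Setting a property to None removes its key, so serialization collapses
# back to the plain string form when only the name remains.
definition.title = None
print(definition.serialize())  # S01-ingest.py
```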
#### File: cauldron/session/naming.py
```python
import os
import re
import typing
def find_default_filename(existing_names: typing.List[str]) -> typing.Optional[str]:
other_names = [split_filename(n)['name'] for n in existing_names]
index = 0
for i in range(1000):
index += 1
name = '{}'.format(index)
if name not in other_names:
return name
return None
def split_filename(name: str) -> dict:
"""
:param name:
:return:
"""
filename = os.path.basename(name)
parts = filename.rsplit('.', 1)
return dict(
index=None,
name=parts[0],
extension=parts[1] if len(parts) > 1 else None
)
def explode_filename(name: str, scheme: str) -> dict:
"""
Removes any path components from the input filename and returns a
dictionary containing the name of the file without extension and the
extension (if an extension exists)
:param name:
:param scheme:
:return:
"""
if not scheme:
return split_filename(name)
replacements = {
'name': '(?P<name>.*)',
'ext': '(?P<extension>.+)$',
'index': '(?P<index>[0-9]{{{length}}})'
}
scheme_pattern = '^'
empty_scheme_pattern = ''
offset = 0
while offset < len(scheme):
char = scheme[offset]
next_char = scheme[offset + 1] if (offset + 1) < len(scheme) else None
if char in r'.()^$?*+\[]|':
addition = '\\{}'.format(char)
scheme_pattern += addition
empty_scheme_pattern += addition
offset += 1
continue
if char != '{':
scheme_pattern += char
empty_scheme_pattern += char
offset += 1
continue
if next_char != '{':
scheme_pattern += char
empty_scheme_pattern += char
offset += 1
continue
end_index = scheme.find('}}', offset)
contents = scheme[offset:end_index].strip('{}').lower()
if contents in replacements:
scheme_pattern += replacements[contents]
elif contents == ('#' * len(contents)):
addition = replacements['index'].format(length=len(contents))
scheme_pattern += addition
empty_scheme_pattern += addition
else:
addition = '{{{}}}'.format(contents)
scheme_pattern += addition
empty_scheme_pattern += addition
offset = end_index + 2
match = re.compile(scheme_pattern).match(name)
if not match:
parts = split_filename(name)
comparison = re.compile(empty_scheme_pattern.rstrip('-_: .\\'))
match = comparison.match(parts['name'])
if not match:
return parts
parts = match.groupdict()
index = parts.get('index')
index = int(index) if index else None
return dict(
index=index - 1 if index is not None else None,
name=parts.get('name', ''),
extension=parts.get('extension', 'py')
)
def assemble_filename(
name: str,
scheme: str,
extension: str = None,
index: int = None
) -> str:
"""
:param name:
:param scheme:
:param extension:
:param index:
:return:
"""
if not name:
name = ''
if not extension:
extension = 'py'
if index is None:
index = 0
if not scheme:
return '{}.{}'.format(name, extension)
out = scheme
pattern = re.compile('{{(?P<count>[#]+)}}')
match = pattern.search(scheme)
if match:
out = '{before}{replace}{after}'.format(
before=out[:match.start()],
replace='{}'.format(index + 1).zfill(len(match.group('count'))),
after=out[match.end():]
)
replacements = {
'{{name}}': name,
'{{ext}}': extension
}
for pattern, value in replacements.items():
out = out.replace(pattern, value)
parts = split_filename(out)
if not name:
parts['name'] = parts['name'].rstrip('-_: .')
return '{}.{}'.format(parts['name'].strip(), parts['extension'])
```
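A worked example of the naming helpers under the `S{{##}}-{{name}}.{{ext}}` scheme (the default used by projects below):

```python
from cauldron.session import naming

SCHEME = 'S{{##}}-{{name}}.{{ext}}'  # default project naming scheme

# Explode a step filename back into its index, name and extension.
parts = naming.explode_filename('S02-plot-data.py', SCHEME)
print(parts)  # {'index': 1, 'name': 'plot-data', 'extension': 'py'}

# Assemble the same filename from its pieces; the index is zero-based
# here and rendered one-based with zero padding in the result.
print(naming.assemble_filename('plot-data', SCHEME, 'py', 1))
# S02-plot-data.py
```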
#### File: session/projects/project.py
```python
import functools
import hashlib
import json
import os
import time
import typing
from collections import namedtuple
from cauldron import environ
from cauldron.session import definitions as file_definitions
from cauldron.session import writing
from cauldron.session.caching import SharedCache
from cauldron.session.projects import definitions
from cauldron.session.projects import steps
from cauldron.session.report import Report
DEFAULT_SCHEME = 'S{{##}}-{{name}}.{{ext}}'
StopCondition = namedtuple('StopCondition', ['aborted', 'halt'])
class Project:
"""..."""
def __init__(
self,
source_directory: str,
results_path: str = None,
shared: typing.Union[dict, SharedCache] = None
):
"""
:param source_directory:
:param results_path:
[optional] The path where the results files for the project will
be saved. If omitted, the default global results path will be
used.
:param shared:
[optional] The shared data cache used to store project data when
run
"""
source_directory = environ.paths.clean(source_directory)
if os.path.isfile(source_directory):
source_directory = os.path.dirname(source_directory)
self.source_directory = source_directory
self.steps = [] # type: typing.List[steps.ProjectStep]
self._results_path = results_path # type: str
self._current_step = None # type: steps.ProjectStep
self.last_modified = None
self.remote_source_directory = None # type: str
def as_shared_cache(source):
if source and not hasattr(source, 'fetch'):
return SharedCache().put(**source)
return source or SharedCache()
self.stop_condition = StopCondition(False, False) # type: StopCondition
self.shared = as_shared_cache(shared)
self.settings = SharedCache()
self.refresh()
@property
def uuid(self) -> str:
"""
The unique identifier for the project among all other projects, which
is based on a hash of the project's source path to prevent naming
collisions when storing project information from multiple projects in
the same directory (e.g. common results directory).
"""
return hashlib.sha1(self.source_path.encode()).hexdigest()
@property
def is_remote_project(self) -> bool:
"""Whether or not this project is remote"""
project_path = environ.paths.clean(self.source_directory)
return project_path.find('cd-remote-project') != -1
@property
def library_directories(self) -> typing.List[str]:
"""
The list of directories to all of the library locations
"""
def listify(value):
return [value] if isinstance(value, str) else list(value)
# If this is a project running remotely remove external library
# folders as the remote shared libraries folder will contain all
# of the necessary dependencies
is_local_project = not self.is_remote_project
folders = [
f
for f in listify(self.settings.fetch('library_folders', ['libs']))
if is_local_project or not f.startswith('..')
]
# Include the remote shared library folder as well
folders.append('../__cauldron_shared_libs')
# Include the project directory as well
folders.append(self.source_directory)
return [
environ.paths.clean(os.path.join(self.source_directory, folder))
for folder in folders
]
@property
def asset_directories(self):
"""..."""
def listify(value):
return [value] if isinstance(value, str) else list(value)
folders = listify(self.settings.fetch('asset_folders', ['assets']))
return [
environ.paths.clean(os.path.join(self.source_directory, folder))
for folder in folders
]
@property
def has_error(self):
"""..."""
for s in self.steps:
if s.error:
return True
return False
@property
def title(self) -> str:
out = self.settings.fetch('title')
if out:
return out
out = self.settings.fetch('name')
if out:
return out
return self.id
@title.setter
def title(self, value: str):
self.settings.title = value
@property
def id(self) -> str:
return self.settings.fetch('id', 'unknown')
@property
def naming_scheme(self) -> str:
return self.settings.fetch('naming_scheme', None)
@naming_scheme.setter
def naming_scheme(self, value: typing.Union[str, None]):
self.settings.put(naming_scheme=value)
@property
def current_step(self) -> typing.Union['steps.ProjectStep', None]:
if len(self.steps) < 1:
return None
step = self._current_step
return step if step else self.steps[0]
@current_step.setter
def current_step(self, value: typing.Union[Report, None]):
self._current_step = value
@property
def source_path(self) -> typing.Union[None, str]:
directory = self.source_directory
return os.path.join(directory, 'cauldron.json') if directory else None
@property
def results_path(self) -> str:
"""The path where the project results will be written"""
def possible_paths():
yield self._results_path
yield self.settings.fetch('path_results')
yield environ.configs.fetch('results_directory')
yield environ.paths.results(self.uuid)
return next(p for p in possible_paths() if p is not None)
@results_path.setter
def results_path(self, value: str):
self._results_path = environ.paths.clean(value)
@property
def url(self) -> str:
"""
Returns the URL that will open this project results file in the browser
:return:
"""
return 'file://{path}?id={id}'.format(
path=os.path.join(self.results_path, 'project.html'),
id=self.uuid
)
@property
def baked_url(self) -> str:
"""
Returns the URL that will open this project results file in the browser
with the loading information baked into the file so that no URL
parameters are needed to view it, which is necessary on platforms like
Windows.
"""
return 'file://{path}'.format(
path=os.path.join(self.results_path, 'display.html'),
id=self.uuid
)
@property
def output_directory(self) -> str:
"""
Returns the directory where the project results files will be written
"""
return os.path.join(self.results_path, 'reports', self.uuid, 'latest')
@property
def output_path(self) -> str:
"""
Returns the full path to where the results.js file will be written
:return:
"""
return os.path.join(self.output_directory, 'results.js')
def select_step(
self,
step_name_or_index: typing.Union[str, int, 'steps.ProjectStep']
) -> typing.Optional['steps.ProjectStep']:
"""
Selects the specified step by step object, step name or index if
such a step exists and returns that step if it does.
"""
if isinstance(step_name_or_index, steps.ProjectStep):
step = (
step_name_or_index
if step_name_or_index in self.steps
else None
)
elif isinstance(step_name_or_index, int):
index = min(len(self.steps) - 1, step_name_or_index)
step = self.steps[index]
else:
step = self.get_step(step_name_or_index or '')
if not step:
return None
for s in self.steps:
s.is_selected = (s == step)
return step
def make_remote_url(self, host: str = None):
"""..."""
clean_host = (host or '').rstrip('/')
return '{}/view/project.html?id={}'.format(clean_host, self.uuid)
def kernel_serialize(self):
"""..."""
return dict(
uuid=self.uuid,
stop_condition=self.stop_condition._asdict(),
last_modified=self.last_modified,
remote_source_directory=self.remote_source_directory,
source_directory=self.source_directory,
source_path=self.source_path,
output_directory=self.output_directory,
output_path=self.output_path,
url=self.url,
remote_slug=self.make_remote_url(),
title=self.title,
id=self.id,
steps=[s.kernel_serialize() for s in self.steps],
naming_scheme=self.naming_scheme
)
def refresh(self, force: bool = False) -> bool:
"""
Loads the cauldron.json definition file for the project and populates
the project with the loaded data. Any existing data will be overwritten
if the new definition file differs from the previous one.
If the project has already been loaded with the most recent version of the
cauldron.json file, this method will return without making any changes
to the project.
:param force:
If true the project will be refreshed even if the project file
modified timestamp doesn't indicate that it needs to be refreshed.
:return:
Whether or not a refresh was needed and carried out
"""
lm = self.last_modified
is_newer = lm is not None and lm >= os.path.getmtime(self.source_path)
if not force and is_newer:
return False
old_definition = self.settings.fetch(None)
new_definition = definitions.load_project_definition(
self.source_directory
)
if not force and old_definition == new_definition:
return False
self.settings.clear().put(**new_definition)
old_step_definitions = old_definition.get('steps', [])
new_step_definitions = new_definition.get('steps', [])
if not force and old_step_definitions == new_step_definitions:
return True
old_steps = self.steps
self.steps = []
for step_data in new_step_definitions:
matches = [s for s in old_step_definitions if s == step_data]
if len(matches) > 0:
index = old_step_definitions.index(matches[0])
self.steps.append(old_steps[index])
else:
self.add_step(step_data)
self.last_modified = time.time()
return True
def get_step(self, name: str) -> typing.Optional['steps.ProjectStep']:
"""Returns the step by name or None if no such step is found."""
for s in self.steps:
if s.definition.name == name:
return s
return None
def get_step_by_reference_id(
self,
reference_id: str
) -> typing.Union['steps.ProjectStep', None]:
"""Returns the step by its ID or None if no such step is found."""
for s in self.steps:
if s.reference_id == reference_id:
return s
return None
def index_of_step(self, name) -> typing.Union[int, None]:
"""
:param name:
:return:
"""
name = name.strip('"')
for index, s in enumerate(self.steps):
if s.definition.name == name:
return int(index)
return None
def add_step(
self,
step_data: typing.Union[str, dict],
index: int = None
) -> typing.Union['steps.ProjectStep', None]:
"""
:param step_data:
:param index:
:return:
"""
fd = file_definitions.FileDefinition(
data=step_data,
project=self,
project_folder=functools.partial(
self.settings.fetch,
'steps_folder'
)
)
if not fd.name:
self.last_modified = 0
return None
ps = steps.ProjectStep(self, fd)
if index is None:
self.steps.append(ps)
else:
if index < 0:
index %= len(self.steps)
self.steps.insert(index, ps)
if fd.name.endswith('.py'):
for i in range(self.steps.index(ps) + 1, len(self.steps)):
self.steps[i].mark_dirty(True)
self.last_modified = time.time()
return ps
def remove_step(self, name) -> typing.Union['steps.ProjectStep', None]:
"""
:param name:
:return:
"""
step = None
for ps in self.steps:
if ps.definition.name == name:
step = ps
break
if step is None:
return None
if step.definition.name.endswith('.py'):
for i in range(self.steps.index(step) + 1, len(self.steps)):
self.steps[i].mark_dirty(True)
self.steps.remove(step)
return step
def save(self, path: str = None):
"""
:param path:
:return:
"""
if not path:
path = self.source_path
self.settings.put(
steps=[ps.definition.serialize() for ps in self.steps]
)
data = self.settings.fetch(None)
with open(path, 'w+') as f:
json.dump(data, f, indent=2, sort_keys=True)
self.last_modified = time.time()
def write(self) -> str:
"""..."""
writing.save(self)
return self.url
def status(self) -> dict:
return dict(
id=self.id,
steps=[s.status() for s in self.steps],
stop_condition=self.stop_condition._asdict(),
last_modified=self.last_modified,
remote_slug=self.make_remote_url()
)
```
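Among the properties above, `uuid` is a SHA-1 digest of the path to the project's `cauldron.json`, which keeps output directories collision-free when several projects share a results directory. A standalone sketch of that derivation with placeholder paths:

```python
import hashlib
import os

# Reproduce Project.uuid outside the class; the directory is a placeholder.
source_directory = '/path/to/my-project'
source_path = os.path.join(source_directory, 'cauldron.json')

project_uuid = hashlib.sha1(source_path.encode()).hexdigest()
output_directory = os.path.join('<results_path>', 'reports', project_uuid, 'latest')
print(project_uuid)
print(output_directory)
```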
#### File: writing/components/__init__.py
```python
from cauldron.session import projects
from cauldron.session.writing.components import bokeh_component
from cauldron.session.writing.components import definitions
from cauldron.session.writing.components import plotly_component
from cauldron.session.writing.components import project_component
from cauldron.session.writing.components.definitions import COMPONENT
from cauldron.session.writing.components.definitions import WEB_INCLUDE
def _get_components(lib_name: str, project: 'projects.Project') -> COMPONENT:
if lib_name == 'bokeh':
return bokeh_component.create(project)
if lib_name == 'plotly':
return plotly_component.create(project)
# Unknown components will just return as empty components. There used
# to be a shared component type that was removed in 1.0.0, but hadn't
# been used for a long time before that. If that becomes interesting
# again, old code can be reviewed to see how shared components once
# worked.
return COMPONENT([], [])
def get(step: 'projects.ProjectStep') -> COMPONENT:
"""..."""
return definitions.merge_components(
project_component.create_many(step.project, step.web_includes),
*[
_get_components(name, step.project)
for name in step.report.library_includes
],
)
```
#### File: cli/commands/test_ls.py
```python
from unittest.mock import MagicMock
from unittest.mock import patch
from cauldron import environ
from cauldron.test import support
from pytest import mark
SCENARIOS = [
{'active': False, 'project': None},
{'active': False, 'project': MagicMock()},
{'active': True, 'project': None},
{'active': True, 'project': MagicMock()},
]
@mark.parametrize('scenario', SCENARIOS)
@patch('cauldron.cli.commands.ls.os.name', new='nt')
@patch('cauldron.cli.commands.ls.os.path.exists')
@patch('cauldron.environ.remote_connection')
@patch('cauldron.project.get_internal_project')
def test_ls(
get_internal_project: MagicMock,
remote_connection: MagicMock,
os_path_exists: MagicMock,
scenario: dict
):
"""Should list the contents of the specified directory."""
os_path_exists.return_value = True
remote_connection.active = scenario['active']
get_internal_project.return_value = scenario['project']
path = environ.paths.resources('examples', 'hello_cauldron')
response = support.run_command('ls "{}"'.format(path))
assert response.success, 'Expect ls to succeed.'
data = response.data
assert path == data['current_directory'], """
Expect the current directory to be the one specified in
the command arguments.
"""
assert path.startswith(data['parent_directory'])
assert data['shortened_directory'].endswith('hello_cauldron')
assert 'hello_cauldron' == data['spec']['name'], """
Expect this to be a project directory and to load the
project spec.
"""
assert {'step_tests'} == {d['folder'] for d in data['children']}, """
Expect one child directory named 'step_tests'.
"""
expected = {'cauldron.json', 'S01-create-data.py', 'S02-plot-data.py'}
assert expected == {f['name'] for f in data['current_files']}, """
Expect three specific files in the selected directory.
"""
offset = 1 if scenario['active'] or scenario['project'] else 0
assert 27 + offset == len(data['standard_locations']), """
Expect home directory, parent directory and one windows root
drive location for each letter of the alphabet except Z.
Also expect one for the project directory if a local or
remote project is open.
"""
@patch('cauldron.cli.commands.ls.os.listdir')
def test_ls_permissions_error(os_listdir: MagicMock):
"""Should fail to list directory due to lack of permissions."""
os_listdir.side_effect = PermissionError
path = environ.paths.resources('examples', 'hello_cauldron')
response = support.run_command('ls "{}"'.format(path))
assert support.has_error_code(response, 'PERMISSION_DENIED')
```
#### File: cli/commands/test_reload.py
```python
from unittest.mock import patch
from cauldron.test import support
from cauldron.test.support import scaffolds
class TestReload(scaffolds.ResultsTest):
"""..."""
def test_reload(self):
"""Should reload the currently opened project."""
support.run_command('open @examples:hello_cauldron --forget')
r = support.run_command('reload')
self.assertFalse(r.failed, 'should not have failed')
def test_no_open_project(self):
"""Should fail when no project is open."""
r = support.run_command('reload')
self.assertTrue(r.failed, 'should have failed')
self.assertEqual(r.errors[0].code, 'NO_PROJECT_FOUND')
@patch('time.sleep')
def test_missing_project_path(self, *args):
"""Should fail if the project directory does not exist."""
support.run_command('open @examples:hello_cauldron --forget')
with patch('os.path.exists') as path_exists:
path_exists.return_value = False
r = support.run_command('reload')
self.assertTrue(r.failed, 'should have failed')
self.assertEqual(r.errors[0].code, 'MISSING_PROJECT_PATH')
@patch('time.sleep')
def test_initialize_failure(self, *args):
"""Should fail if cannot initialize project."""
support.run_command('open @examples:hello_cauldron --forget')
with patch('cauldron.runner.initialize') as runner_initialize:
runner_initialize.side_effect = FileNotFoundError('Fake Error')
r = support.run_command('reload')
self.assertTrue(r.failed, 'should have failed')
self.assertEqual(r.errors[0].code, 'PROJECT_INIT_FAILURE')
def test_reload_remote(self):
"""Should reload the currently opened project."""
support.run_command('open @examples:hello_cauldron --forget')
r = support.run_remote_command('reload')
self.assertFalse(r.failed, 'should not have failed')
```
#### File: cli/commands/test_show.py
```python
from unittest.mock import MagicMock
from unittest.mock import patch
from pytest import mark
import cauldron
from cauldron import environ
from cauldron.test import support
lifecycle_fixture = support.make_project_lifecycle_fixture()
@patch('webbrowser.open')
def test_show_fail(
web_browser_open: MagicMock,
tester: support.ProjectLifecycleTester
):
"""Should fail to show when no project is opened."""
response = support.run_command('show')
assert support.has_error_code(response, 'NO_OPEN_PROJECT'), """
Expect failure with no open project.
"""
assert 0 == web_browser_open.call_count
@patch('webbrowser.open')
def test_show(
web_browser_open: MagicMock,
tester: support.ProjectLifecycleTester
):
"""Should show local project."""
support.open_project(tester, '@examples:hello_cauldron')
url = cauldron.project.get_internal_project().baked_url
response = support.run_command('show')
assert support.has_success_code(response, 'SHOWN'), """
Expect show to run without error.
"""
web_browser_open.assert_called_once_with(url)
@patch('webbrowser.open')
def test_show_remote(
web_browser_open: MagicMock,
tester: support.ProjectLifecycleTester
):
"""Should show remote url."""
support.open_project(tester, '@examples:hello_cauldron')
project = cauldron.project.get_internal_project()
remote_connection = environ.RemoteConnection(
url='http://my-fake.url',
active=True
)
url = project.make_remote_url(remote_connection.url)
response = support.run_remote_command(
command='show',
remote_connection=remote_connection
)
assert support.has_success_code(response, 'SHOWN'), """
Expect show to run without error.
"""
web_browser_open.assert_called_once_with(url)
@patch('subprocess.check_call')
@patch('cauldron.cli.commands.show.os')
@mark.parametrize('platform', ['darwin', 'linux2', 'win32'])
def test_show_files(
os_module: MagicMock,
check_call: MagicMock,
platform: str,
tester: support.ProjectLifecycleTester
):
"""Should show local project files."""
support.open_project(tester, '@examples:hello_cauldron')
with patch('sys.platform', new=platform):
response = support.run_command('show files')
assert support.has_success_code(response, 'SHOWN'), """
Expect show to run without error.
"""
assert 1 == (os_module.startfile.call_count + check_call.call_count)
@patch('subprocess.check_call')
@patch('cauldron.cli.commands.show.os')
@mark.parametrize('platform', ['darwin', 'linux2', 'win32'])
def test_show_files_remote(
os_module: MagicMock,
check_call: MagicMock,
platform: str,
tester: support.ProjectLifecycleTester
):
"""Should show local project files."""
support.open_project(tester, '@examples:hello_cauldron')
with patch('sys.platform', new=platform):
response = support.run_remote_command('show files')
assert support.has_success_code(response, 'SHOWN'), """
Expect show to run without error.
"""
assert 1 == (os_module.startfile.call_count + check_call.call_count)
```
#### File: cli/commands/test_steps_insert.py
```python
import cauldron
from cauldron.test import support
from cauldron.test.support import scaffolds
class TestStepsInsert(scaffolds.ResultsTest):
"""..."""
def test_before(self):
"""Should properly rename default filenames."""
support.create_project(self, 'candice')
support.add_step(self)
support.add_step(self, position='0')
project = cauldron.project.get_internal_project()
steps = project.steps
self.assertTrue(steps[0].filename.startswith('S01'))
self.assertTrue(steps[1].filename.startswith('S02'))
def test_multiple_file_types(self):
"""Should properly rename default filenames."""
support.create_project(self, 'candy')
support.add_step(self)
support.add_step(self, name='.md', position='0')
project = cauldron.project.get_internal_project()
steps = project.steps
self.assertTrue(steps[0].filename.startswith('S01'))
self.assertTrue(steps[1].filename.startswith('S02'))
def test_multiple_file_types_many(self):
"""Should properly rename default filenames."""
support.create_project(self, 'candy')
support.add_step(self)
support.add_step(self)
support.add_step(self)
support.add_step(self, name='.md', position='0')
project = cauldron.project.get_internal_project()
steps = project.steps
self.assertTrue(steps[0].filename.startswith('S01'))
self.assertTrue(steps[1].filename.startswith('S02'))
self.assertTrue(steps[2].filename.startswith('S03'))
self.assertTrue(steps[3].filename.startswith('S04'))
def test_multiple_file_types_named(self):
"""Should properly rename customized filenames."""
support.create_project(self, 'candera')
support.add_step(self, name='A')
support.add_step(self, name='B')
support.add_step(self, name='C')
support.add_step(self, name='D.md', position='0')
project = cauldron.project.get_internal_project()
steps = project.steps
self.assertTrue(steps[0].filename.startswith('S01-D'))
self.assertTrue(steps[1].filename.startswith('S02'))
self.assertTrue(steps[2].filename.startswith('S03'))
self.assertTrue(steps[3].filename.startswith('S04'))
```
#### File: cli/interaction/test_interaction_query.py
```python
from unittest import mock
from cauldron.cli.interaction import query
from cauldron.test.support import scaffolds
class TestRenderTexts(scaffolds.ResultsTest):
"""..."""
def test_choice(self):
"""
:return:
"""
with mock.patch('builtins.input', return_value=''):
index, value = query.choice(
title='Some input',
prompt='Here are your choices',
choices=['a', 'b', 'c', 'd'],
default_index=2
)
self.assertEqual(index, 2)
self.assertEqual(value, 'c')
def test_confirm(self):
"""
:return:
"""
with mock.patch('builtins.input', return_value='y'):
result = query.confirm(
question='Ja order Nein',
default=False
)
self.assertTrue(result)
with mock.patch('builtins.input', return_value='no'):
result = query.confirm(
question='Ja order Nein',
default=False
)
self.assertFalse(result)
with mock.patch('builtins.input', return_value=''):
result = query.confirm(
question='Ja order Nein',
default=False
)
self.assertFalse(result)
```
#### File: routes/synchronize/test_status.py
```python
import os
import cauldron
from cauldron.cli.server.routes.synchronize import status
from cauldron.test import support
from cauldron.test.support import scaffolds
MY_PATH = os.path.realpath(__file__)
MY_DIRECTORY = os.path.dirname(os.path.realpath(__file__))
class TestStatus(scaffolds.ResultsTest):
def test_of_file_missing(self):
"""Should return empty result for file that does not exist."""
path = os.path.join(MY_DIRECTORY, 'fictional.file')
result = status.of_file(path)
self.assertEqual(result['modified'], -1)
self.assertEqual(result['path'], path)
def test_of_file(self):
"""Should return valid result for my file."""
result = status.of_file(MY_PATH)
self.assertNotEqual(result['modified'], -1)
self.assertEqual(result['path'], MY_PATH)
def test_of_directory(self):
"""Should return status information for my directory."""
results = status.of_directory(MY_DIRECTORY)
self.assertTrue('__init__.py' in results)
self.assertTrue(os.path.basename(__file__) in results)
for key, result in results.items():
self.assertNotEqual(result['modified'], -1)
def test_of_project(self):
"""Should return information about the project."""
support.create_project(self, 'eric')
project = cauldron.project.get_internal_project()
results = status.of_project(project)
self.assertEqual(len(results['libraries']), 3)
self.assertEqual(len(results['project'].keys()), 2, """
Expect one entry for cauldron.json and one entry for the S01.py
step created by default with a new project.
""")
```
#### File: cli/server/test_server_execution.py
```python
from collections import namedtuple
from unittest.mock import MagicMock
from unittest.mock import patch
from cauldron.environ.response import Response
from cauldron.test.support import flask_scaffolds
class TestServerExecution(flask_scaffolds.FlaskResultsTest):
"""..."""
def test_execute_sync(self):
"""Should execute command synchronously."""
opened = self.post('/command-sync', {'command': 'open', 'args': ''})
self.assertEqual(opened.flask.status_code, 200)
def test_execute_get(self):
"""Should execute using get passed data """
opened = self.get('/command-sync?&command=open')
self.assertEqual(opened.flask.status_code, 200)
def test_execute_wait(self):
"""Should wait for execution to complete when synchronous."""
FakeThread = namedtuple('FakeThread_NT', ['uid', 'join', 'is_alive'])
thread = FakeThread(
uid='FAKE-UID',
join=MagicMock(),
is_alive=MagicMock(return_value=False)
)
def execute_replacement(name, args, response: Response):
response.thread = thread
patch_target = 'cauldron.cli.commander.execute'
with patch(patch_target, wraps=execute_replacement) as execute:
opened = self.get('/command-sync?&command=open')
self.assertEqual(execute.call_count, 1)
self.assertEqual(opened.flask.status_code, 200)
self.assertGreater(thread.join.call_count, 0)
self.assertGreater(thread.is_alive.call_count, 0)
self.assertFalse(opened.response.failed)
def test_shutdown_not_running(self):
"""Should abort shutdown of non-running server."""
shutdown = self.get('/shutdown')
self.assert_has_error_code(shutdown.response, 'NOT_RUNNING_ERROR')
@patch('cauldron.cli.commander.execute')
def test_execute_failure(self, execute: MagicMock):
"""Should fail when execution fails."""
execute.side_effect = RuntimeError('FAKE ERROR')
opened = self.get('/command-sync?&command=open&args=+')
self.assertEqual(opened.flask.status_code, 200)
self.assert_has_error_code(opened.response, 'KERNEL_EXECUTION_FAILURE')
def test_shutdown(self):
"""Should abort the running server."""
shutdown_func = MagicMock()
shutdown = self.get(
'/shutdown',
environ_base={'werkzeug.server.shutdown': shutdown_func}
)
shutdown_func.assert_called_once_with()
self.assertFalse(shutdown.response.failed)
def test_shutdown_failed(self):
"""Should abort the running server."""
shutdown_func = MagicMock()
shutdown_func.side_effect = RuntimeError('FAKE ERROR')
shutdown = self.get(
'/shutdown',
environ_base={'werkzeug.server.shutdown': shutdown_func}
)
shutdown_func.assert_called_once_with()
self.assert_has_error_code(shutdown.response, 'SHUTDOWN_ERROR')
@patch('cauldron.cli.server.routes.execution.server_runner')
def test_abort_not_a_response(
self,
server_runner: MagicMock,
):
"""Should ignore non-response entries during abort."""
server_runner.active_execution_responses = {'foo': None}
self.get('/abort')
@patch('cauldron.cli.server.routes.execution.server_runner')
def test_abort_no_thread(
self,
server_runner: MagicMock,
):
"""Should work when no thread for active response."""
active_response = MagicMock()
active_response.thread = None
server_runner.active_execution_responses = {'foo': active_response}
self.get('/abort')
@patch('cauldron.cli.server.routes.execution.server_runner')
def test_abort_cannot_stop(
self,
server_runner: MagicMock,
):
"""Should succeed even when thread could not be stopped."""
active_response = MagicMock()
active_response.thread.abort_running.side_effect = ValueError
server_runner.active_execution_responses = {'foo': active_response}
self.get('/abort')
```
#### File: cli/sync/test_sync_comm.py
```python
import os
from unittest.mock import MagicMock
from unittest.mock import patch
import requests
from requests import Response as HttpResponse
from cauldron import environ
from cauldron.cli.sync import comm
from cauldron.test import support
from cauldron.test.support import scaffolds
class TestSyncComm(scaffolds.ResultsTest):
"""Test suite for the cauldron.cli.sync.sync_comm module"""
def test_assemble_url_without_connection(self):
"""Should assemble url"""
endpoint = '/some-endpoint'
url_assembled = comm.assemble_url(endpoint)
self.assertEqual(
url_assembled,
'http://localhost:5010{}'.format(endpoint)
)
def test_assemble_url_specified_connection(self):
"""Should assemble url using the specified remote connection data"""
url = 'some.url:5451'
endpoint = '/some-endpoint'
remote_connection = environ.RemoteConnection(url=url, active=True)
url_assembled = comm.assemble_url(endpoint, remote_connection)
self.assertEqual(url_assembled, 'http://{}{}'.format(url, endpoint))
def test_assemble_url_global_connection(self):
"""Should assemble url using the specified remote connection data"""
url = 'some.url:5451'
endpoint = '/some-endpoint'
support.run_command('connect {} --force'.format(url))
url_assembled = comm.assemble_url(endpoint)
self.assertEqual(url_assembled, 'http://{}{}'.format(url, endpoint))
support.run_command('disconnect')
@patch('requests.request')
def test_send_request_invalid(self, request: MagicMock):
"""Should fail to send request."""
request.side_effect = requests.ConnectionError('Fake Error')
response = comm.send_request('/fake', method='GET')
self.assert_has_error_code(response, 'COMMUNICATION_ERROR')
@patch('cauldron.cli.sync.comm.parse_http_response')
@patch('requests.request')
def test_send_request_valid(
self,
request: MagicMock,
parse_http_response: MagicMock
):
"""Should successfully send request."""
request.return_value = HttpResponse()
parse_http_response.return_value = environ.Response('test')
response = comm.send_request(
endpoint='/fake',
method='post',
data=dict(a=1, b=2)
)
self.assertEqual(response.identifier, 'test')
def test_parse_valid_http_response(self):
"""Should fail to send request."""
source_response = environ.Response().update(test='hello_world')
def json_mock(*args, **kwargs):
return source_response.serialize()
http_response = HttpResponse()
http_response.json = json_mock
response = comm.parse_http_response(http_response)
self.assertEqual(
source_response.data['test'],
response.data['test']
)
def test_parse_invalid_http_response(self):
"""Should fail to parse invalid http response"""
http_response = HttpResponse()
response = comm.parse_http_response(http_response)
self.assert_has_error_code(response, 'INVALID_REMOTE_RESPONSE')
self.assertEqual(http_response, response.http_response)
@patch('requests.get')
def test_failed_download(self, requests_get: MagicMock):
"""Should fail to download if the GET request raises an exception"""
requests_get.side_effect = IOError('FAKE ERROR')
path = self.get_temp_path('failed_download', 'fake.filename')
response = comm.download_file('fake.filename', path)
self.assert_has_error_code(response, 'CONNECTION_ERROR')
self.assertFalse(os.path.exists(path))
@patch('requests.get')
def test_failed_download_write(self, requests_get: MagicMock):
"""Should fail to download if the GET request raises an exception"""
requests_get.return_value = dict()
path = self.get_temp_path('failed_download', 'fake.filename')
with patch('builtins.open') as open_func:
open_func.side_effect = IOError('Fake Error')
response = comm.download_file('fake.filename', path)
self.assert_has_error_code(response, 'WRITE_ERROR')
self.assertFalse(os.path.exists(path))
@patch('requests.get')
def test_download(self, requests_get: MagicMock):
"""Should successfully download saved cauldron file"""
def mock_iter_content(*args, **kwargs):
yield from [b'a', b'b', b'', None, b'c']
http_response = HttpResponse()
http_response.iter_content = mock_iter_content
requests_get.return_value = http_response
path = self.get_temp_path('failed_download', 'fake.filename')
response = comm.download_file('fake.filename', path)
self.assertTrue(response.success)
self.assertTrue(os.path.exists(path))
with open(path, 'rb') as f:
contents = f.read()
self.assertEqual(contents, b'abc')
```
#### File: test/environ/test_logger.py
```python
from unittest.mock import patch
from unittest.mock import MagicMock
from cauldron.environ import logger
@patch('cauldron.environ.logger.log')
def test_header_zero(log: MagicMock):
"""Should log a level zero header without modification"""
logger.header('hello', level=0)
args = log.call_args[0]
assert 'hello' == args[0], 'Message should not be modified'
@patch('cauldron.environ.logger.log')
def test_header_infinity(log: MagicMock):
"""Should log a high level header without modification"""
logger.header('hello', level=8)
args = log.call_args[0]
assert 'hello' == args[0], 'Message should not be modified'
@patch('cauldron.environ.logger.raw')
def test_log_with_kwargs(raw: MagicMock):
"""Should include kwargs in log output."""
message = logger.log('test', foo=42)
assert 1 == raw.call_count
assert 0 < message.find('foo: 42'), """
Expected to find the foo kwarg in the message.
"""
@patch('traceback.extract_tb')
def test_get_error_stack_module(extract_tb: MagicMock):
"""Should nullify location when the location is module"""
frame = MagicMock()
frame.name = '<module>'
extract_tb.return_value = [frame]
result = logger.get_error_stack()
assert result[0]['location'] is None, """
Expected a <module> value to be changed to `None`.
"""
@patch('traceback.extract_tb')
def test_get_error_stack(extract_tb: MagicMock):
"""Should remove prefix when location is a remote shared library path"""
frame = MagicMock()
frame.name = '/tmp/cd-remote/__cauldron_shared_libs/test'
extract_tb.return_value = [frame]
result = logger.get_error_stack()
assert result[0]['location'] == '/test', """
Expected the remote prefix to be removed.
"""
```
#### File: test/environ/test_modes.py
```python
import itertools
import typing
from unittest.mock import patch
from cauldron.environ import modes
from pytest import mark
POSSIBILITIES = {
'is_ui': modes.UI,
'is_test': modes.TESTING,
'is_interactive': modes.INTERACTIVE,
'is_single_run': modes.SINGLE_RUN,
'is_server': modes.SERVER,
}
SCENARIOS = [
dict(combination)
for combination
in itertools.combinations_with_replacement(POSSIBILITIES.items(), 2)
]
@mark.parametrize('scenario', SCENARIOS)
def test_modes(scenario: typing.Dict[str, str]):
"""Should identify according to the expected results."""
em = modes.ExposedModes
patch_path = 'cauldron.environ.modes._current_modes'
with patch(patch_path, new=[]):
for m in scenario.values():
modes.add(m)
assert em.is_interactive() == ('is_interactive' in scenario)
assert em.is_server() == ('is_server' in scenario)
assert em.is_single_run() == ('is_single_run' in scenario)
assert em.is_test() == ('is_test' in scenario)
assert em.is_ui() == ('is_ui' in scenario)
for m in scenario.values():
modes.remove(m)
assert not modes._current_modes
```
#### File: test/render/test_render_encoding.py
```python
import unittest
import json
import datetime
import numpy as np
import pandas as pd
from cauldron.render.encoding import ComplexJsonEncoder
class TestRenderEncoding(unittest.TestCase):
def test_standard_types(self):
"""Should serialize bytes."""
source = dict(a='hello', b=True, c=3.14)
output = json.dumps(source, cls=ComplexJsonEncoder)
self.assertIsInstance(output, str)
def test_bytes(self):
"""Should serialize bytes."""
source = dict(key=b'<KEY>')
output = json.dumps(source, cls=ComplexJsonEncoder)
self.assertIsInstance(output, str)
def test_pandas_series(self):
"""Should serialize bytes."""
source = dict(key=pd.Series([1, 2, 3, 4, 5]))
output = json.dumps(source, cls=ComplexJsonEncoder)
self.assertIsInstance(output, str)
def test_date(self):
"""Should serialize datetime.date."""
source = dict(key=datetime.date(2016, 1, 1))
output = json.dumps(source, cls=ComplexJsonEncoder)
self.assertIsInstance(output, str)
def test_datetime(self):
"""Should serialize datetime.datetime."""
source = dict(key=datetime.datetime(2007, 7, 16))
output = json.dumps(source, cls=ComplexJsonEncoder)
self.assertIsInstance(output, str)
def test_time(self):
"""Should serialize datetime.time."""
source = dict(key=datetime.time(8, 7, 16))
output = json.dumps(source, cls=ComplexJsonEncoder)
self.assertIsInstance(output, str)
def test_timedelta(self):
"""Should serialize datetime.timedelta """
delta = (
datetime.datetime(2016, 4, 4) -
datetime.datetime(2014, 3, 12)
)
source = dict(key=delta)
output = json.dumps(source, cls=ComplexJsonEncoder)
self.assertIsInstance(output, str)
def test_ndarray(self):
"""Should serialize numpy.ndarray."""
source = dict(key=np.zeros([3, 3]))
output = json.dumps(source, cls=ComplexJsonEncoder)
self.assertIsInstance(output, str)
def test_numpy_ints(self):
"""Should serialize numpy int types."""
source = dict(
key8=np.int8(12),
key16=np.int16(12),
key32=np.int32(12),
key64=np.int64(12)
)
output = json.dumps(source, cls=ComplexJsonEncoder)
self.assertIsInstance(output, str)
def test_numpy_floats(self):
"""Should serialize numpy float types."""
source = dict(
key16=np.float16(np.pi),
key32=np.float32(np.pi),
key64=np.float64(np.pi)
)
output = json.dumps(source, cls=ComplexJsonEncoder)
self.assertIsInstance(output, str)
def test_odd_dates(self):
"""
Should convert to iso strings where numpy or pandas datetimes are found.
"""
dt64 = np.datetime64('2002-06-28T01:00:00')
source = dict(
datetime64=dt64,
timestamp=pd.Timestamp(dt64)
)
output = json.dumps(source, cls=ComplexJsonEncoder)
self.assertIsInstance(output, str)
self.assertEqual(2, output.count('2002-06-28T01:00:00'))
```
#### File: test/render/test_syntax_highlighting.py
```python
from unittest import TestCase
from pygments.lexers.python import Python3Lexer
from cauldron.render import syntax_highlighting
# Pygments has recently introduced Python2Lexer and made
# PythonLexer the Python3Lexer. For maximum compatibility
# of the test, the assertions will allow any of the lexers
# to be used.
# https://pygments.org/docs/lexers/#pygments.lexers.python.Python2Lexer
PYTHON_LEXER_CLASS_NAMES = [
'Python2Lexer',
'PythonLexer',
'Python3Lexer'
]
class TestSyntaxHighlighting(TestCase):
def test_source(self):
"""Should retrieve python lexer by source."""
with open(__file__, 'r') as f:
contents = f.read()
lexer = syntax_highlighting.fetch_lexer(contents)
self.assertIn(lexer.__class__.__name__, PYTHON_LEXER_CLASS_NAMES)
def test_language_python3(self):
"""Should retrieve python 3 lexer by language."""
lexer = syntax_highlighting.fetch_lexer('', 'python3')
self.assertIsInstance(lexer, Python3Lexer)
def test_filename_python(self):
"""Should retrieve python lexer by filename."""
lexer = syntax_highlighting.fetch_lexer('', 'fake', 'test.py')
self.assertIn(lexer.__class__.__name__, PYTHON_LEXER_CLASS_NAMES)
def test_mime_type_python(self):
"""Should retrieve python lexer by filename."""
lexer = syntax_highlighting.fetch_lexer(
'',
mime_type='application/x-python'
)
self.assertIn(lexer.__class__.__name__, PYTHON_LEXER_CLASS_NAMES)
def test_unknown_language(self):
"""Should retrieve a default lexer for an unknown language."""
lexer = syntax_highlighting.fetch_lexer('', 'lkjasdlkjsad')
self.assertIsNotNone(lexer)
def test_unknown_everything(self):
"""Should retrieve a default lexer for an unknown language."""
lexer = syntax_highlighting.fetch_lexer(
source='asdlkasdj',
language='lkjasdlkjsad',
filename='test.qweoihwq',
mime_type='fictional/lasdlkjad'
)
self.assertIsNotNone(lexer)
```
#### File: session/writing/test_file_io_move.py
```python
import os
from unittest.mock import MagicMock
from unittest.mock import patch
import pytest
from cauldron.session.writing import file_io
@patch('os.path.exists')
@patch('os.chdir')
@patch('shutil.move')
@patch('subprocess.run')
@patch('time.sleep')
def test_move_failure(
time_sleep: MagicMock,
subprocess_run: MagicMock,
shutil_move: MagicMock,
os_chdir: MagicMock,
path_exists: MagicMock,
):
"""Should fail to move the file by any means."""
path_exists.return_value = True
shutil_move.side_effect = FileExistsError
subprocess_run.side_effect = [
MagicMock(stdout=b'foo', returncode=0),
MagicMock(stdout=b'foo', returncode=0),
ValueError,
ValueError,
MagicMock(check_returncode=MagicMock(side_effect=ValueError))
]
path = os.path.realpath(__file__)
with pytest.raises(IOError):
file_io.move(file_io.FILE_COPY_ENTRY(
source=path,
destination='{}.shouldnotexist'.format(path)
))
assert 6 == time_sleep.call_count, """
Expect to sleep on all 6 failed retry attempts.
"""
assert 3 == shutil_move.call_count, """
Expect 3 attempts to move the file with shutil.
"""
assert 5 == subprocess_run.call_count, """
Expect 3 attempts to move the file with git and two
calls to determine that the source and output locations
are both under the same git project.
"""
assert 9 == os_chdir.call_count, """
Expect 3 calls to probe git version control and
then 6 calls during the git move attempts.
"""
@patch('os.utime')
@patch('os.path.exists')
@patch('os.chdir')
@patch('shutil.move')
@patch('subprocess.run')
@patch('time.sleep')
def test_move_git(
time_sleep: MagicMock,
subprocess_run: MagicMock,
shutil_move: MagicMock,
os_chdir: MagicMock,
path_exists: MagicMock,
utime: MagicMock,
):
"""Should move the file using git."""
path_exists.return_value = True
subprocess_run.side_effect = [
MagicMock(stdout=b'bar', returncode=0),
MagicMock(stdout=b'bar', returncode=0),
ValueError,
MagicMock() # this attempt works
]
path = os.path.realpath(__file__)
file_io.move(file_io.FILE_COPY_ENTRY(
source=path,
destination='{}.shouldnotexist'.format(path)
))
assert 1 == time_sleep.call_count, """
        Expect to sleep once after the first failed git move attempt.
"""
assert 0 == shutil_move.call_count, """
Expect no attempts to move the file with shutil.
"""
assert 4 == subprocess_run.call_count, """
Expect 2 attempts to move the file with git and two
calls to determine that the source and output locations
are both under the same git project.
"""
assert 7 == os_chdir.call_count, """
Expect 3 calls to probe git version control and
then 4 more calls during the git move attempts.
"""
assert 0 < utime.call_count, """
        Expect that the moved file gets touched with a new mtime so
that cauldron can see that the file has changed.
"""
@patch('os.utime')
@patch('os.path.exists')
@patch('os.chdir')
@patch('shutil.move')
@patch('subprocess.run')
@patch('time.sleep')
def test_move_no_git(
time_sleep: MagicMock,
subprocess_run: MagicMock,
shutil_move: MagicMock,
os_chdir: MagicMock,
path_exists: MagicMock,
utime: MagicMock,
):
"""Should move the file with shutil.move."""
path_exists.return_value = True
shutil_move.side_effect = [ValueError, MagicMock()]
subprocess_run.side_effect = [
MagicMock(stdout=b'foo', returncode=0),
MagicMock(stdout=b'bar', returncode=0),
]
path = os.path.realpath(__file__)
file_io.move(file_io.FILE_COPY_ENTRY(
source=path,
destination='{}.shouldnotexist'.format(path)
))
assert 1 == time_sleep.call_count, """
        Expect to sleep once after the first failed shutil move attempt.
"""
assert 2 == shutil_move.call_count, """
Expect 2 attempts to move the file with shutil.
"""
assert 2 == subprocess_run.call_count, """
        Expect only the two calls that determine the source and output
        locations are not under the same git project, and therefore no
        git move attempts.
"""
assert 3 == os_chdir.call_count, """
Expect 3 calls to probe git version control.
"""
assert 0 < utime.call_count, """
        Expect that the moved file gets touched with a new mtime so
that cauldron can see that the file has changed.
"""
```
#### File: test/support/functional.py
```python
import tempfile
import pytest
import cauldron
from cauldron import cli
from cauldron import environ
from cauldron.cli import commander
from cauldron.cli.commands import close
class ProjectLifecycleTester:
def __init__(self):
self.results_directory = None
self.temp_directories = dict()
def set_up(self):
"""Called before the test process begins."""
results_directory = tempfile.mkdtemp(
prefix='cd-test-results-{}--'.format(self.__class__.__name__)
)
self.results_directory = results_directory
environ.configs.put(results_directory=results_directory, persists=False)
self.temp_directories = dict()
def tear_down(self):
"""Called after the test process is complete."""
# Close any open project so that it doesn't persist to the next test
if cauldron.project.internal_project is not None:
close.execute(cli.make_command_context('close'))
environ.configs.remove('results_directory', include_persists=False)
environ.systems.remove(self.results_directory)
self.results_directory = None
for key, path in self.temp_directories.items(): # pragma: no cover
environ.systems.remove(path)
if cauldron.environ.remote_connection.active: # pragma: no cover
commander.execute('disconnect', '')
def make_project_lifecycle_fixture(fixture_name: str = 'tester'):
"""..."""
@pytest.fixture(name=fixture_name)
def project_lifecycle_fixture():
tester = ProjectLifecycleTester()
tester.set_up()
yield tester
tester.tear_down()
return project_lifecycle_fixture
```
#### File: test/support/mocking.py
```python
import builtins
import typing
from unittest.mock import MagicMock
from unittest.mock import patch
_reserved_import = builtins.__import__
def populate_open_mock(mocked_open: MagicMock) -> MagicMock:
"""
Populates the specified MagicMock configured for use in mocking an `open`
object used as a ContextManager:
with open('foo.file') as f:
f.read()
Such that it can be patched as:
@patch('cauldron.path.to.file.open')
def test_something(opener: MagickMock):
populate_open_mock(opener)
opener.mocked_file.read.return_value = 'foo'
"""
file = MagicMock(name='mocked_open.mocked_file')
context = MagicMock(name='mocked_open.mocked_context')
context.__enter__.return_value = file
mocked_open.mocked_file = file
mocked_open.mocked_context = context
mocked_open.return_value = context
return mocked_open
class MockImporter:
"""Mocks the 'builtins.__import__ function."""
def __init__(
self,
error_on: typing.List[str] = None,
error_message: str = 'Mock Import Error'
):
self.error_on = error_on or []
self.error_message = error_message
def __call__(self, *args, **kwargs):
if args and args[0] in self.error_on:
raise ImportError(self.error_message)
return _reserved_import(*args, **kwargs)
class ImportPatcher:
"""Patches the 'builtins.__import__ function with a MockImporter."""
def __init__(self):
self.mock_importer = MockImporter([])
self._patch = patch(
'builtins.__import__',
new=self.mock_importer
)
def __enter__(self):
self._patch.__enter__()
return self.mock_importer
def __exit__(self, *args, **kwargs):
return self._patch.__exit__(*args)
```
#### File: test/support/server.py
```python
import json
import typing
from flask import Response as FlaskResponse
from cauldron import environ
from cauldron.cli import server
Responses = typing.NamedTuple('TestResponses', [
('flask', FlaskResponse),
('response', 'environ.Response')
])
def create_test_app():
"""..."""
return server.server_run.APPLICATION.test_client()
def get(app, endpoint: str, **kwargs) -> Responses:
""" send get request to the test flask application."""
flask_response = app.get(endpoint, **kwargs)
response = deserialize_flask_response(flask_response)
return Responses(flask_response, response)
def post(app, endpoint: str, data=None, **kwargs) -> Responses:
""" send post request to the test flask application."""
args = json.dumps(data) if data else None
flask_response = app.post(
endpoint,
data=args,
content_type='application/json',
**kwargs
)
response = deserialize_flask_response(flask_response)
return Responses(flask_response, response)
def deserialize_flask_response(
flask_response: FlaskResponse
) -> 'environ.Response':
"""..."""
try:
data = json.loads(flask_response.data.decode('utf-8', 'ignore'))
response = environ.Response.deserialize(data)
except Exception as error:
response = environ.Response().fail(
code='DESERIALIZE_FLASK_RESPONSE',
message='Failed to deserialize flask response',
error=error
)
return response
```
#### File: cauldron/test/test_templating.py
```python
import unittest
from cauldron import templating
from cauldron import environ
class TestTemplating(unittest.TestCase):
def test_id_filter(self):
"""..."""
result = templating.render('{{ "test" | id }}')
parts = result.split('-', 2)
self.assertEqual(
parts[0], 'cdi',
msg='"{}" should start with "cdi"'.format(result)
)
self.assertEqual(
parts[1], 'test',
msg='"{}" should match the prefix'.format(result)
)
def test_latex_filter(self):
"""..."""
result = templating.render('{{ "e = mc^2" | latex }}')
self.assertNotEqual(result.find('katex'), -1, 'where is katex?')
def test_render_template(self):
"""
:return:
"""
result = templating.render_template('unit_test.html', value='hello')
self.assertEqual(result, 'hello')
def test_render_file(self):
"""
:return:
"""
result = templating.render_file(
environ.paths.package('resources', 'templates', 'unit_test.html'),
value='hello'
)
self.assertEqual(result, 'hello')
```
#### File: apis/executions/test_runner_abort.py
```python
from unittest.mock import MagicMock
from unittest.mock import patch
from cauldron.ui.routes.apis.executions import runner
@patch('cauldron.ui.routes.apis.executions.runner.ui_configs')
@patch('cauldron.ui.routes.apis.executions.runner.redirection')
@patch('cauldron.project.get_internal_project')
def test_abort(
get_internal_project: MagicMock,
redirection: MagicMock,
ui_configs: MagicMock,
):
"""Should carry out an abort process on the active response thread."""
active_response = MagicMock()
active_response.thread.abort_running.side_effect = ValueError
ui_configs.ACTIVE_EXECUTION_RESPONSE = active_response
step = MagicMock()
step.is_running = True
project = MagicMock()
project.current_step = step
get_internal_project.return_value = project
response = runner.abort()
assert response.success
assert redirection.disable.called
assert redirection.restore_default_configuration.called
assert not step.is_running, """
Expect the current step to be stopped given that
it was running.
"""
```
#### File: routes/apps/test_view.py
```python
from unittest.mock import MagicMock
from unittest.mock import patch
import flask
from cauldron.ui import configs
from cauldron.ui.routes import apps
from pytest import mark
test_app = flask.Flask(__name__)
test_app.register_blueprint(apps.blueprint)
SCENARIOS = [
{'exists': True, 'endpoint': '', 'match': 'index.html'},
{'exists': True, 'endpoint': 'foo.js', 'match': 'foo.js'},
{'exists': False, 'endpoint': 'foo.js'},
]
@mark.parametrize('scenario', SCENARIOS)
@patch('cauldron.ui.routes.apps.os.path.exists')
@patch('cauldron.ui.routes.apps.flask.send_file')
def test_view(
flask_send_file: MagicMock,
exists: MagicMock,
scenario: dict,
):
"""Should return app file based on the scenario."""
flask_send_file.return_value = flask.Response()
exists.return_value = scenario['exists']
client = test_app.test_client()
response = client.get('{}/app/{}'.format(
configs.ROOT_PREFIX,
scenario['endpoint']
))
code = 200 if scenario['exists'] else 204
assert 1 == exists.call_count
assert code == response.status_code, """
        Expect the response status code to match the scenario expectation.
"""
if scenario['exists']:
path = flask_send_file.call_args[0][0]
assert path.endswith(scenario['match'])
else:
assert not flask_send_file.called
```
#### File: routes/notebooks/test_get_remote_view.py
```python
from unittest.mock import MagicMock
from unittest.mock import patch
from cauldron.ui.routes import notebooks
@patch('cauldron.ui.routes.notebooks.flask')
@patch('cauldron.ui.routes.notebooks.requests.request')
def test_get_remote_view(
requests_request: MagicMock,
mock_flask: MagicMock,
):
"""Should retrieve remote view via request."""
remote_response = MagicMock()
remote_response.raw.headers = {'foo': 'bar'}
remote_response.content = 'hello'
remote_response.status_code = 200
requests_request.return_value = remote_response
response = notebooks._get_remote_view('foo.js')
assert response is not None
args = mock_flask.Response.call_args[0]
assert 'hello' == args[0]
assert 200 == args[1]
assert ('foo', 'bar') in list(args[2])
```
#### File: cauldron/ui/configs.py
```python
import os
import typing
from cauldron import environ
DEFAULT_PORT = 8899
LAUNCH_THREAD = None
ACTIVE_EXECUTION_RESPONSE = None # type: typing.Optional[environ.Response]
#: The root URL prefix for the UI
ROOT_PREFIX = '/v1'
#: UI Version
UI_VERSION = [0, 0, 1, 1]
UI_APP_DATA = dict(
version=UI_VERSION,
user=os.environ.get('USER'),
test=1,
pid=os.getpid()
)
# Count of the number of consecutive UI get status failures.
# Will reset to zero once a successful status response has
# been returned. See cauldron/environ/response.py for details.
status_failures = 0
def is_active_async() -> bool:
"""
Determines whether or not an async command execution is currently
underway within the UI execution environment.
"""
r = ACTIVE_EXECUTION_RESPONSE
return r is not None and r.thread and r.thread.is_alive()
```
#### File: cauldron/ui/launcher.py
```python
import typing
import socket
import threading
import time
import webbrowser
def _check_usage(host: str, port: int) -> bool:
"""
Checks to see whether or not the specified port is utilized
and returns a boolean indicating whether it is or not.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
return not bool(sock.connect_ex((host, port)))
def find_open_port(
host: str,
ports: typing.Iterable[int]
) -> typing.Optional[int]:
"""
    Returns the first port from the iterable of candidate ports that is
    not currently in use on the specified host. If every candidate port
    is already in use, None is returned.
"""
return next((p for p in ports if not _check_usage(host, p)), None)
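# Illustrative usage (a sketch; the host and port range below are hypothetical):
#   port = find_open_port('127.0.0.1', range(8899, 8999))
#   if port is None:
#       raise RuntimeError('No open ports available in the candidate range.')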
class OpenUiOnStart(threading.Thread):
"""
Thread used to monitor the UI port for a point where the Flask
web server is actively serving on the targeted port. When that
occurs the UI is automatically opened in a new browser window.
"""
def __init__(self, host: str, port: int):
super(OpenUiOnStart, self).__init__()
self.host = host or '127.0.0.1'
self.port = port
self.retries = 0
@property
def root_url(self) -> str:
"""URL of the UI to open when the UI app starts serving."""
return 'http://{host}:{port}/'.format(host=self.host, port=self.port)
def run(self):
"""
Execution loop for the thread, which polls the UI port until
it responds and then opens the UI in a web browser when that
happens. The polling process will last up to 25 seconds after
which it will give up.
"""
while self.retries < 100:
if _check_usage(self.host, self.port):
webbrowser.open_new(self.root_url)
break
else:
self.retries += 1
time.sleep(0.25)
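# Illustrative usage (a sketch): start the watcher thread before launching the
# UI server so the browser opens once the port starts responding.
#   OpenUiOnStart('127.0.0.1', 8899).start()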
```
#### File: cauldron/ui/parsing.py
```python
from argparse import ArgumentParser
def create_parser(
arg_parser: ArgumentParser = None,
shell: bool = False
) -> ArgumentParser:
"""
Creates an argument parser populated with the arg formats for the server
command.
"""
parser = arg_parser or ArgumentParser()
parser.description = 'Cauldron kernel server'
parser.add_argument(
'-p', '--port',
dest='port',
type=int,
default=None,
help=(
'Port on which the UI should interact. If not specified '
'an open port will be found and used instead.'
)
)
parser.add_argument(
'-n', '--name', '--host',
dest='host',
type=str,
default=None
)
parser.add_argument(
'--public',
default=False,
action='store_true'
)
if not shell:
parser.add_argument(
'-d', '--debug',
dest='debug',
default=False,
action='store_true'
)
parser.add_argument(
'-v', '--version',
dest='version',
default=False,
action='store_true'
)
parser.add_argument(
'-c', '--connect', '--connection',
dest='connection_url'
)
parser.add_argument(
'--basic',
action='store_true',
help="""
When specified a basic Flask server will be used to
serve the kernel instead of a waitress WSGI server.
Use only when necessary as the Flask server isn't
as robust.
"""
)
return parser
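# Illustrative usage (a sketch; the argument values are arbitrary):
#   parser = create_parser()
#   args = parser.parse_args(['--port', '8899', '--public'])
#   assert args.port == 8899 and args.public is True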
```
#### File: apis/executions/runner.py
```python
import typing
import flask
import cauldron
from cauldron import environ
from cauldron.cli import commander
from cauldron.environ.response import Response
from cauldron.runner import redirection
from cauldron.ui import arguments
from cauldron.ui import configs as ui_configs
ParsedCommand = typing.NamedTuple('COMMAND_CONTEXT', [
('response', Response),
('command', str),
('args', list),
])
def parse_command_args() -> environ.Response:
"""
Parse arguments for command executions.
    :return:
        A response object whose returned value is a tuple where the first
        element is the name of the command to execute, and the second is a
        string representing the arguments to apply to that command.
"""
response = environ.Response()
cmd = None
parts = None
name = None
args = None
request_args = arguments.from_request()
try:
cmd = request_args.get('command', '')
parts = [x.strip() for x in cmd.split(' ', 1)]
name = parts[0].lower()
args = request_args.get('args', '')
if not isinstance(args, str):
args = ' '.join(args)
args += ' {}'.format(parts[1] if len(parts) > 1 else '').strip()
except Exception as err:
return response.fail(
code='INVALID_COMMAND',
message='Unable to parse command',
cmd=cmd if cmd else '',
parts=parts,
name=name,
args=args,
error=err,
mime_type='{}'.format(flask.request.mimetype),
request_data='{}'.format(flask.request.data),
request_args=request_args
).response
return response.returns(name, args)
def execute(asynchronous: bool = False) -> environ.Response:
"""
:param asynchronous:
Whether or not to allow asynchronous command execution that returns
before the command is complete with a run_uid that can be used to
track the continued execution of the command until completion.
"""
r = parse_command_args()
if r.failed:
return r
cmd, args = r.returned
try:
r = commander.execute(cmd, args, r)
if not r.thread:
return r
if not asynchronous:
r.thread.join()
# Watch the thread for a bit to see if the command finishes in
# that time. If it does the command result will be returned directly
# to the caller. Otherwise, a waiting command will be issued
for _ in range(5):
r.thread.join(0.25)
if not r.thread.is_alive():
break
if r.thread.is_alive():
ui_configs.ACTIVE_EXECUTION_RESPONSE = r
return Response().update(
run_log=r.get_thread_log(),
run_status='running',
run_uid=r.thread.uid,
)
return r.update(
run_log=r.get_thread_log(),
run_status='complete',
run_uid=r.thread.uid
)
except Exception as error:
return r.fail(
code='KERNEL_EXECUTION_FAILURE',
message='Unable to execute command.',
cmd=cmd,
args=args,
error=error
).response
def abort() -> environ.Response:
    """Abort the currently active command execution, if any, stopping its
    thread and restoring print redirection to its default configuration."""
response = ui_configs.ACTIVE_EXECUTION_RESPONSE
ui_configs.ACTIVE_EXECUTION_RESPONSE = None
should_abort = (
response is not None
and response.thread
and response.thread.is_alive()
)
if should_abort:
# Try to stop the thread gracefully first.
response.thread.abort = True
response.thread.join(2)
try:
# Force stop the thread explicitly
if response.thread.is_alive():
response.thread.abort_running()
except Exception:
pass
project = cauldron.project.get_internal_project()
if project and project.current_step:
step = project.current_step
if step.is_running:
step.mark_dirty(True)
step.progress = 0
step.progress_message = None
step.dumps(False)
step.is_running = False
# Make sure this is called prior to printing response information to
# the console or that will come along for the ride
redirection.disable(step)
# Make sure no print redirection will survive the abort process regardless
# of whether an active step was found or not (prevents race conditions)
redirection.restore_default_configuration()
return environ.Response()
```
#### File: ui/routes/__init__.py
```python
import flask
from cauldron.ui import configs as ui_configs
blueprint = flask.Blueprint(
name='roots',
import_name=__name__,
)
@blueprint.route('/')
def hello():
return flask.redirect(
'{}/app'.format(ui_configs.ROOT_PREFIX),
code=302
)
```
#### File: routes/notebooks/__init__.py
```python
import mimetypes
import os
import cauldron
import flask
import requests
from cauldron import environ
from cauldron.ui import configs as ui_configs
blueprint = flask.Blueprint(
name='notebooks',
import_name=__name__,
url_prefix='{}/notebook'.format(ui_configs.ROOT_PREFIX)
)
def _get_remote_view(route: str) -> flask.Response:
endpoint = route.lstrip('/')
request = flask.request
response = requests.request(
method=request.method,
url='{}/view/{}'.format(environ.remote_connection.url, endpoint),
headers={k: v for k, v in request.headers if k != 'Host'},
data=request.get_data(),
cookies=request.cookies,
allow_redirects=False
)
excluded_headers = [
'connection',
'content-encoding',
'content-length',
'transfer-encoding',
]
headers = [
(name, value)
for name, value in response.raw.headers.items()
if name.lower() not in excluded_headers
]
return flask.Response(response.content, response.status_code, headers)
@blueprint.route('/<path:route>', methods=['GET', 'POST'])
def notebook(route: str):
"""
Retrieves the contents of the file specified by the view route if it
exists.
"""
is_remote = environ.remote_connection.active
load_from_resources = (
route.startswith('assets')
or (
is_remote
and route in ['project.css', 'project.js']
)
)
if load_from_resources:
# If a local version of the asset exists, send that from the
# resources directory instead of the results directory.
local_asset_path = environ.paths.resources('web', route)
if os.path.exists(local_asset_path):
return flask.send_file(
local_asset_path,
mimetype=mimetypes.guess_type(local_asset_path)[0],
cache_timeout=-1
)
if is_remote:
return _get_remote_view(route)
project = cauldron.project.get_internal_project()
results_path = project.results_path if project else None
if not project or not results_path:
return '', 204
path = os.path.join(results_path, route)
if not os.path.exists(path):
return '', 204
return flask.send_file(
path,
mimetype=mimetypes.guess_type(path)[0],
cache_timeout=-1
)
```
#### File: ui/statuses/_reconciler.py
```python
import os
from cauldron import environ
from cauldron.ui import configs as ui_configs
from cauldron.ui.statuses import _utils
def _mark_dirty_after(step_data: dict, timestamp: float) -> dict:
"""
Modifies the step_data to mark it dirty if the step data is for
a remote project and the remote file (on the local system) has
been modified more recently than it has been synchronized to the
kernel system.
"""
path = step_data.get('remote_source_path')
status = step_data.get('status')
if not path or not status:
return step_data
try:
file_modified = os.path.getmtime(path)
except FileNotFoundError:
file_modified = 0
is_dirty = (
status.get('dirty', False)
or not os.path.exists(path)
or timestamp < file_modified
)
step_data.update(dirty=is_dirty)
status.update(
dirty=is_dirty,
is_dirty=is_dirty,
file_modified=file_modified
)
return step_data
def localize_dirty_steps(project_data: dict) -> dict:
"""
Will mark steps as dirty, even if the remote status says they
are not if the local files have been modified more recently
than the remote sync timestamp, i.e. a step needs to be synced
to the remote.
:param project_data:
Remote response kernel-serialized project data in which
step data exists to localize.
:return:
The modified kernel-serialized project data.
"""
if not project_data:
return project_data
last_timestamp = environ.remote_connection.sync_timestamp
project_data['steps'] = [
_mark_dirty_after(s, last_timestamp)
for s in project_data.get('steps') or []
]
return project_data
def merge_local_state(remote_status: dict, force: bool) -> dict:
"""
When proxying a remote status through a local cauldron process,
it's necessary to merge in local state values as part of the
proxy process given that not all remote state is the important
state to be reporting to the UI.
:param remote_status:
The remote status payload to merge local state into.
:param force:
Whether or not to force the hash of the finalized status
to ensure that the state is updated when returned to the UI.
"""
# Steps modified locally should be identified as dirty
# or the status display.
remote_status['data']['project'] = localize_dirty_steps(
remote_status['data'].get('project')
)
# We care about the local remote connection, which is active,
# not the remote one.
remote_status['data']['remote'] = environ.remote_connection.serialize()
# We care about the local viewer, not the remote one.
remote_status['data']['view'] = environ.view
# We care about the local UI environment command executions.
remote_status['data']['is_active_async'] = ui_configs.is_active_async()
# We want the local UI server version to be included.
remote_status['data']['ui_server_version'] = environ.version
# We want the local UI python version to be included.
remote_status['data']['ui_python_version'] = environ.python_version
remote_status['hash'] = _utils.get_digest_hash(remote_status, force)
return remote_status
```
#### File: cauldron/cauldron-web/deploy.py
```python
import os
import shutil
MY_DIRECTORY = os.path.realpath(os.path.dirname(__file__))
def main():
"""Copies the current dist directory into the cauldron Python package."""
print('\n\n=== DEPLOYING ====\n')
dist_path = os.path.join(MY_DIRECTORY, 'dist')
deploy_path = os.path.realpath(os.path.join(
MY_DIRECTORY,
'..', 'cauldron', 'resources', 'web'
))
print(f'DIST PATH: {dist_path}')
print(f'DEPLOY PATH: {deploy_path}')
print(f'[INFO]: Removing existing deployed files.')
shutil.rmtree(deploy_path)
print(f'[INFO]: Copying dist files to deployment path')
shutil.copytree(dist_path, deploy_path)
print('[SUCCESS]: Deployment operation complete.')
if __name__ == '__main__':
main()
``` |
{
"source": "JohnnyPeng18/coach",
"score": 2
} |
#### File: rl_coach/architectures/embedder_parameters.py
```python
from typing import List, Union
from rl_coach.base_parameters import EmbedderScheme, NetworkComponentParameters
MOD_NAMES = {'image': 'ImageEmbedder', 'vector': 'VectorEmbedder', 'tensor': 'TensorEmbedder'}
class InputEmbedderParameters(NetworkComponentParameters):
def __init__(self, activation_function: str='relu', scheme: Union[List, EmbedderScheme]=EmbedderScheme.Medium,
batchnorm: bool=False, dropout_rate: float=0.0, name: str='embedder', input_rescaling=None,
input_offset=None, input_clipping=None, dense_layer=None, is_training=False, flatten=True):
super().__init__(dense_layer=dense_layer)
self.activation_function = activation_function
self.scheme = scheme
self.batchnorm = batchnorm
self.dropout_rate = dropout_rate
if input_rescaling is None:
input_rescaling = {'image': 255.0, 'vector': 1.0, 'tensor': 1.0}
if input_offset is None:
input_offset = {'image': 0.0, 'vector': 0.0, 'tensor': 0.0}
self.input_rescaling = input_rescaling
self.input_offset = input_offset
self.input_clipping = input_clipping
self.name = name
self.is_training = is_training
self.flatten = flatten
def path(self, emb_type):
return 'rl_coach.architectures.tensorflow_components.embedders:' + MOD_NAMES[emb_type]
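# Illustrative usage (a sketch; the activation and dropout values are arbitrary):
#   params = InputEmbedderParameters(activation_function='tanh', dropout_rate=0.1)
#   params.path('image')
#   # -> 'rl_coach.architectures.tensorflow_components.embedders:ImageEmbedder'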
```
#### File: rl_coach/environments/robosuite_environment.py
```python
from typing import Union ,Dict, Any
from enum import Enum, Flag, auto
from copy import deepcopy
import numpy as np
import random
from collections import namedtuple
try:
import robosuite
from robosuite.wrappers import Wrapper, DomainRandomizationWrapper
except ImportError:
from rl_coach.logger import failed_imports
failed_imports.append("Robosuite")
from rl_coach.base_parameters import Parameters, VisualizationParameters
from rl_coach.environments.environment import Environment, EnvironmentParameters, LevelSelection
from rl_coach.spaces import BoxActionSpace, VectorObservationSpace, StateSpace, PlanarMapsObservationSpace
# Importing our custom Robosuite environments here so that they are properly
# registered in Robosuite, and so recognized by 'robosuite.make()' and included
# in 'robosuite.ALL_ENVIRONMENTS'
import rl_coach.environments.robosuite.cube_exp
robosuite_environments = list(robosuite.ALL_ENVIRONMENTS)
robosuite_robots = list(robosuite.ALL_ROBOTS)
robosuite_controllers = list(robosuite.ALL_CONTROLLERS)
def get_robosuite_env_extra_parameters(env_name: str):
import inspect
assert env_name in robosuite_environments
env_params = inspect.signature(robosuite.environments.REGISTERED_ENVS[env_name]).parameters
base_params = list(RobosuiteBaseParameters().env_kwargs_dict().keys()) + ['robots', 'controller_configs']
return {n: p.default for n, p in env_params.items() if n not in base_params}
class OptionalObservations(Flag):
NONE = 0
CAMERA = auto()
OBJECT = auto()
class RobosuiteBaseParameters(Parameters):
def __init__(self, optional_observations: OptionalObservations = OptionalObservations.NONE):
super(RobosuiteBaseParameters, self).__init__()
# NOTE: Attribute names should exactly match the attribute names in Robosuite
self.horizon = 1000 # Every episode lasts for exactly horizon timesteps
self.ignore_done = True # True if never terminating the environment (ignore horizon)
self.reward_shaping = True # if True, use dense rewards.
# How many control signals to receive in every simulated second. This sets the amount of simulation time
# that passes between every action input (this is NOT the same as frame_skip)
self.control_freq = 10
# Optional observations (robot state is always returned)
# if True, every observation includes a rendered image
self.use_camera_obs = bool(optional_observations & OptionalObservations.CAMERA)
# if True, include object (cube/etc.) information in the observation
self.use_object_obs = bool(optional_observations & OptionalObservations.OBJECT)
# Camera parameters
self.has_renderer = False # Set to true to use Mujoco native viewer for on-screen rendering
self.render_camera = 'frontview' # name of camera to use for on-screen rendering
self.has_offscreen_renderer = self.use_camera_obs
self.render_collision_mesh = False # True if rendering collision meshes in camera. False otherwise
self.render_visual_mesh = True # True if rendering visual meshes in camera. False otherwise
self.camera_names = 'agentview' # name of camera for rendering camera observations
self.camera_heights = 84 # height of camera frame.
self.camera_widths = 84 # width of camera frame.
self.camera_depths = False # True if rendering RGB-D, and RGB otherwise.
# Collision
self.penalize_reward_on_collision = True
self.end_episode_on_collision = False
@property
def optional_observations(self):
flag = OptionalObservations.NONE
if self.use_camera_obs:
flag = OptionalObservations.CAMERA
if self.use_object_obs:
flag |= OptionalObservations.OBJECT
elif self.use_object_obs:
flag = OptionalObservations.OBJECT
return flag
@optional_observations.setter
def optional_observations(self, value):
self.use_camera_obs = bool(value & OptionalObservations.CAMERA)
if self.use_camera_obs:
self.has_offscreen_renderer = True
self.use_object_obs = bool(value & OptionalObservations.OBJECT)
def env_kwargs_dict(self):
res = {k: (v.value if isinstance(v, Enum) else v) for k, v in vars(self).items()}
return res
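# Illustrative flag usage (a sketch):
#   params = RobosuiteBaseParameters(
#       OptionalObservations.CAMERA | OptionalObservations.OBJECT)
#   params.use_camera_obs   # True
#   params.use_object_obs   # True
#   params.env_kwargs_dict()  # plain dict of keyword arguments for robosuite.make()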
class RobosuiteEnvironmentParameters(EnvironmentParameters):
def __init__(self, level, robot=None, controller=None, apply_dr: bool = False,
dr_every_n_steps_min: int = 10, dr_every_n_steps_max: int = 20,
use_joint_vel_obs=False):
super().__init__(level=level)
self.base_parameters = RobosuiteBaseParameters()
self.extra_parameters = {}
self.robot = robot
self.controller = controller
self.apply_dr = apply_dr
self.dr_every_n_steps_min = dr_every_n_steps_min
self.dr_every_n_steps_max = dr_every_n_steps_max
self.use_joint_vel_obs = use_joint_vel_obs
self.custom_controller_config_fpath = None
@property
def path(self):
return 'rl_coach.environments.robosuite_environment:RobosuiteEnvironment'
DEFAULT_REWARD_SCALES = {
'Lift': 2.25,
'LiftLab': 2.25,
}
RobosuiteStepResult = namedtuple('RobosuiteStepResult', ['observation', 'reward', 'done', 'info'])
# Environment
class RobosuiteEnvironment(Environment):
def __init__(self, level: LevelSelection,
seed: int, frame_skip: int, human_control: bool, custom_reward_threshold: Union[int, float, None],
visualization_parameters: VisualizationParameters,
base_parameters: RobosuiteBaseParameters,
extra_parameters: Dict[str, Any],
robot: str, controller: str,
target_success_rate: float = 1.0, apply_dr: bool = False,
dr_every_n_steps_min: int = 10, dr_every_n_steps_max: int = 20, use_joint_vel_obs=False,
custom_controller_config_fpath=None, **kwargs):
super(RobosuiteEnvironment, self).__init__(level, seed, frame_skip, human_control, custom_reward_threshold,
visualization_parameters, target_success_rate)
# Validate arguments
self.frame_skip = max(1, self.frame_skip)
def validate_input(input, supported, name):
if input not in supported:
raise ValueError("Unknown Robosuite {0} passed: '{1}' ; Supported {0}s are: {2}".format(
name, input, ' | '.join(supported)
))
validate_input(self.env_id, robosuite_environments, 'environment')
validate_input(robot, robosuite_robots, 'robot')
self.robot = robot
if controller is not None:
validate_input(controller, robosuite_controllers, 'controller')
self.controller = controller
self.base_parameters = base_parameters
self.base_parameters.has_renderer = self.is_rendered and self.native_rendering
self.base_parameters.has_offscreen_renderer = self.base_parameters.use_camera_obs or (self.is_rendered and not
self.native_rendering)
# Seed
if self.seed is not None:
np.random.seed(self.seed)
random.seed(self.seed)
# Load and initialize environment
env_args = self.base_parameters.env_kwargs_dict()
env_args.update(extra_parameters)
if 'reward_scale' not in env_args and self.env_id in DEFAULT_REWARD_SCALES:
env_args['reward_scale'] = DEFAULT_REWARD_SCALES[self.env_id]
env_args['robots'] = self.robot
controller_cfg = None
if self.controller is not None:
controller_cfg = robosuite.controllers.load_controller_config(default_controller=self.controller)
elif custom_controller_config_fpath is not None:
controller_cfg = robosuite.controllers.load_controller_config(custom_fpath=custom_controller_config_fpath)
env_args['controller_configs'] = controller_cfg
self.env = robosuite.make(self.env_id, **env_args)
# TODO: Generalize this to filter any observation by name
if not use_joint_vel_obs:
self.env.modify_observable('robot0_joint_vel', 'active', False)
# Wrap with a dummy wrapper so we get a consistent API (there are subtle changes between
# wrappers and actual environments in Robosuite, for example action_spec as property vs. function)
self.env = Wrapper(self.env)
if apply_dr:
self.env = DomainRandomizationWrapper(self.env, seed=self.seed, randomize_every_n_steps_min=dr_every_n_steps_min,
randomize_every_n_steps_max=dr_every_n_steps_max)
# State space
self.state_space = self._setup_state_space()
# Action space
low, high = self.env.unwrapped.action_spec
self.action_space = BoxActionSpace(low.shape, low=low, high=high)
self.reset_internal_state()
if self.is_rendered:
image = self.get_rendered_image()
self.renderer.create_screen(image.shape[1], image.shape[0])
# TODO: Other environments call rendering here, why? reset_internal_state does it
def _setup_state_space(self):
state_space = StateSpace({})
dummy_obs = self._process_observation(self.env.observation_spec())
state_space['measurements'] = VectorObservationSpace(dummy_obs['measurements'].shape[0])
if self.base_parameters.use_camera_obs:
state_space['camera'] = PlanarMapsObservationSpace(dummy_obs['camera'].shape, 0, 255)
return state_space
def _process_observation(self, raw_obs):
new_obs = {}
# TODO: Support multiple cameras, this assumes a single camera
camera_name = self.base_parameters.camera_names
camera_obs = raw_obs.get(camera_name + '_image', None)
if camera_obs is not None:
depth_obs = raw_obs.get(camera_name + '_depth', None)
if depth_obs is not None:
depth_obs = np.expand_dims(depth_obs, axis=2)
camera_obs = np.concatenate([camera_obs, depth_obs], axis=2)
new_obs['camera'] = camera_obs
measurements = raw_obs['robot0_proprio-state']
object_obs = raw_obs.get('object-state', None)
if object_obs is not None:
measurements = np.concatenate([measurements, object_obs])
new_obs['measurements'] = measurements
return new_obs
def _take_action(self, action):
action = self.action_space.clip_action_to_space(action)
# We mimic the "action_repeat" mechanism of RobosuiteWrapper in Surreal.
# Same concept as frame_skip, only returning the average reward across repeated actions instead
# of the total reward.
rewards = []
for _ in range(self.frame_skip):
obs, reward, done, info = self.env.step(action)
rewards.append(reward)
if done:
break
reward = np.mean(rewards)
self.last_result = RobosuiteStepResult(obs, reward, done, info)
def _update_state(self):
obs = self._process_observation(self.last_result.observation)
self.state = {k: obs[k] for k in self.state_space.sub_spaces}
self.reward = self.last_result.reward or 0
self.done = self.last_result.done
self.info = self.last_result.info
def _restart_environment_episode(self, force_environment_reset=False):
reset_obs = self.env.reset()
self.last_result = RobosuiteStepResult(reset_obs, 0.0, False, {})
def _render(self):
self.env.render()
def get_rendered_image(self):
img: np.ndarray = self.env.sim.render(camera_name=self.base_parameters.render_camera,
height=512, width=512, depth=False)
return np.flip(img, 0)
def close(self):
self.env.close()
class RobosuiteGoalBasedExpEnvironmentParameters(RobosuiteEnvironmentParameters):
@property
def path(self):
return 'rl_coach.environments.robosuite_environment:RobosuiteGoalBasedExpEnvironment'
class RobosuiteGoalBasedExpEnvironment(RobosuiteEnvironment):
def _process_observation(self, raw_obs):
new_obs = super()._process_observation(raw_obs)
new_obs['obs-goal'] = None
return new_obs
def _setup_state_space(self):
state_space = super()._setup_state_space()
goal_based_shape = list(state_space['camera'].shape)
goal_based_shape[2] *= 2
state_space['obs-goal'] = PlanarMapsObservationSpace(tuple(goal_based_shape), 0, 255)
return state_space
``` |
{
"source": "JohnnyPeng18/growser",
"score": 3
} |
#### File: growser/growser/cmdr.py
```python
from collections.abc import Iterable
from inspect import getmembers
from types import FunctionType, GeneratorType, ModuleType
from typing import Callable, Generic, List, TypeVar
class Message:
"""Base class for both commands and domain events."""
class Command(Message):
"""Objects that mutate state."""
class Coordinator(Command):
"""Commands responsible for coordinating between multiple commands."""
class DomainEvent(Message):
"""Objects that result from state mutation."""
class Query(Message):
"""Objects for requesting data."""
#: Generic :class:`Command` type
T = TypeVar('T')
class Handles(Generic[T]):
"""Indicates that a class handles instances of type `T`.
Example::
class SingleCommandHandler(Handles[SingleCommand]):
def handle(cmd: SingleCommand):
pass
Classes can also handle multiple commands::
class MultiHandler(Handles[FirstCommand], Handles[SecondCommand]):
def handle_first(cmd: FirstCommand):
pass
def handle_second(cmd: SecondCommand)
pass
"""
def handles_decorator():
handlers = {}
def wrapper(command):
def inner(func):
handlers[func] = command
return func
return inner
wrapper.handlers = handlers
return wrapper
#: Decorator for registering command handlers
handles = handles_decorator()
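# Illustrative decorator-style registration (a sketch; the command class is hypothetical):
#   @handles(CreateWidget)
#   def create_widget(cmd):
#       ...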
class Handler:
__slots__ = ['klass', 'func']
def __init__(self, klass: type, func: Callable):
"""A single callable used as a handler.
This makes it easier to create instances of :attr:`klass` when the
handler is a class method.
:param klass: Class instance `func` is expecting.
:param func: Callable responsible for handling commands of type `klass`.
"""
self.klass = klass
self.func = func
def __call__(self):
func = self.func
if self.klass:
func = getattr(self.klass(), self.func.__name__)
return func
def __repr__(self):
return "Handler<{} {}>".format(self.klass, self.func.__qualname__)
class HandlerInvoker:
__slots__ = ['klass', 'handler']
def __init__(self, klass: type, handler: Callable[..., FunctionType]):
"""Intermediary between the executing command bus and handler.
:param klass: Type of class that is being executed
:param handler: Callable that executes instance of this type
"""
self.klass = klass
self.handler = handler
def execute(self, command: Command):
"""Execute the command using the registered command handler.
:param command: An instance of :attr:`klass`.
"""
results = self.handler(command)
if isinstance(results, GeneratorType):
results = list(results)
return results
def __repr__(self):
return '<{} {} {}>'.format(
self.__class__.__name__, self.klass.__name__, self.handler)
class Registry:
"""Scans modules & classes for handlers used for `commands` and `queries`.
Example::
registry = HandlerRegistry()
registry.scan(growser.handlers.events)
"""
def __init__(self):
self.handlers = {}
def find(self, klass: type) -> Handler:
"""Return the handler assigned to `klass`."""
return self.handlers.get(klass, None)
def scan(self, obj):
"""Register a module, class, or function as a handler.
:param obj: Object to register as a handler.
"""
handlers = []
if isinstance(obj, ModuleType):
handlers = scan_module(obj)
elif isinstance(obj, type):
handlers = scan_class(obj)
elif isinstance(obj, FunctionType):
handlers = scan_function(obj)
for klass, handler in handlers:
if issubclass(klass, Command) and klass in self.handlers:
raise DuplicateHandlerError(klass, handler)
self.handlers[klass] = handler
if not len(handlers):
raise TypeError('Invalid command handler')
def __iter__(self):
yield from self.handlers.items()
def scan_module(module: ModuleType):
"""Scan a module for handlers."""
if type(module) != ModuleType:
raise TypeError('Module required')
rv = []
for obj in getmembers(module):
if isinstance(obj[1], type) and issubclass(obj[1], Handles):
rv += scan_class(obj[1])
if type(obj[1]) == FunctionType:
rv += scan_function(obj[1])
return rv
def scan_class(klass: type):
"""Scan a class for handlers."""
if not isinstance(klass, type):
raise TypeError('Class required')
# Commands via : Handles[T]
expected = []
if hasattr(klass, '__parameters__'):
expected = [k for k in klass.__parameters__
if issubclass(k, Command)]
def is_func(f):
return type(f) == FunctionType and f.__name__[0] != '_'
rv = []
for func in [obj[1] for obj in getmembers(klass) if is_func(obj[1])]:
rv += scan_function(func, klass)
# Handlers declared by the class but not found
commands = [cmd[0] for cmd in rv]
missing = [cmd for cmd in expected if cmd not in commands]
if len(missing):
raise UnboundCommandError(missing)
return rv
def scan_function(obj, klass: type=None) -> List[tuple]:
"""Determine if a function or unbound class method is a handler.
The class bound to the function is determined by either the presence
of a type hint::
def handles(cmd: Klass):
Or a decorator::
@handles(Klass)
def handles(cmd):
:param obj: Function to register as a handler.
"""
if type(obj) != FunctionType:
raise TypeError('Expected FunctionType')
rv = []
# Method type hints e.g. def name(command: Type)
for param, param_type in obj.__annotations__.items():
if not isinstance(param_type, type) or param == 'return':
continue
if issubclass(param_type, Message):
rv.append((param_type, Handler(klass, obj)))
# Decorators using @handles(CommandType)
if hasattr(handles, 'handlers') and obj in handles.handlers:
if not any(cmd == handles.handlers[obj] for cmd, _ in rv):
rv.append((handles.handlers[obj], Handler(klass, obj)))
return rv
class LocalCommandBus:
"""Experimental command bus for executing messages in the local context."""
def __init__(self, registry: Registry):
self.registry = registry
def execute(self, cmd: Command) -> None:
"""Execute a command"""
handler = self.registry.find(cmd.__class__)
if not handler:
raise LookupError('No handler found for {}'.format(cmd.__class__))
invoker = HandlerInvoker(handler.klass, handler())
rv = invoker.execute(cmd)
# Allow commands to return commands to execute - will be removed after
# event listeners are integrated.
if isinstance(rv, Iterable):
for result in rv:
if isinstance(result, Command):
self.execute(result)
if isinstance(result, DomainEvent):
self.publish(result)
return rv
def publish(self, event: DomainEvent):
pass
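# Illustrative wiring (a sketch; the handler module and command class are hypothetical):
#   registry = Registry()
#   registry.scan(myapp.handlers)
#   bus = LocalCommandBus(registry)
#   bus.execute(CreateWidget(...))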
class DuplicateHandlerError(Exception):
"""Command is bound to multiple handlers."""
def __init__(self, command: type, duplicate):
super().__init__('Duplicate handler found for {}: {}'.format(
command.__name__, duplicate))
class UnboundCommandError(Exception):
"""Handle[T] present but handler for `T` not found."""
def __init__(self, commands):
super().__init__('Commands bound without handlers: {}'.format(
', '.join(map(lambda x: x.__name__, commands))))
```
#### File: growser/growser/db.py
```python
from collections import OrderedDict
import datetime
from typing import Iterator, List
from flask_sqlalchemy import SQLAlchemy
from psycopg2.extensions import QuotedString
from sqlalchemy import Table
#: Number of rows to insert per batch transaction
BATCH_SIZE = 50000
class Column:
def __init__(self, name: str, python_type: type):
"""Wrapper to cast Python values for use in ad-hoc SQL.
Example::
columns = [Column('id', int), Column('amount', float)]
:param name: Name of the column.
:param python_type: Python type e.g. int, str, float.
"""
self.name = name
self.python_type = python_type
def escape(self, value) -> str:
"""Escape a value for use in a Postgres ad-hoc SQL statement."""
def to_str(val):
if isinstance(val, bytes):
val = val.decode('utf-8')
return QuotedString(val).getquoted().decode('utf-8')
func = self.python_type
if isinstance(value, (datetime.datetime, datetime.date)):
value = str(value)
func = to_str
if issubclass(self.python_type, str):
func = to_str
return func(value)
def __eq__(self, b):
return self.name == b.name and self.python_type == b.python_type
def __repr__(self):
return '{}<name={}, type={}>'.format(
self.__class__.__name__, self.name, self.python_type.__name__)
class ColumnCollection(OrderedDict):
def __init__(self, columns: list):
super().__init__([(c.name, c) for c in columns])
class BulkInsertFromIterator:
def __init__(self, table, data: Iterator, columns: list,
batch_size: int=BATCH_SIZE, header: bool=False):
"""Bulk insert into Postgres from an iterator in fixed-size batches.
Example::
bulk = BulkInsertFromIterator(
'table.name',
iter([[1, 'Python'], [2, 'PyPy', 3]]),
[Column('id', int), Column('name', str)]
)
bulk.execute(db.engine.raw_connection)
:param table: Name of the table.
:param data: Iterable containing the data to insert.
:param columns: List of :class:`Column` objects.
:param batch_size: Rows to insert per batch.
:param header: True if the first row is a header.
"""
self.table = table
self.data = data
self.columns = columns
self.batch_size = batch_size
self.header = header
if isinstance(self.data, list):
self.data = iter(self.data)
if not isinstance(self.data, Iterator):
raise TypeError('Expected Iterator, got {}'.format(
self.data.__class__))
if not self.columns:
raise ValueError('Columns cannot be empty')
if isinstance(self.columns[0], tuple):
self.columns = [Column(*c) for c in self.columns]
def batch_execute(self, conn):
"""Insert data in batches of `batch_size`.
:param conn: A DB API 2.0 connection object
"""
def batches(data, batch_size):
"""Yield lists of at most `batch_size` items from any object that
supports iteration, without needing to know its length."""
rv = []
for idx, line in enumerate(data):
if idx != 0 and idx % batch_size == 0:
yield rv
rv = []
rv.append(line)
yield rv
columns = ColumnCollection(self.columns)
if self.header:
self.columns = [columns.get(h) for h in next(self.data)]
columns = ColumnCollection(self.columns)
total = 0
query = BulkInsertQuery(self.table, columns)
for batch in batches(self.data, self.batch_size):
total += query.execute(conn, batch) or 0
yield total
def execute(self, conn):
"""Execute all batches."""
return max(list(self.batch_execute(conn)))
class BulkInsertQuery:
def __init__(self, table: str, columns: ColumnCollection):
"""Execute a multi-row INSERT statement.
This does not take advantage of parameterized queries, but escapes
string values manually in :class:`Column`.
:param table: Name of the table being inserted into.
:param columns: Columns required for type coercion.
"""
self.table = table
self.columns = columns
self.query = 'INSERT INTO {} ({}) VALUES '.format(
table, ', '.join([c for c in columns]))
def execute(self, conn, rows: list) -> int:
"""Execute a single multi-row INSERT for `rows`.
:param conn: Function that returns a database connection
:param rows: List of tuples in the same order as :attr:`columns`.
"""
if not len(rows):
raise ValueError('No data provided')
if len(self.columns) != len(rows[0]):
raise ValueError('Expecting {} columns, found {}'.format(
len(self.columns), len(rows[0])))
conn = conn()
cursor = conn.cursor()
try:
cursor.execute(self.query + ', '.join(self.escape_rows(rows)))
conn.commit()
finally:
cursor.close()
conn.close()
return len(rows)
def escape_rows(self, rows: list):
"""Escape values for use in non-parameterized SQL queries.
:param rows: List of values to escape.
"""
def to_tuple(values):
rv = []
for column, value in zip(self.columns, values):
rv.append(self.columns.get(column).escape(value))
return tuple(rv)
for idx, row in enumerate(rows):
rows[idx] = '({})'.format(', '.join(map(str, to_tuple(row))))
return rows
def from_sqlalchemy_table(table: Table, data: Iterator, columns: list,
batch_size: int=BATCH_SIZE, header: bool=False):
"""Return a :class:`BulkInsertFromIterator` based on the metadata
of a SQLAlchemy table.
Example::
batch = from_sqlalchemy_table(
Rating.__table__,
data,
['rating_id', 'repo_id', 'login_id', 'rating']
)
:param table: A :class:`sqlalchemy.Table` instance.
:param data: An iterator.
:param columns: List of column names to use.
"""
if not isinstance(table, Table):
raise TypeError('Expected sqlalchemy.Table, got {}'.format(table))
wrapped = []
for name in columns:
column = table.columns.get(name)
wrapped.append(Column(str(column.name), column.type.python_type))
return BulkInsertFromIterator(table, data, wrapped, batch_size, header)
def as_columns(columns) -> List[Column]:
rv = []
for column in columns:
if isinstance(column, Column):
rv.append(column)
if isinstance(column, tuple):
rv.append(Column(*column))
if isinstance(column, str):
rv.append(Column(column, str))
return rv
def to_dict_model(self) -> dict:
"""Returns a single SQLAlchemy model instance as a dictionary."""
return dict((key, getattr(self, key)) for key in self.__mapper__.c.keys())
def to_dict_query(self) -> list:
"""Returns all SQLAlchemy records in a query as dictionaries."""
return [row.to_dict() for row in self.all()]
class SQLAlchemyAutoCommit(SQLAlchemy):
"""By default ``psycopg2`` will wrap SELECT statements in a transaction.
This can be avoided using AUTOCOMMIT to rely on Postgres' default
implicit transaction mode (see this `blog post <http://bit.ly/1N0a7Lj>`_
for more details).
"""
def apply_driver_hacks(self, app, info, options):
super().apply_driver_hacks(app, info, options)
options['isolation_level'] = 'AUTOCOMMIT'
```
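Taken together, `Column`, `BulkInsertFromIterator`, and `BulkInsertQuery` turn a plain iterator of rows into batched multi-row INSERT statements. A hedged usage sketch follows; the table and column names are made up, it assumes the growser package plus psycopg2/Flask-SQLAlchemy are installed, and `raw_connection` stands in for whatever DB API connection factory is available (e.g. `db.engine.raw_connection`):
```python
from growser.db import BulkInsertFromIterator, Column

rows = iter([(1, 'torvalds/linux'), (2, 'python/cpython')])
bulk = BulkInsertFromIterator(
    'public.repositories',                        # hypothetical table name
    rows,
    [Column('repo_id', int), Column('name', str)],
    batch_size=1000
)
# batch_execute yields a running row count after each committed batch,
# which is useful for reporting progress on large imports:
# for total in bulk.batch_execute(raw_connection):
#     print('inserted', total, 'rows so far')
```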
#### File: growser/growser/google.py
```python
from collections.abc import Sized
from io import FileIO
from itertools import chain
import os
from apiclient.discovery import build
from apiclient.http import MediaIoBaseDownload
from apiclient.errors import HttpError
from httplib2 import Http
from oauth2client.client import SignedJwtAssertionCredentials
def _client(service_name, version, account_name, private_key, scope):
auth = SignedJwtAssertionCredentials(account_name, private_key, scope)
return build(service_name, version, http=auth.authorize(Http()))
class BaseService:
"""Authentication required for all Google API services."""
service_name = None
version = None
scope = None
def __init__(self, project_id, account_name, private_key):
self.project_id = project_id
self.account_name = account_name
self.private_key = private_key
self._client = None
@property
def client(self):
if self._client is None:
self._client = _client(self.service_name, self.version,
self.account_name, self.private_key,
self.scope)
return self._client
class BigQueryService(BaseService):
"""Wrapper over google-api-client for working with BigQuery."""
service_name = 'bigquery'
version = 'v2'
scope = 'https://www.googleapis.com/auth/bigquery'
@property
def jobs(self):
return self.client.jobs()
@property
def tables(self):
return self.client.tables()
class CloudStorageService(BaseService):
"""Wrapper over google-api-client for working with Cloud Storage."""
service_name = 'storage'
version = 'v1'
scope = 'https://www.googleapis.com/auth/devstorage.full_control'
@property
def objects(self):
return self.client.objects()
@property
def buckets(self):
return self.client.buckets()
class BaseJob:
def __init__(self, api):
self.api = api
self.project_id = self.api.project_id
self.id = ''
class BigQueryJob(BaseJob):
num_retries = 5
@property
def info(self):
if not self.id:
raise JobNotCompleteException('Job not complete')
return self._call('get', jobId=self.id)
@property
def is_complete(self):
return self.info['status']['state'] == 'DONE'
def query(self, body: dict):
return self._call('query', body=body)
def insert(self, body: dict):
return self._call('insert', body=body)
def _call(self, api: str, **kwargs):
"""Execute a request against the Google API."""
func = getattr(self.api.jobs, api)(projectId=self.project_id, **kwargs)
rv = func.execute(num_retries=self.num_retries)
if 'jobReference' in rv:
self.id = rv['jobReference']['jobId']
if 'errors' in rv['status']:
raise JobFailedException(rv['status']['errors'])
return rv
class ExecuteQuery(BigQueryJob):
def run(self, query: str):
"""Execute a query and immediately return the results.
:param query: Query to execute on BigQuery.
"""
results = self.query({'query': query})
return QueryResult(iter([results]))
class ExecuteAsyncQuery(BigQueryJob):
def run(self, query: str):
"""Execute a query in batch mode to be retrieved later.
:param query: Query to run in batch mode.
"""
body = {
'configuration': {
'query': {
'query': query,
'priority': 'BATCH'
}
}
}
return self.insert(body)
def results(self):
return FetchQueryResults(self.api).run(self.id)
class FetchQueryResults(BigQueryJob):
def run(self, job_id: str):
"""Fetch all results from a query stored on BigQuery."""
self.id = job_id
if not self.is_complete:
raise JobNotCompleteException('Job is not complete')
return QueryResult(self._pages())
def _pages(self):
"""Return all pages of results for the query."""
kwargs = {'jobId': self.id}
has_token = True
while has_token:
rv = self._call('getQueryResults', **kwargs)
has_token = 'pageToken' in rv
if has_token:
kwargs['pageToken'] = rv['pageToken']
yield rv
class DeleteTable(BigQueryJob):
def run(self, table: str):
"""Delete a table hosted on BigQuery.
:param table: Name of the table to delete.
"""
try:
self.api.tables.delete(**_table(self.project_id, table)).execute()
except HttpError:
return False
return True
@property
def is_complete(self):
"""API has empty response, assume true."""
return True
class PersistQueryToTable(BigQueryJob):
"""Execute a query and save the results to a table."""
def run(self, query: str, destination: str):
DeleteTable(self.api).run(destination)
body = {
'configuration': {
'query': {
'query': query,
'allowLargeResults': True,
'destinationTable': _table(self.project_id, destination)
}
}
}
return self.insert(body)
class ExportTableToCSV(BigQueryJob):
"""Export a table to Google Cloud Storage as compressed CSV files."""
def run(self, source: str, destination: str):
body = {
'configuration': {
'extract': {
'sourceTable': _table(self.project_id, source),
'destinationUris': [destination],
'destinationFormat': 'CSV',
'compression': 'GZIP'
}
}
}
return self.insert(body)
class QueryResult(Sized):
def __init__(self, pages):
self._pages = pages
self._first = next(self._pages)
self.fields = [f['name'] for f in self._first['schema']['fields']]
self.total_rows = int(self._first['totalRows'])
def rows(self, as_dict: bool=False):
def to_tuple(row):
return list(map(lambda x: x['v'], row['f']))
def to_dict(row):
return dict(zip(self.fields, to_tuple(row)))
for response in chain([self._first], self._pages):
transform = to_dict if as_dict else to_tuple
yield from (transform(row) for row in response['rows'])
def __len__(self):
return self.total_rows
class JobNotCompleteException(Exception):
pass
class JobFailedException(Exception):
pass
def _table(project_id, table):
id1, id2 = table.split('.')
return {'projectId': project_id, 'datasetId': id1, 'tableId': id2}
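# For example, _table('my-project', 'github.events') (placeholder names) returns
# {'projectId': 'my-project', 'datasetId': 'github', 'tableId': 'events'},
# the table-reference shape expected by the BigQuery jobs and tables endpoints.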
class GoogleStorageJob(BaseJob):
"""Base for any job that runs against Google Cloud Storage."""
class DownloadFile(GoogleStorageJob):
"""Download a file from a Google Cloud Storage bucket to a local path."""
def run(self, bucket: str, obj: str, local_path: str):
archive = self.api.objects.get_media(bucket=bucket, object=obj)
filename = os.path.join(local_path, os.path.basename(obj))
with FileIO(filename, 'wb') as fh:
downloader = MediaIoBaseDownload(fh, archive, chunksize=1024*1024)
complete = False
while not complete:
_, complete = downloader.next_chunk()
return filename
class DeleteFile(GoogleStorageJob):
"""Delete a file from a Google Cloud Storage bucket"""
def run(self, bucket: str, obj: str):
try:
self.api.objects.delete(bucket=bucket, object=obj).execute()
return True
except HttpError:
# Error is returned if the object does not exist - can ignore
return False
class FindFilesMatchingPrefix(GoogleStorageJob):
"""Return a list of all files matching `prefix`."""
def run(self, bucket: str, prefix: str):
response = self.api.objects \
.list(bucket=bucket, prefix=prefix).execute()
return [i for i in response['items'] if int(i['size']) > 0]
class DownloadBucketPath(GoogleStorageJob):
"""Download a Google Storage bucket to a local path."""
def run(self, bucket: str, bucket_path: str, local_path: str):
archives = FindFilesMatchingPrefix(self.api).run(bucket, bucket_path)
filenames = []
for file in archives:
filenames.append(DownloadFile(self.api).run(
bucket, file['name'], local_path))
DeleteFile(self.api).run(bucket, file['name'])
return filenames
```
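The classes above wrap the legacy google-api-python-client/oauth2client stack. A hedged sketch of how they compose; the project id, service account, key file, dataset, and bucket names below are all placeholders:
```python
from growser.google import BigQueryService, ExecuteQuery, ExportTableToCSV

with open('service-account.pem', 'rb') as fh:   # placeholder key file
    private_key = fh.read()

bigquery = BigQueryService('my-project',
                           'svc@my-project.iam.gserviceaccount.com',
                           private_key)

# Small result sets can be fetched synchronously
result = ExecuteQuery(bigquery).run('SELECT repo_id, actor_id FROM github.events LIMIT 10')
for repo_id, actor_id in result.rows():
    print(repo_id, actor_id)

# Larger tables are better exported to Cloud Storage as gzipped CSVs
ExportTableToCSV(bigquery).run('github.events_export', 'gs://my-bucket/events-*.csv.gz')
```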
#### File: growser/tests/test_bigquery.py
```python
import random
import unittest
from unittest.mock import MagicMock, Mock
import uuid
from apiclient.errors import HttpError
from growser.google import _table
from growser.google import DeleteTable
from growser.google import ExecuteAsyncQuery
from growser.google import ExecuteQuery
from growser.google import ExportTableToCSV
from growser.google import PersistQueryToTable
from growser.google import QueryResult
PROJECT_ID = "test_project_id"
class BigQueryServiceTestCase(unittest.TestCase):
def service(self):
return MagicMock(project_id=PROJECT_ID)
def test_DeleteTable(self):
service = self.service()
table = "table.to_delete"
job = DeleteTable(service)
success = job.run(table)
service.tables.delete.assert_called_with(**_table(PROJECT_ID, table))
self.assertTrue(job.is_complete)
self.assertTrue(success)
def test_DeleteTable_not_exist(self):
service = self.service()
table = "table.to_delete"
service.tables.delete = Mock(side_effect=HttpError('url', b'content'))
job = DeleteTable(service)
success = job.run(table)
self.assertFalse(success)
def test_ExecuteAsyncQuery(self):
service = self.service()
query = "SELECT * FROM ds.table LIMIT 100"
ExecuteAsyncQuery(service).run(query)
self.assertIn(query, str(service.mock_calls[0]))
def test_ExecuteQuery(self):
service = self.service()
query = "SELECT * FROM ds.table LIMIT 100"
ExecuteQuery(service).run(query)
expected = {"body": {"query": query}, "projectId": PROJECT_ID}
service.jobs.query.assert_called_once_with(**expected)
def test_ExportTableToCSV(self):
service = self.service()
source = "source.table"
destination = "gs://some-path/files.gz"
ExportTableToCSV(service).run(source, destination)
extract = service.mock_calls[0][2]['body']['configuration']['extract']
check1 = extract['sourceTable']
check2 = extract['destinationUris']
self.assertEqual(service.mock_calls[0][0], "jobs.insert")
self.assertEqual(check1, _table(PROJECT_ID, source))
self.assertIn(destination, check2)
def test_PersistQueryToTable(self):
service = self.service()
query = "SELECT * FROM ds.table LIMIT 100"
table = "ds.export_table"
PersistQueryToTable(service).run(query, table)
body = service.jobs.insert.call_args_list[0][1]['body']['configuration']['query']
expected = _table(PROJECT_ID, table)
service.tables.delete.assert_called_once_with(**expected)
self.assertEqual(expected, body['destinationTable'])
self.assertEqual(query, body['query'])
def test_QueryResult(self):
output = response_example(False)
result = QueryResult(iter([output]))
rows = list(result.rows())
rows_dict = list(result.rows(True))
expected_fields = ['repo_id', 'actor_id', 'event']
self.assertEqual(result.total_rows, len(output['rows']))
self.assertEqual(len(rows), len(output['rows']))
self.assertEqual(len(rows), len(result))
self.assertEqual(result.fields, expected_fields)
for field in expected_fields:
self.assertIn(field, rows_dict[0])
def response_example(token=False, errors=False):
def random_rows(num):
result = []
for i in range(num):
result.append({'f': [
{'v': random.randint(1,1000)},
{'v': random.randint(1000,2000)},
{'v': random.choice(['WatchEvent', 'ForkEvent'])}
]})
return result
rows = random_rows(random.randint(1, 10))
response = {
'cacheHit': False,
'jobComplete': True,
'jobReference': {
'jobId': 'job_K7wQRG0iaQbT4Y',
'projectId': PROJECT_ID
},
'kind': 'bigquery#queryResponse',
'rows': rows,
'schema': {
'fields': [
{'mode': 'NULLABLE', 'name': 'repo_id', 'type': 'INTEGER'},
{'mode': 'NULLABLE', 'name': 'actor_id', 'type': 'INTEGER'},
{'mode': 'NULLABLE', 'name': 'event', 'type': 'STRING'}
]
},
'status': {'state': 'DONE'},
'totalBytesProcessed': '1234567',
'totalRows': len(rows)
}
if errors:
response['status'] = {'errors': ["test"]}
if token:
response['pageToken'] = uuid.uuid4()
return response
``` |
{
"source": "JohnnyPeng18/HiTyper",
"score": 2
} |
#### File: HiTyper/hityper/rej_typerule.py
```python
from hityper.typeobject import TypeObject
import hityper.tdg
from hityper import logger
from copy import copy, deepcopy
logger.name = __name__
class Rej_TypingRule(object):
def __init__(self):
pass
def check_failed(self, ori, rej):
if len(ori) == len(rej):
logger.warning("Rejection Typing rule faild, all types are rejected.")
def act(self, outs, operands, op, func, attr, usertypes, iterable=False):
#if not about iterable
if not iterable:
left = operands[0] if len(operands) > 0 else None
right = operands[1] if len(operands) > 1 else None
if (left != None and not isinstance(left, hityper.tdg.GraphBaseNode)) or (right != None and not isinstance(right, hityper.tdg.GraphBaseNode)):
raise ValueError("Operands must be a graph node")
if (op in ["and", "or"]):
return self.norej_add(outs,operands)
elif (op == "not"):
return self.norej_add(outs,operands)
elif (op in ["<", "<=", ">", ">="]):
return self.norej_add(outs,operands)
elif (op in ["==", "!=", "is", "isnot"]):
return self.norej_add(outs, operands)
elif (op == "+" and right != None):
return self.binop_add(outs,operands)
elif (op == "*"):
return self.binop_mul(outs,left, right)
elif (op in ["-", "/", "//", "%", "**", "pow"] and right != None):
return self.binop_num_op(outs, left, right, op)
elif (op in ["+", "-", "abs"] and right == None):
return self.NumRemainSame(outs,left, right)
elif (op in ["|", "^", "&", "<<", ">>"]):
return self.binop_int_op(outs, left, right)
elif (op == "~" and right == None):
return self.unop_int_op(outs, left, right)
elif (op in ["in", "not in"] ):
return self.norej_add(outs, operands)
elif (op == "forin" and right == None):
return self.unop_forin(outs, left, right)
elif (op == "append"):
return self.binop_append(outs,left, right)
elif (op == "Subscript_Write"):
return self.norej_add(outs, operands)
elif (op == "Subscript_Read"):
return self.binop_subscript(outs,operands, func, attr, usertypes)
elif (op == "=" and right == None):
return self.unop_assign(outs,left, right)
elif (op == "call"):
return self.rej_call(outs, operands, func, attr, usertypes)
elif (op == "List_Read"):
return self.norej_add(outs, operands)
elif( op == "List_Write"):
return self.List_Write(outs,operands)
elif(op == "Tuple_Read"):
return self.norej_add(outs, operands)
elif(op == "Tuple_Write"):
return self.Add_tofirst(outs,operands)
elif(op == "Set_Read"):
return self.norej_add(outs, operands)
elif(op =="Dict_Read"):
return self.norej_add(outs, operands)
elif(op == "JoinedStr"):
return self.norej_add(outs, operands)
elif(op=="."):
return self.norej_add(outs, operands)
elif(op=="ListComp"):
return self.Add_tofirst(outs, operands)
elif(op=="SetComp"):
return self.Add_tofirst(outs,operands)
elif(op=="DictComp"):
return self.norej_add(outs, operands)
elif(op=="GeneratorExp"):
return self.Add_tofirst(outs, operands)
else: #for unknown_op
return self.unknown_op(outs,operands, op)
#raise TypeError("Unknown Operation: " + op)
def binop_and_or(self,outs,left,right):
# since left and right can have arbitrary types, no rej_type can be inferred
ltypes = left.types
rtypes = right.types
rej_ltypes = deepcopy(left.rejtypes)
rej_rtypes = deepcopy(right.rejtypes)
rej_outs = outs.rejtypes
return [rej_ltypes, rej_rtypes]
def norej_add(self,outs,operands):
# this function is for an arbitrary number of input operands
inputlen = len(operands)
rej_list = []
for node in operands:
rej_each = node.rejtypes
rej_list.append(rej_each)
return rej_list
def binop_add(self, outs, operands):
left = operands[0]
ltypes = left.types
if len(operands) > 1:
right = operands[1]
rtypes = right.types
rej_rtypes = deepcopy(right.rejtypes)
rej_ltypes = deepcopy(left.rejtypes)
rej_outs = outs.rejtypes
for t in rej_outs:
# here we should divide the situation into two: user-defined or built-in
rej_ltypes.append(t)
rej_rtypes.append(t)
rej_ltypes = TypeObject.removeRedundantTypes(rej_ltypes)
rej_rtypes = TypeObject.removeRedundantTypes(rej_rtypes)
return [rej_ltypes, rej_rtypes]
def binop_mul(self,outs,left, right):
#add rej_out into rej_left
ltypes = left.types
rtypes = right.types
rej_ltypes = deepcopy(left.rejtypes)
rej_rtypes = deepcopy(right.rejtypes)
rej_outs = outs.rejtypes
for t in rej_outs:
rej_ltypes.append(t)
rej_ltypes = TypeObject.removeRedundantTypes(rej_ltypes)
return [rej_ltypes, rej_rtypes]
def binop_num_op(self,outs, left, right, op):
ltypes = left.types
rtypes = right.types
rej_ltypes = deepcopy(left.rejtypes)
rej_rtypes = deepcopy(right.rejtypes)
rej_outs = outs.rejtypes
for t in rej_outs:
# here we should divide the situation into 2: numbers or others
if t.type not in ["bool", "int", "float", "complex"]:
rej_ltypes.append(t)
rej_rtypes.append(t)
rej_ltypes = TypeObject.removeRedundantTypes(rej_ltypes)
rej_rtypes = TypeObject.removeRedundantTypes(rej_rtypes)
return [rej_ltypes, rej_rtypes]
def NumRemainSame(self, outs, left, right):
ltypes = left.types
rej_ltypes = deepcopy(left.rejtypes)
rej_outs = outs.rejtypes
for t in rej_outs:
rej_ltypes.append(t)
rej_ltypes = TypeObject.removeRedundantTypes(rej_ltypes)
return [rej_ltypes]
def binop_int_op(self, outs, left, right):
rej_outs = outs.rejtypes
ltypes = left.types
rej_ltypes = deepcopy(left.rejtypes)
rtypes = right.types
rej_rtypes = deepcopy(right.rejtypes)
for t in rej_outs:
rej_ltypes.append(t)
rej_rtypes.append(t)
rej_ltypes = TypeObject.removeRedundantTypes(rej_ltypes)
rej_rtypes = TypeObject.removeRedundantTypes(rej_rtypes)
return [rej_ltypes, rej_rtypes]
def unop_int_op(self, outs, left, right):
# add rej_out into left
rej_ltypes = deepcopy(left.rejtypes)
rej_outs = outs.rejtypes
for t in rej_outs:
rej_ltypes.append(t)
rej_ltypes = TypeObject.removeRedundantTypes(rej_ltypes)
# if there are more than 2 operands
return [rej_ltypes]
def unop_forin(self,outs, left, right):
# no right here
rej_ltypes = deepcopy(left.rejtypes)
rej_outs = outs.rejtypes
for t in rej_outs:
rej_ltypes.append(t)
rej_ltypes = TypeObject.removeRedundantTypes(rej_ltypes)
return [rej_ltypes]
def binop_append(self,outs,left, right):
rej_ltypes = deepcopy(left.rejtypes)
rej_rtypes = deepcopy(right.rejtypes)
rej_outs = outs.rejtypes
for t in rej_outs:
rej_ltypes.append(t)
rej_ltypes = TypeObject.removeRedundantTypes(rej_ltypes)
return [rej_ltypes,rej_rtypes]
def binop_subscript(self, outs,operands, func, attr, usertypes):
if len(operands)==2:
# no infer
rej_list = []
for node in operands:
rej_each = node.rejtypes
rej_list.append(rej_each)
return rej_list
else:
left = operands[0]
rej_ltypes = deepcopy(left.rejtypes)
rej_outs = outs.rejtypes
for t in rej_outs:
# here we should divide the situation into two: user-defined or built-in
rej_ltypes.append(t)
rej_ltypes = TypeObject.removeRedundantTypes(rej_ltypes)
rej_list = [rej_ltypes]
for n in range(1,len(operands)):
rej_each = operands[n].rejtypes
rej_list.append(rej_each)
return rej_list
def unop_assign(self, outs,left, right):
# add rejout to left
rej_ltypes = deepcopy(left.rejtypes)
rej_outs = outs.rejtypes
for t in rej_outs:
rej_ltypes.append(t)
rej_ltypes = TypeObject.removeRedundantTypes(rej_ltypes)
return [rej_ltypes]
def List_Write(self, outs,operands):
left = operands[0]
rej_ltypes = deepcopy(left.rejtypes)
rej_outs = outs.rejtypes
for t in rej_outs:
# here we should divide the situation into two: user-defined or built-in
rej_ltypes.append(t)
rej_ltypes = TypeObject.removeRedundantTypes(rej_ltypes)
rej_list = [rej_ltypes]
for n in range(1, len(operands)):
rej_each = operands[n].rejtypes
rej_list.append(rej_each)
return rej_list
def Add_tofirst(self, outs, operands):
# add rej_out into operands[0]
left = operands[0]
rej_ltypes = deepcopy(left.rejtypes)
rej_outs = outs.rejtypes
for t in rej_outs:
rej_ltypes.append(t)
rej_ltypes = TypeObject.removeRedundantTypes(rej_ltypes)
rej_list = [rej_ltypes]
for n in range(1, len(operands)):
rej_each = operands[n].rejtypes
rej_list.append(rej_each)
return rej_list
def unknown_op(self,outs,operands,op):
print("Unknown Operation: " + op)
rej_list = []
for node in operands:
rej_each = node.rejtypes
rej_list.append(rej_each)
return rej_list
def rej_call(self, outs, operands, func, attr, usertypes):
rej_outs = outs.rejtypes
# if user-defined functions, no rej inference (because we don't know what they will do inside the function)
if func in usertypes:
rej_list = []
for node in operands:
rej_each = node.rejtypes
rej_list.append(rej_each)
return rej_list
# not a usertype: widely used built-in methods, e.g. list.append()
# there could be overloads, but we simply don't infer here
else:
if attr == None:
# no infer
rej_list = []
for node in operands:
rej_each = node.rejtypes
rej_list.append(rej_each)
return rej_list
# e.g def foo() / a = foo()
elif func == "append" or func=="clear" or func=="copy" or func == "insert" or func == "pop" or func=="remove" or func=="discard" or func=="reverse" or func=="sort":
#add rej_out into operands[0] ,other don't infer
return self.Add_tofirst(outs, operands)
elif func == "get" or func=="popitem" or func=="setdefault" or func=="update" or func=="center" or func=="zfill" or func=="expandtabs" or func == "join":
return self.Add_tofirst(outs, operands)
elif func == "ljust" or func=="lower" or func=="lstrip" or func=="removeprefix" or func=="removesuffix" or func =="rjust" or func=="replace":
return self.Add_tofirst(outs, operands)
elif func == "rstrip" or func=="strip" or func=="add" or func=="difference" or func=="difference_update" or func=="intersection" or func=="intersection_update" or func== "union":
return self.Add_tofirst(outs, operands)
elif func=="symmetric_difference" or func=="symmetric_difference_update" or func == "abs":
return self.Add_tofirst(outs, operands)
elif func=="count" or func == "index" or func == "bytes" or func =="int" or func == "float" or func == "str" or func == "tuple" or func == "list":
return self.norej_add(outs, operands)
elif func=="set" or func == "dict" or func == "type" or func=="fromkeys" or func=="values" or func=="encode" or func=="endswith" or func=="startswith":
return self.norej_add(outs, operands)
elif func=="find" or func=="rfind"or func=="partition" or func=="rpartition" or func=="rindex" or func=="rsplit" or func=="split" or func=="splitlines":
return self.norej_add(outs, operands)
elif func in ['isalnum', 'isalpha', 'isascii', 'isdecimal', 'isdigit', 'isidentifier', 'islower', 'isnumeric', 'isprintable', 'isspace', 'istitle', 'isupper']:
return self.norej_add(outs, operands)
elif func=="isdisjoint" or func=="issubset" or func=="issuperset" or func =="all" or func == "any" or func == "bin" or func=="hex" or func == "oct" or func == "divmod" or func == "enumerate":
return self.norej_add(outs, operands)
elif func == "getattr" or func=="globals" or func=="hash" or func == "isinstance" or func == "len" or func == "map" or func == "max" or func=="min" or func == "pow" or func == "round" or func == "sorted":
return self.norej_add(outs, operands)
elif func == "sum":
return self.norej_add(outs, operands)
elif func == "extend":
# add rej_out into all the operands
rej_outs = outs.rejtypes
rej_list = []
for node in operands:
rej_types = deepcopy(node.rejtypes)
for t in rej_outs:
rej_types.append(t)
rej_types = TypeObject.removeRedundantTypes(rej_types)
rej_list.append(rej_types)
return rej_list
else:
return self.norej_add(outs, operands)
```
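The numeric rules above push rejections "downward": if the output of an operator like `-` or `/` is known not to be some non-numeric type `t`, then `t` is rejected for both operands as well, while numeric rejections are not propagated because of promotion (e.g. `int - float` yields `float`). A simplified, standalone illustration of that idea, using plain sets instead of HiTyper's `TypeObject` graph nodes:
```python
NUMERIC = {"bool", "int", "float", "complex"}

def reject_num_op(out_rejected, left_rejected, right_rejected):
    """Propagate non-numeric rejections from the output to both operands."""
    left, right = set(left_rejected), set(right_rejected)
    for t in out_rejected:
        if t not in NUMERIC:
            left.add(t)
            right.add(t)
    return left, right

left, right = reject_num_op(out_rejected={"str", "int"},
                            left_rejected=set(),
                            right_rejected={"bytes"})
print(left)   # {'str'} - 'int' is numeric, so it is not pushed down
print(right)  # {'bytes', 'str'}
```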
#### File: HiTyper/hityper/tdg_generator.py
```python
from hityper.tdg import *
from hityper.typeobject import *
from hityper import logger
from hityper.stdtypes import builtin_method_properties, stdtypes, inputtypemap
import ast
from copy import deepcopy, copy
import sys, getopt
from pycg.pycg import CallGraphGenerator
from pycg import formats
from pycg.utils.constants import CALL_GRAPH_OP
import json
logger.name = __name__
# A map from ast nodes to op strings
AST2Op = {}
AST2Op[ast.Add] = "+"
AST2Op[ast.Sub] = "-"
AST2Op[ast.Mult] = "*"
AST2Op[ast.Div] = "/"
AST2Op[ast.FloorDiv] = "//"
AST2Op[ast.Mod] = "%"
AST2Op[ast.Pow] = "**"
AST2Op[ast.LShift] = "<<"
AST2Op[ast.RShift] = ">>"
AST2Op[ast.BitOr] = "|"
AST2Op[ast.BitXor] = "^"
AST2Op[ast.BitAnd] = "&"
AST2Op[ast.MatMult] = "@"
AST2Op[ast.UAdd] = "+"
AST2Op[ast.USub] = "-"
AST2Op[ast.Not] = "not"
AST2Op[ast.Invert] = "~"
AST2Op[ast.And] = "and"
AST2Op[ast.Or] = "or"
AST2Op[ast.Eq] = "=="
AST2Op[ast.NotEq] = "!="
AST2Op[ast.Lt] = "<"
AST2Op[ast.LtE] = "<="
AST2Op[ast.Gt] = ">"
AST2Op[ast.GtE] = ">="
AST2Op[ast.Is] = "is"
AST2Op[ast.IsNot] = "isnot"
AST2Op[ast.In] = "in"
AST2Op[ast.NotIn] = "not in"
def transformConstant(node):
if not isinstance(node, ast.Constant):
raise ValueError("Only Support Constant AST node.")
if isinstance(node.value, str):
return TypeObject("Text", 0)
elif isinstance(node.value, bytes):
return TypeObject("bytes", 0)
elif isinstance(node.value, bool):
return TypeObject("bool", 0)
elif isinstance(node.value, float):
return TypeObject("float", 0)
elif isinstance(node.value, int):
return TypeObject("int", 0)
elif node.value == None:
return TypeObject("None", 0)
elif type(node.value) == type(Ellipsis):
return None
else:
raise TypeError("Currently we do not suupport constant of type: " + str(type(node.value)))
def Attribute2Str(node):
if isinstance(node, ast.Attribute):
return Attribute2Str(node.value) + "_@_" + node.attr
elif isinstance(node, ast.Name):
return node.id
elif isinstance(node, ast.Constant):
return transformConstant(node).type
elif isinstance(node,ast.Call):
return Attribute2Str(node.func) + "()"
else:
return "<Other>"
def Attribute2Str_Call(node):
temp1 = ''
temp1 += Attribute2Str(node.func) + "("
for argsnode in node.args:
temp1 += (Attribute2Str(argsnode)+"_&")
temp1 += ")"
return temp1
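# For reference: for the expression `a.b.c`, Attribute2Str returns the
# flattened string "a_@_b_@_c"; for a call such as `foo(a, 1)`,
# Attribute2Str_Call returns "foo(a_&int_&)" - constant arguments are
# replaced by the name of the TypeObject that transformConstant infers for them.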
class AliasAnalyzer(ast.NodeVisitor):
def __init__(self, aliasgraph):
self.aliasgraph = aliasgraph
self.curfunc = None
self.curclass = None
def visit_ClassDef(self, node):
if self.curclass == None:
self.curclass = node.name
self.generic_visit(node)
self.curclass = None
def visit_FunctionDef(self, node):
if self.curfunc == None:
self.curfunc = node.name
self.generic_visit(node)
self.curfunc = None
def visit_AsyncFunctionDef(self, node):
self.visit_FunctionDef(node)
#check the assignment like self.xxx = xxx or cls.xxx = xxx, which may create alias
def visit_Assign(self, node):
if self.curclass == None:
classstr = "global"
else:
classstr = self.curclass
if self.curfunc == None:
funcstr = "global"
else:
funcstr = self.curfunc
if type(node.value) == ast.Attribute:
attrstr = Attribute2Str(node.value)
if "<Other>" in attrstr:
logger.warning("Failed to initialize attribute " + attrstr)
else:
p = self.aliasgraph.addAttribute(attrstr, classstr + "@" + funcstr)
if "()" not in attrstr:
for t in node.targets:
targetstr = Attribute2Str(t)
if "<Other>" not in targetstr:
q = self.aliasgraph.addAttribute(targetstr, classstr + "@" + funcstr)
q.alias.append(p)
p.alias.append(q)
elif type(node.value) == ast.Name:
attrtargets = []
for t in node.targets:
if type(t) == ast.Attribute or type(t) == ast.Name:
attrtargets.append(t)
attrstr = Attribute2Str(node.value)
p = self.aliasgraph.addAttribute(attrstr, classstr + "@" + funcstr)
if "()" not in attrstr:
for t in attrtargets:
targetstr = Attribute2Str(t)
if "<Other>" not in targetstr:
q = self.aliasgraph.addAttribute(targetstr, classstr + "@" + funcstr)
q.alias.append(p)
p.alias.append(q)
self.generic_visit(node)
def visit_AnnAssign(self, node):
if node.value != None:
if self.curclass == None:
classstr = "global"
else:
classstr = self.curclass
if self.curfunc == None:
funcstr = "global"
else:
funcstr = self.curfunc
if type(node.value) == ast.Attribute:
attrstr = Attribute2Str(node.value)
if "<Other>" in attrstr:
logger.warning("Failed to initialize attribute " + attrstr)
else:
p = self.aliasgraph.addAttribute(attrstr, classstr + "@" + funcstr)
if "()" not in attrstr:
targetstr = Attribute2Str(node.target)
if "<Other>" not in targetstr:
q = self.aliasgraph.addAttribute(targetstr, classstr + "@" + funcstr)
q.alias.append(p)
p.alias.append(q)
elif type(node.value) == ast.Name:
attrstr = Attribute2Str(node.value)
p = self.aliasgraph.addAttribute(attrstr, classstr + "@" + funcstr)
if "()" not in attrstr:
targetstr = Attribute2Str(node.target)
if "<Other>" not in targetstr:
q = self.aliasgraph.addAttribute(targetstr, classstr + "@" + funcstr)
q.alias.append(p)
p.alias.append(q)
self.generic_visit(node)
def visit_Attribute(self, node):
attrstr = Attribute2Str(node)
if "<Other>" in attrstr:
logger.warning("Unsupported attribute: " + attrstr)
if self.curclass == None:
classstr = "global"
else:
classstr = self.curclass
if self.curfunc == None:
funcstr = "global"
else:
funcstr = self.curfunc
p = self.aliasgraph.addAttribute(attrstr, classstr + "@" + funcstr)
if p == None:
logger.warning("Failed to initialize attribute ", attrstr)
def run(self, node):
self.visit(node)
return self.aliasgraph
class TDGGenerator(ast.NodeVisitor):
def __init__(self, filename, optimize, locations, usertypes, alias = 0, repo = None):
#usertypes
self.usertypes = self.processUserTypes(usertypes)
#type graph
self.GlobalTG = GlobalTypeGraph(filename, self.usertypes)
self.filename = filename
self.tgstack = []
#stacks and corresponding cusors
self.curfunc = -1
self.funcstack = []
self.curclass = -1
self.classstack = []
self.curop = -1
self.opstack = []
#variable maps
self.localvar2id = []
self.lastlocalvar = []
self.globalvar2id = {}
self.lastglobalvar = {}
self.attribute2id = []
self.lastattribute = []
#other info
self.modules = []
self.classnames = []
if isinstance(locations, list):
self.locations = locations
elif locations == None:
self.locations = locations
else:
logger.error("Invalid locations for generating TDGs.")
raise ValueError("Invalid locations for generating TDGs.")
self.visitedfuncs = []
self.withitemnames = []
self.withpos = []
#flags
self.asifcond = False
self.subscriptwrite = False
self.augassignread = False
self.forin = False
self.optimized = optimize
self.alias = alias
self.repo = repo
logger.info("Handling file #"+ filename)
def processUserTypes(self, usertype):
usertypes = []
for t in usertype["direct"]:
if t[2] not in usertypes:
usertypes.append(t[2])
for t in usertype["indirect"]:
if t[2] not in usertypes:
usertypes.append(t[2])
for t in usertype["unrecognized"]:
if t[2] not in usertypes:
usertypes.append(t[2])
for t in usertype["init"]:
if t not in usertypes:
usertypes.append(t[0])
return usertypes
def addNode(self, node):
if self.curclass == -1 and self.curfunc == -1:
self.GlobalTG.addNode(node)
elif self.curfunc != -1:
self.tgstack[self.curfunc].addNode(node)
def searchNode(self, nodetype, nodename, nodepos):
if self.curclass == -1 and self.curfunc == -1:
for node in self.GlobalTG.globalnodes:
if node.nodetype == nodetype and node.lineno == nodepos[0] and node.columnno == nodepos[1] and node.columnend == nodepos[2]:
if nodetype == "TypeGen" and node.op == nodename:
return node
elif nodetype == "Symbol" and node.symbol == nodename:
return node
elif self.curfunc != -1:
for node in self.tgstack[self.curfunc].nodes:
if node.nodetype == nodetype and node.lineno == nodepos[0] and node.columnno == nodepos[1] and node.columnend == nodepos[2]:
if nodetype == "TypeGen" and node.op == nodename:
return node
elif nodetype == "Symbol" and node.symbol == nodename:
return node
def extractTypeCondition(self, node, inverse = 1):
'''
if type(node) == ast.BoolOp and type(node.op) == ast.And:
return self.extractTypeCondition(node.values[0], inverse) + self.extractTypeCondition(node.values[1], inverse)
elif type(node) == ast.UnaryOp and type(node.op) == ast.Not:
return self.extractTypeCondition(node.operand, inverse * -1)
'''
# type(x) == y
if (type(node) == ast.Compare and type(node.left) == ast.Call and type(node.left.func) == ast.Name
and node.left.func.id == "type" and len(node.left.args) == 1 and len(node.ops) == 1 and type(node.ops[0]) in [ast.Eq, ast.NotEq]
and len(node.comparators) == 1 and type(node.comparators[0]) in [ast.Name, ast.Attribute]):
branchnode = BranchNode([], [], None)
self.opstack.append(branchnode)
self.curop += 1
self.visit(node.left.args[0])
typestr = Attribute2Str(node.comparators[0])
if typestr in stdtypes["overall"]:
typeobject = TypeObject(inputtypemap[typestr], 0)
else:
typeobject = TypeObject(typestr, 2)
if type(node.ops[0]) == ast.NotEq:
inverse = inverse * -1
if inverse == 1:
branchnode.addTypes([typeobject, None])
else:
branchnode.addTypes([None, typeobject])
self.opstack.pop(self.curop)
self.curop -= 1
self.addNode(branchnode)
return [branchnode]
# x is y
elif (type(node) == ast.Compare and (type(node.left) == ast.Name or type(node.left) == ast.Attribute) and len(node.ops) == 1
and type(node.ops[0]) in [ast.Is, ast.IsNot] and len(node.comparators) == 1 and type(node.comparators[0]) in [ast.Name, ast.Attribute, ast.Constant]):
branchnode = BranchNode([], [], None)
self.opstack.append(branchnode)
self.curop += 1
self.visit(node.left)
if type(node.comparators[0]) == ast.Constant:
typeobject = transformConstant(node.comparators[0])
else:
typestr = Attribute2Str(node.comparators[0])
if typestr in stdtypes["overall"]:
typeobject = TypeObject(inputtypemap[typestr], 0)
else:
typeobject = TypeObject(typestr, 2)
if type(node.ops[0]) == ast.IsNot:
inverse = inverse * -1
if inverse == 1:
branchnode.addTypes([typeobject, None])
else:
branchnode.addTypes([None, typeobject])
self.opstack.pop(self.curop)
self.curop -= 1
self.addNode(branchnode)
return [branchnode]
# isinstance(x,y)
elif (type(node) == ast.Call and type(node.func) == ast.Name and node.func.id == "isinstance"
and len(node.args) == 2 and type(node.args[1]) in [ast.Name, ast.Attribute]):
branchnode = BranchNode([], [], None)
self.opstack.append(branchnode)
self.curop += 1
self.visit(node.args[0])
typestr = Attribute2Str(node.args[1])
if typestr in stdtypes["overall"]:
typeobject = TypeObject(inputtypemap[typestr], 0)
else:
typeobject = TypeObject(typestr, 2)
if inverse == 1:
branchnode.addTypes([typeobject, None])
else:
branchnode.addTypes([None, typeobject])
self.opstack.pop(self.curop)
self.curop -= 1
self.addNode(branchnode)
return [branchnode]
else:
if type(node) != ast.Constant:
self.asifcond = True
self.visit(node)
self.asifcond = False
return []
def visitfield(self, field):
for node in field:
if node != None:
self.visit(node)
def buildmergename(self, nodes):
namestr = ""
for n in nodes:
if isinstance(n, SymbolNode):
if n.scope == "local":
namestr = namestr + str(n.symbol) + str(n.order)
elif n.scope == "attribute":
namestr = namestr + str(n.classname) + "." + str(n.symbol) + str(n.order)
elif isinstance(n, MergeNode):
namestr = namestr + "(" + str(n.mergevar) + ")"
elif isinstance(n, BranchNode):
namestr = namestr + "(" + "branch " +str(n.branchvar) + " )"
else:
namestr += n.name
namestr += ", "
namestr = namestr[: len(namestr) - 2]
return namestr
def clearlast(self):
if self.curclass != -1:
for key in self.lastattribute[self.curclass]:
self.lastattribute[self.curclass][key] = None
for key in self.lastglobalvar:
self.lastglobalvar[key] = None
def addMergeNodes(self):
varusemap = {}
if self.curfunc != -1 and len(self.tgstack[self.curfunc].loopbuffer) >= self.tgstack[self.curfunc].inloop:
loopbuffer = self.tgstack[self.curfunc].loopbuffer[self.tgstack[self.curfunc].inloop - 1]
elif self.curfunc != -1 and len(self.tgstack[self.curfunc].loopbuffer) < self.tgstack[self.curfunc].inloop:
return
elif len(self.GlobalTG.loopbuffer) >= self.GlobalTG.inloop:
loopbuffer = self.GlobalTG.loopbuffer[self.GlobalTG.inloop - 1]
else:
return
for n in loopbuffer:
if isinstance(n, SymbolNode):
if n.symbol not in varusemap:
varusemap[n.symbol] = n
elif n.order < varusemap[n.symbol].order:
varusemap[n.symbol] = n
#if first use is write, then do not add merge
changed = True
while(changed):
changed = False
for key in varusemap:
if len(varusemap[key].ins) > 1:
raise ValueError("Symbol nodes should not have more than 1 input nodes.")
elif varusemap[key].ctx != "Load":
del varusemap[key]
changed = True
break
#add merge nodes for the first use
for key in varusemap:
if key in self.lastglobalvar:
mergenode = MergeNode([varusemap[key].ins[0], self.lastglobalvar[key]], [varusemap[key]], self.buildmergename([varusemap[key].ins[0], self.lastglobalvar[key]]))
varusemap[key].ins[0].addOuts(mergenode)
varusemap[key].ins[0].outs.remove(varusemap[key])
self.lastglobalvar[key].addOuts(mergenode)
elif self.curclass != -1 and key in self.lastattribute[self.curclass]:
mergenode = MergeNode([varusemap[key].ins[0], self.lastattribute[self.curclass][key]], [varusemap[key]], self.buildmergename([varusemap[key].ins[0], self.lastattribute[self.curclass][key]]))
varusemap[key].ins[0].addOuts(mergenode)
varusemap[key].ins[0].outs.remove(varusemap[key])
self.lastattribute[self.curclass][key].addOuts(mergenode)
elif self.curfunc != -1 and key in self.lastlocalvar[self.curfunc]:
mergenode = MergeNode([varusemap[key].ins[0], self.lastlocalvar[self.curfunc][key]], [varusemap[key]], self.buildmergename([varusemap[key].ins[0], self.lastlocalvar[self.curfunc][key]]))
varusemap[key].ins[0].addOuts(mergenode)
varusemap[key].ins[0].outs.remove(varusemap[key])
self.lastlocalvar[self.curfunc][key].addOuts(mergenode)
varusemap[key].ins = [mergenode]
self.addNode(mergenode)
def addMerge4Except(self):
if self.curfunc != -1 and len(self.tgstack[self.curfunc].trybuffer) >= self.tgstack[self.curfunc].intry:
trybuffer = self.tgstack[self.curfunc].trybuffer[self.tgstack[self.curfunc].intry - 1]
elif self.curfunc != -1 and len(self.tgstack[self.curfunc].trybuffer) < self.tgstack[self.curfunc].intry:
#self.tgstack[self.curfunc].trybuffer.append({})
return
elif len(self.GlobalTG.trybuffer) >= self.GlobalTG.intry:
trybuffer = self.GlobalTG.trybuffer[self.GlobalTG.intry - 1]
elif len(self.GlobalTG.trybuffer) < self.GlobalTG.intry :
#self.GlobalTG.trybuffer.append({})
return
for key in trybuffer:
nodes = trybuffer[key]
if key in self.lastglobalvar and self.lastglobalvar[key] != None:
nodes = [self.lastglobalvar[key]] + nodes
mergenode = MergeNode(nodes, [], self.buildmergename(nodes))
self.lastglobalvar[key] = mergenode
for n in nodes:
n.addOuts(mergenode)
elif self.curclass != -1 and key in self.lastattribute[self.curclass] and self.lastattribute[self.curclass][key] != None:
nodes = [self.lastattribute[self.curclass][key]] + nodes
mergenode = MergeNode(nodes, [], self.buildmergename(nodes))
self.lastattribute[self.curclass][key] = mergenode
for n in nodes:
n.addOuts(mergenode)
elif self.curfunc != -1 and key in self.lastlocalvar[self.curfunc]:
nodes = [self.lastlocalvar[self.curfunc][key]] + nodes
mergenode = MergeNode(nodes, [], self.buildmergename(nodes))
self.lastlocalvar[self.curfunc][key] = mergenode
for n in nodes:
n.addOuts(mergenode)
else:
mergenode = MergeNode(nodes, [], self.buildmergename(nodes))
if self.curfunc != -1:
self.lastlocalvar[self.curfunc][key] = mergenode
elif self.curclass != -1:
self.lastattribute[self.curclass][key] = mergenode
else:
self.lastglobalvar[key] = mergenode
for n in nodes:
n.addOuts(mergenode)
self.addNode(mergenode)
def addMerge4Finally(self):
if self.curfunc != -1:
exceptbuffer = self.tgstack[self.curfunc].exceptbuffer
else:
exceptbuffer = self.GlobalTG.exceptbuffer
keys = []
for b in exceptbuffer:
keys += b.keys()
for key in keys:
nodes = []
if key in self.lastglobalvar and self.lastglobalvar[key] != None:
nodes.append(self.lastglobalvar[key])
elif self.curfunc != -1 and key in self.lastlocalvar[self.curfunc] and self.lastlocalvar[self.curfunc][key] != None:
nodes.append(self.lastlocalvar[self.curfunc][key])
elif self.curclass != -1 and key in self.lastattribute[self.curclass] and self.lastattribute[self.curclass][key] != None:
nodes.append(self.lastattribute[self.curclass][key])
for b in exceptbuffer:
if key in b:
nodes.append(b[key])
if len(nodes) == 1:
if nodes[0].scope == "local":
self.lastlocalvar[self.curfunc][key] = nodes[0]
elif nodes[0].scope == "global":
self.lastglobalvar[key] = nodes[0]
elif nodes[0].scope == "attribute":
self.lastattribute[self.curclass][key] = nodes[0]
else:
mergenode = MergeNode(nodes, [], self.buildmergename(nodes))
if self.curfunc != -1:
self.lastlocalvar[self.curfunc][key] = mergenode
elif self.curclass != -1:
self.lastattribute[self.curclass][key] = mergenode
else:
self.lastglobalvar[key] = mergenode
for n in nodes:
n.addOuts(mergenode)
self.addNode(mergenode)
def visit_Import(self, node):
for i in node.names:
if i.asname != None:
self.modules.append(i.asname)
else:
self.modules.append(i.name)
def visit_ImportFrom(self, node):
for i in node.names:
if i.asname != None:
self.modules.append(i.asname)
else:
self.modules.append(i.name)
def visit_ClassDef(self, node):
self.classstack.append(node.name)
self.curclass += 1
self.attribute2id.append({})
self.lastattribute.append({})
#add classname as a type
if len(self.classstack) == 1:
self.classnames.append(node.name)
self.visitfield(node.body)
self.classstack.pop(self.curclass)
self.attribute2id.pop(self.curclass)
self.lastattribute.pop(self.curclass)
self.curclass -= 1
def visit_FunctionDef(self, node):
if len(self.classstack) != 0:
funcname = node.name + "@"
for c in self.classstack:
funcname = funcname + c + ","
funcname = funcname[: len(funcname) -1]
else:
funcname = node.name + "@global"
if ((self.locations != None and funcname in self.locations) or self.locations == None) and funcname not in self.visitedfuncs:
self.visitedfuncs.append(funcname)
logger.debug("[3rd Pass: TDG Generation] Visiting Function #" + funcname + "# at Line: " + str(node.lineno))
self.funcstack.append(node.name)
self.curfunc += 1
if self.curclass != -1:
classname = self.classstack[self.curclass]
else:
classname = None
tg = TypeGraph(funcname, self.usertypes, self.filename, classname, self.GlobalTG)
tg.startlineno = node.lineno
self.tgstack.append(tg)
self.localvar2id.append({})
self.lastlocalvar.append({})
self.clearlast()
self.visit(node.args)
self.visitfield(node.body)
self.clearlast()
self.finalize(tg)
self.GlobalTG.addTG(tg)
self.funcstack.pop(self.curfunc)
self.tgstack.pop(self.curfunc)
self.localvar2id.pop(self.curfunc)
self.lastlocalvar.pop(self.curfunc)
self.curfunc -= 1
else:
pass
def visit_AsyncFunctionDef(self, node):
self.visit_FunctionDef(node)
def visit_arguments(self, node):
self.visitfield(node.args)
self.visitfield(node.kwonlyargs)
if node.kwarg != None:
self.visit(node.kwarg)
if node.vararg != None:
self.visit(node.vararg)
if len(node.defaults) != 0:
index_offset = len(node.args) - len(node.defaults)
for i in range(0, len(node.args)):
if i - index_offset >= 0:
index = i-index_offset
if type(node.defaults[index]) == ast.Constant and transformConstant(node.defaults[index]) != None:
typenode = TypeNode([self.lastlocalvar[self.curfunc][node.args[i].arg]], transformConstant(node.defaults[index]))
typenode.setNodePos(node.defaults[index].lineno, node.defaults[index].col_offset, node.defaults[index].end_col_offset)
self.lastlocalvar[self.curfunc][node.args[i].arg].addIns(typenode)
self.addNode(typenode)
elif type(node.defaults[index]) == ast.Name:
typegen = TypeGenNode("=", [], [self.lastlocalvar[self.curfunc][node.args[i].arg]])
typegen.setNodePos(node.defaults[index].lineno, node.defaults[index].col_offset, node.defaults[index].end_col_offset)
self.opstack.append(typegen)
self.curop += 1
self.visit(node.defaults[index])
self.opstack.pop(self.curop)
self.curop -= 1
self.lastlocalvar[self.curfunc][node.args[i].arg].addIns(typegen)
self.addNode(typegen)
# we are not sure what initial value developers will give here
elif type(node.defaults[index]) != ast.Constant:
typegen = TypeGenNode("=", [], [self.lastlocalvar[self.curfunc][node.args[i].arg]])
typegen.setNodePos(node.defaults[index].lineno, node.defaults[index].col_offset, node.defaults[index].end_col_offset)
self.opstack.append(typegen)
self.curop += 1
self.visit(node.defaults[index])
self.opstack.pop(self.curop)
self.curop -= 1
self.lastlocalvar[self.curfunc][node.args[i].arg].addIns(typegen)
self.addNode(typegen)
if len(node.kw_defaults) != 0:
for i in range(0, len(node.kw_defaults)):
if node.kw_defaults[i] != None:
if type(node.kw_defaults[i]) == ast.Constant and transformConstant(node.kw_defaults[i]) != None:
typenode = TypeNode([self.lastlocalvar[self.curfunc][node.kwonlyargs[i].arg]], transformConstant(node.kw_defaults[i]))
typenode.setNodePos(node.kw_defaults[i].lineno, node.kw_defaults[i].col_offset, node.kw_defaults[i].end_col_offset)
self.lastlocalvar[self.curfunc][node.kwonlyargs[i].arg].addIns(typenode)
self.addNode(typenode)
elif type(node.kw_defaults[i]) == ast.Name:
typegen = TypeGenNode("=", [], [self.lastlocalvar[self.curfunc][node.kwonlyargs[i].arg]])
typegen.setNodePos(node.kw_defaults[i].lineno, node.kw_defaults[i].col_offset, node.kw_defaults[i].end_col_offset)
self.opstack.append(typegen)
self.curop += 1
self.visit(node.kw_defaults[i])
self.opstack.pop(self.curop)
self.curop -= 1
self.lastlocalvar[self.curfunc][node.kwonlyargs[i].arg].addIns(typegen)
self.addNode(typegen)
# we are not sure what initial value developers will give here
elif type(node.kw_defaults[i]) != ast.Constant:
typegen = TypeGenNode("=", [], [self.lastlocalvar[self.curfunc][node.kwonlyargs[i].arg]])
typegen.setNodePos(node.kw_defaults[i].lineno, node.kw_defaults[i].col_offset, node.kw_defaults[i].end_col_offset)
self.opstack.append(typegen)
self.curop += 1
self.visit(node.kw_defaults[i])
self.opstack.pop(self.curop)
self.curop -= 1
self.lastlocalvar[self.curfunc][node.kwonlyargs[i].arg].addIns(typegen)
self.addNode(typegen)
def visit_arg(self, node):
if node.arg != "self":
self.localvar2id[self.curfunc][node.arg] = 0
symbol = SymbolNode([], [], node.arg + "(arg)", 0, ctx = "Arg")
symbol.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.lastlocalvar[self.curfunc][node.arg] = symbol
self.tgstack[self.curfunc].addNode(symbol)
def visit_keyword(self, node):
self.visit(node.value)
def visit_Assign(self, node):
#TypeGenNode
typegen = TypeGenNode("=", [], [])
typegen.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.opstack.append(typegen)
self.curop += 1
self.visit(node.value)
self.visitfield(node.targets)
self.opstack.pop(self.curop)
self.curop -= 1
if self.curop != -1:
typegen.addOuts(self.opstack[self.curop])
self.opstack[self.curop].addIns(typegen)
self.addNode(typegen)
def visit_AugAssign(self, node):
typegen = TypeGenNode(AST2Op[type(node.op)], [], [])
typegen.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.opstack.append(typegen)
self.curop += 1
self.augassignread = True
self.visit(node.target)
self.visit(node.value)
self.augassignread = False
self.opstack.pop(self.curop)
self.curop -= 1
typegen2 = TypeGenNode("=", [typegen], [])
typegen2.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.opstack.append(typegen2)
self.curop += 1
self.visit(node.target)
self.opstack.pop(self.curop)
self.curop -= 1
typegen.addOuts(typegen2)
if self.curop != -1 and not self.asifcond:
typegen2.addOuts(self.opstack[self.curop])
self.opstack[self.curop].addIns(typegen2)
self.addNode(typegen)
self.addNode(typegen2)
def visit_AnnAssign(self, node):
if node.value != None:
typegen = TypeGenNode("=", [], [])
typegen.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.opstack.append(typegen)
self.curop += 1
if node.value != None:
self.visit(node.value)
if node.target != None:
self.visit(node.target)
self.opstack.pop(self.curop)
self.curop -= 1
if self.curop != -1:
typegen.addOuts(self.opstack[self.curop])
self.opstack[self.curop].addIns(typegen)
self.addNode(typegen)
def visit_Call(self, node):
asifcond = self.asifcond
if self.asifcond:
self.asifcond = False
#Type Gen Node
typegen = TypeGenNode("call", [], [])
typegen.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.opstack.append(typegen)
self.curop += 1
#case 1: independent functions, such as func()
if isinstance(node.func, ast.Name):
typegen.setFunc(node.func.id)
self.visitfield(node.args)
self.visitfield(node.keywords)
#case 2: property functions, such as a.append()
elif isinstance(node.func, ast.Attribute):
attrstr = Attribute2Str(node.func)
if attrstr.count("_@_") > 1:
typegen.setFunc(attrstr)
self.visitfield(node.args)
self.visitfield(node.keywords)
else:
typegen.setFunc(node.func.attr)
if type(node.func.value) == ast.Name and node.func.value.id == "self":
self.visit(node.func.value)
typegen.attr = "self"
else:
self.visit(node.func.value)
typegen.attr = Attribute2Str(node.func.value)
self.visitfield(node.args)
self.visitfield(node.keywords)
if node.func.attr in builtin_method_properties["self-changable"]["overall"]:
if isinstance(typegen.ins[0], SymbolNode):
symbol = SymbolNode([typegen], [], typegen.ins[0].symbol, 0, classname=typegen.ins[0].classname, scope=typegen.ins[0].scope)
symbol.setNodePos(typegen.ins[0].lineno, typegen.ins[0].columnno, typegen.ins[0].columnend)
symbol.ctx = "Write"
if self.curfunc != -1:
if symbol.symbol in self.localvar2id[self.curfunc]:
self.localvar2id[self.curfunc][symbol.symbol] += 1
else:
self.localvar2id[self.curfunc][symbol.symbol] = 0
self.lastlocalvar[self.curfunc][symbol.symbol] = symbol
symbol.order = self.localvar2id[self.curfunc][symbol.symbol]
elif symbol.symbol in self.globalvar2id:
self.globalvar2id[symbol.symbol] += 1
symbol.order = self.globalvar2id[symbol.symbol]
self.lastglobalvar[symbol.symbol] = symbol
self.addNode(symbol)
typegen.addOuts(symbol)
self.opstack.pop(self.curop)
self.curop -= 1
if self.curop != -1 and not asifcond:
typegen.addOuts(self.opstack[self.curop])
self.opstack[self.curop].addIns(typegen)
self.addNode(typegen)
def visit_Subscript(self, node):
asifcond = self.asifcond
if self.asifcond:
self.asifcond = False
subscriptwrite = self.subscriptwrite
if self.subscriptwrite:
self.subscriptwrite = False
if (type(node.ctx) == ast.Store or subscriptwrite) and not self.augassignread:
typegen = TypeGenNode("Subscript_Write", [], [])
typegen.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.opstack.append(typegen)
self.curop += 1
if not subscriptwrite:
self.visit(node.value)
elif type(node.value) == ast.Subscript:
pre_node = self.searchNode("TypeGen", "Subscript_Read", [node.value.lineno, node.value.col_offset, node.value.end_col_offset])
if pre_node != None:
typegen.addIns(pre_node)
pre_node.addOuts(typegen)
else:
self.visit(node.value)
self.visit(node.slice)
self.opstack.pop(self.curop)
self.curop -= 1
if isinstance(typegen.ins[0], SymbolNode):
symbol = SymbolNode([typegen], [], typegen.ins[0].symbol, 0, classname=typegen.ins[0].classname, scope=typegen.ins[0].scope)
symbol.setNodePos(typegen.ins[0].lineno, typegen.ins[0].columnno, typegen.ins[0].columnend)
symbol.ctx = "Write"
if symbol.symbol in self.globalvar2id:
self.globalvar2id[symbol.symbol] += 1
symbol.order = self.globalvar2id[symbol.symbol]
self.lastglobalvar[symbol.symbol] = symbol
elif self.curfunc != -1 and symbol.symbol in self.localvar2id[self.curfunc]:
self.localvar2id[self.curfunc][symbol.symbol] += 1
symbol.order = self.localvar2id[self.curfunc][symbol.symbol]
self.lastlocalvar[self.curfunc][symbol.symbol] = symbol
elif self.curclass != -1 and symbol.symbol in self.attribute2id[self.curclass]:
self.attribute2id[self.curclass][symbol.symbol] += 1
symbol.order = self.attribute2id[self.curclass][symbol.symbol]
self.lastattribute[self.curclass][symbol.symbol] = symbol
self.addNode(symbol)
typegen.addOuts(symbol)
elif isinstance(node.value, ast.Name) and node.value.id == "self":
symbol = SymbolNode([typegen], [], "self", 0, classname=self.classstack[self.curclass], scope=self.funcstack[self.curfunc])
symbol.setNodePos(typegen.ins[0].lineno, typegen.ins[0].columnno, typegen.ins[0].columnend)
symbol.ctx = "Write"
if self.curclass != -1:
if symbol.symbol in self.attribute2id[self.curclass]:
self.attribute2id[self.curclass][symbol.symbol] += 1
symbol.order = self.attribute2id[self.curclass][symbol.symbol]
self.lastattribute[self.curclass][symbol.symbol] = symbol
else:
self.attribute2id[self.curclass][symbol.symbol] = 0
symbol.order = 0
self.lastattribute[self.curclass][symbol.symbol] = symbol
self.addNode(symbol)
typegen.addOuts(symbol)
else:
self.opstack.append(typegen)
self.curop += 1
self.subscriptwrite = True
self.visit(node.value)
self.opstack.pop(self.curop)
self.curop -= 1
self.subscriptwrite = False
if not asifcond and self.curop != -1:
typegen.addIns(self.opstack[self.curop])
self.opstack[self.curop].addOuts(typegen)
self.addNode(typegen)
elif type(node.ctx) == ast.Load or self.augassignread:
typegen = TypeGenNode("Subscript_Read", [], [])
typegen.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.opstack.append(typegen)
self.curop += 1
self.visit(node.value)
self.visit(node.slice)
self.opstack.pop(self.curop)
self.curop -= 1
if not asifcond and self.curop != -1:
typegen.addOuts(self.opstack[self.curop])
self.opstack[self.curop].addIns(typegen)
self.addNode(typegen)
def visit_Slice(self, node):
self.generic_visit(node)
def visit_Index(self, node):
self.visit(node.value)
def visit_BinOp(self, node):
asifcond = self.asifcond
if self.asifcond == True:
self.asifcond = False
#Type Gen Node
typegen = TypeGenNode(AST2Op[type(node.op)], [], [])
typegen.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.opstack.append(typegen)
self.curop += 1
self.visit(node.left)
self.visit(node.right)
self.opstack.pop(self.curop)
self.curop -= 1
if not asifcond and self.curop != -1:
typegen.addOuts(self.opstack[self.curop])
self.opstack[self.curop].addIns(typegen)
#typegen.performTypingRules()
self.addNode(typegen)
def visit_UnaryOp(self, node):
asifcond = self.asifcond
if self.asifcond:
self.asifcond = False
typegen = TypeGenNode(AST2Op[type(node.op)], [], [])
typegen.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.opstack.append(typegen)
self.curop += 1
self.visit(node.operand)
self.opstack.pop(self.curop)
self.curop -= 1
if not asifcond and self.curop != -1:
typegen.addOuts(self.opstack[self.curop])
self.opstack[self.curop].addIns(typegen)
#typegen.performTypingRules()
self.addNode(typegen)
def visit_Constant(self, node):
if self.curop != -1:
typeobject = transformConstant(node)
if typeobject != None:
typenode = TypeNode([self.opstack[self.curop]], typeobject)
typenode.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.opstack[self.curop].addIns(typenode)
self.addNode(typenode)
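    # visit_Name resolves a variable occurrence to a SymbolNode. Reads and writes bump the
    # per-scope counters (globalvar2id / localvar2id / attribute2id) to give each occurrence
    # an SSA-style version number, while lastglobalvar / lastlocalvar / lastattribute keep
    # the most recent node of each variable so later occurrences can be chained to it.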
def visit_Name(self, node):
if (node.id in stdtypes["overall"] or node.id in self.classnames or node.id in self.classstack) and type(node.ctx) == ast.Load:
if self.curop != -1:
if node.id in stdtypes["overall"]:
#typeobject = TypeObject(inputtypemap[node.id], 0)
typeobject = TypeObject(node.id, 0)
else:
typeobject = TypeObject(node.id, 2)
typenode = TypeNode([self.opstack[self.curop]], typeobject)
typenode.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.opstack[self.curop].addIns(typenode)
self.addNode(typenode)
elif node.id in stdtypes["errors"] or node.id in stdtypes["warnings"]:
if self.curop != -1:
typeobject = TypeObject(node.id, 0)
typenode = TypeNode([self.opstack[self.curop]], typeobject)
typenode.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.opstack[self.curop].addIns(typenode)
self.addNode(typenode)
elif node.id == "self" and type(node.ctx) == ast.Load and not self.subscriptwrite:
if self.curclass == -1:
                raise ValueError("self should be used within a class.")
typeobject = TypeObject(self.classstack[self.curclass], 2)
typenode = TypeNode([self.opstack[self.curop]], typeobject)
typenode.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
if self.curop != -1 and not self.asifcond:
self.opstack[self.curop].addIns(typenode)
self.addNode(typenode)
elif type(node.ctx) == ast.Load or (self.augassignread and not self.forin):
#case 1: global variable
if (self.curclass == -1 and self.curfunc == -1) or node.id in self.globalvar2id:
if node.id not in self.globalvar2id:
if node.id in self.modules:
if self.curop != -1 and not self.asifcond:
symbol = SymbolNode([], [self.opstack[self.curop]], node.id, 0, scope = "module")
else:
symbol = SymbolNode([], [], node.id, 0, scope = "module")
symbol.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
if self.curop != -1 and not self.asifcond:
self.opstack[self.curop].addIns(symbol)
self.addNode(symbol)
else:
if node.id.startswith("__") and node.id.endswith("__"):
self.globalvar2id[node.id] = 0
if self.curop != -1 and not self.asifcond:
symbol = SymbolNode([], [self.opstack[self.curop]], node.id, self.globalvar2id[node.id], scope = "global")
else:
symbol = SymbolNode([], [], node.id, self.globalvar2id[node.id], scope = "global")
symbol.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.lastglobalvar[node.id] = symbol
if self.curop != -1 and not self.asifcond:
self.opstack[self.curop].addIns(symbol)
self.addNode(symbol)
else:
self.globalvar2id[node.id] = 0
if self.curop != -1 and not self.asifcond:
symbol = SymbolNode([], [self.opstack[self.curop]], node.id, self.globalvar2id[node.id], scope = "global")
else:
symbol = SymbolNode([], [], node.id, self.globalvar2id[node.id], scope = "global")
symbol.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.lastglobalvar[node.id] = symbol
if self.curop != -1 and not self.asifcond:
self.opstack[self.curop].addIns(symbol)
self.addNode(symbol)
logger.warning(node.id + " variable is read but it is not previously defined!")
else:
self.globalvar2id[node.id] += 1
if self.curop != -1 and not self.asifcond:
#the first time being read in a function
if node.id not in self.lastglobalvar or self.lastglobalvar[node.id] == None:
symbol = SymbolNode([], [self.opstack[self.curop]], node.id, self.globalvar2id[node.id], scope = "global")
if self.curfunc != -1:
self.GlobalTG.addNode(symbol)
else:
symbol = SymbolNode([self.lastglobalvar[node.id]], [self.opstack[self.curop]], node.id, self.globalvar2id[node.id], scope = "global")
self.lastglobalvar[node.id].addOuts(symbol)
else:
#the first time being read in a function
if node.id not in self.lastglobalvar or self.lastglobalvar[node.id] == None:
symbol = SymbolNode([], [], node.id, self.globalvar2id[node.id], scope = "global")
if self.curfunc != -1:
self.GlobalTG.addNode(symbol)
else:
symbol = SymbolNode([self.lastglobalvar[node.id]], [], node.id, self.globalvar2id[node.id], scope = "global")
self.lastglobalvar[node.id].addOuts(symbol)
symbol.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.lastglobalvar[node.id] = symbol
if self.curop != -1 and not self.asifcond:
self.opstack[self.curop].addIns(symbol)
self.addNode(symbol)
#case 2: local variable
elif self.curfunc != -1:
if node.id not in self.localvar2id[self.curfunc] or node.id not in self.lastlocalvar[self.curfunc]:
if node.id in self.modules:
if self.curop != -1 and not self.asifcond:
symbol = SymbolNode([], [self.opstack[self.curop]], node.id, 0, scope = "module")
else:
symbol = SymbolNode([], [], node.id, 0, scope = "module")
symbol.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
if self.curop != -1 and not self.asifcond:
self.opstack[self.curop].addIns(symbol)
self.addNode(symbol)
elif node.id.startswith("__") and node.id.endswith("__"):
self.localvar2id[self.curfunc][node.id] = 0
if self.curop != -1 and not self.asifcond:
symbol = SymbolNode([], [self.opstack[self.curop]], node.id, self.localvar2id[self.curfunc][node.id])
else:
symbol = SymbolNode([], [], node.id, self.localvar2id[self.curfunc][node.id])
symbol.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.lastlocalvar[self.curfunc][node.id] = symbol
if self.curop != -1 and not self.asifcond:
self.opstack[self.curop].addIns(symbol)
self.tgstack[self.curfunc].addNode(symbol)
else:
self.localvar2id[self.curfunc][node.id] = 0
if self.curop != -1 and not self.asifcond:
symbol = SymbolNode([], [self.opstack[self.curop]], node.id, self.localvar2id[self.curfunc][node.id])
else:
symbol = SymbolNode([], [], node.id, self.localvar2id[self.curfunc][node.id])
symbol.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.lastlocalvar[self.curfunc][node.id] = symbol
if self.curop != -1 and not self.asifcond:
self.opstack[self.curop].addIns(symbol)
self.tgstack[self.curfunc].addNode(symbol)
logger.warning(node.id + " variable is read but it is not previously defined! Line: " + str(node.lineno))
else:
self.localvar2id[self.curfunc][node.id] += 1
if self.curop != -1 and not self.asifcond:
symbol = SymbolNode([self.lastlocalvar[self.curfunc][node.id]], [self.opstack[self.curop]], node.id, self.localvar2id[self.curfunc][node.id])
else:
symbol = SymbolNode([self.lastlocalvar[self.curfunc][node.id]], [], node.id, self.localvar2id[self.curfunc][node.id])
symbol.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.lastlocalvar[self.curfunc][node.id].addOuts(symbol)
self.lastlocalvar[self.curfunc][node.id] = symbol
if self.curop != -1 and not self.asifcond:
self.opstack[self.curop].addIns(symbol)
self.tgstack[self.curfunc].addNode(symbol)
elif type(node.ctx) == ast.Del:
#case 1: global variable
if (self.curclass == -1 and self.curfunc == -1) or node.id in self.globalvar2id:
if node.id not in self.globalvar2id:
pass
#raise ValueError( node.id + "does not exist.")
else:
del self.globalvar2id[node.id]
del self.lastglobalvar[node.id]
#case 2: local variable
if self.curfunc != -1:
if node.id not in self.localvar2id[self.curfunc]:
                    raise ValueError(node.id + " does not exist.")
else:
del self.localvar2id[self.curfunc][node.id]
del self.lastlocalvar[self.curfunc][node.id]
elif type(node.ctx) == ast.Store:
#case 1: global variable
if (self.curclass == -1 and self.curfunc == -1) or node.id in self.globalvar2id:
if node.id not in self.globalvar2id:
self.globalvar2id[node.id] = 0
else:
self.globalvar2id[node.id] += 1
symbol = SymbolNode([self.opstack[self.curop]], [], node.id, self.globalvar2id[node.id], scope = "global", ctx = "Write")
symbol.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.opstack[self.curop].addOuts(symbol)
self.lastglobalvar[node.id] = symbol
self.addNode(symbol)
#case 2: local variables
elif self.curfunc != -1:
if node.id not in self.localvar2id[self.curfunc]:
self.localvar2id[self.curfunc][node.id] = 0
else:
self.localvar2id[self.curfunc][node.id] += 1
symbol = SymbolNode([self.opstack[self.curop]], [], node.id, self.localvar2id[self.curfunc][node.id], ctx = "Write")
symbol.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.lastlocalvar[self.curfunc][node.id] = symbol
self.opstack[self.curop].addOuts(symbol)
self.tgstack[self.curfunc].addNode(symbol)
else:
            raise ValueError("Unsupported Name node.")
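    # visit_Attribute flattens the attribute chain into a string with Attribute2Str and
    # distinguishes three shapes: chains containing unsupported parts ("<Other>"), depth-2
    # accesses such as self.attr or obj.attr, and deeper chains, which are tracked as
    # ordinary global or local symbols keyed by the flattened name.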
def visit_Attribute(self, node):
attrstr = Attribute2Str(node)
asifcond = self.asifcond
if self.asifcond == True:
self.asifcond = False
if "<Other>" in attrstr:
typegen = TypeGenNode(".", [], [], attr = node.attr)
typegen.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.opstack.append(typegen)
self.curop += 1
self.visit(node.value)
self.opstack.pop(self.curop)
self.curop -= 1
self.addNode(typegen)
if not asifcond and self.curop != -1:
typegen.addOuts(self.opstack[self.curop])
self.opstack[self.curop].addIns(typegen)
#case 1: depth = 2
elif attrstr.count("_@_") == 1:
if type(node.value) == ast.Constant:
typegen = TypeGenNode(".", [], [], attr = node.attr)
typegen.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.opstack.append(typegen)
self.curop += 1
self.visit(node.value)
self.opstack.pop(self.curop)
self.curop -= 1
self.addNode(typegen)
if not asifcond and self.curop != -1:
typegen.addOuts(self.opstack[self.curop])
self.opstack[self.curop].addIns(typegen)
elif type(node.value) == ast.Name and node.value.id == "self":
if type(node.ctx) == ast.Load or self.augassignread:
if self.curclass == -1:
raise ValueError("self should be used within a class.")
if node.attr not in self.attribute2id[self.curclass]:
self.attribute2id[self.curclass][node.attr] = 0
if self.curop != -1 and not asifcond:
symbol = SymbolNode([], [self.opstack[self.curop]], node.attr, self.attribute2id[self.curclass][node.attr], classname = self.classstack[self.curclass], scope = "attribute")
else:
symbol = SymbolNode([], [], node.attr, self.attribute2id[self.curclass][node.attr], classname = self.classstack[self.curclass], scope = "attribute")
symbol.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.lastattribute[self.curclass][node.attr] = symbol
if self.curop != -1 and not asifcond:
self.opstack[self.curop].addIns(symbol)
self.GlobalTG.addNode(symbol)
if self.curfunc != -1:
self.tgstack[self.curfunc].addNode(symbol)
else:
self.attribute2id[self.curclass][node.attr] += 1
if self.curop != -1 and not asifcond:
#occur first time in else branch of if statement
if node.attr not in self.lastattribute[self.curclass] or self.lastattribute[self.curclass][node.attr] == None:
symbol = SymbolNode([], [self.opstack[self.curop]], node.attr, self.attribute2id[self.curclass][node.attr], classname = self.classstack[self.curclass], scope = "attribute")
if self.curfunc != -1:
self.GlobalTG.addNode(symbol)
else:
symbol = SymbolNode([self.lastattribute[self.curclass][node.attr]], [self.opstack[self.curop]], node.attr, self.attribute2id[self.curclass][node.attr], classname = self.classstack[self.curclass], scope = "attribute")
self.lastattribute[self.curclass][node.attr].addOuts(symbol)
else:
if node.attr not in self.lastattribute[self.curclass] or self.lastattribute[self.curclass][node.attr] == None:
symbol = SymbolNode([], [], node.attr, self.attribute2id[self.curclass][node.attr], classname = self.classstack[self.curclass], scope = "attribute")
if self.curfunc != -1:
self.GlobalTG.addNode(symbol)
else:
symbol = SymbolNode([self.lastattribute[self.curclass][node.attr]], [], node.attr, self.attribute2id[self.curclass][node.attr], classname = self.classstack[self.curclass], scope = "attribute")
self.lastattribute[self.curclass][node.attr].addOuts(symbol)
symbol.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.lastattribute[self.curclass][node.attr] = symbol
if self.curop != -1 and not asifcond:
self.opstack[self.curop].addIns(symbol)
self.addNode(symbol)
elif type(node.ctx) == ast.Del:
if node.attr not in self.attribute2id[self.curclass]:
pass
#raise ValueError(node.attr + "does not exist.")
else:
del self.attribute2id[self.curclass][node.attr]
del self.lastattribute[self.curclass][node.attr]
elif type(node.ctx) == ast.Store:
if node.attr not in self.attribute2id[self.curclass]:
self.attribute2id[self.curclass][node.attr] = 0
else:
self.attribute2id[self.curclass][node.attr] += 1
symbol = SymbolNode([self.opstack[self.curop]], [], node.attr, self.attribute2id[self.curclass][node.attr], classname = self.classstack[self.curclass], scope = "attribute", ctx = "Write")
symbol.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.lastattribute[self.curclass][node.attr] = symbol
self.opstack[self.curop].addOuts(symbol)
self.GlobalTG.addNode(symbol)
if self.curfunc != -1:
self.tgstack[self.curfunc].addNode(symbol)
else:
if ((type(node.ctx) == ast.Load or self.augassignread) and
(attrstr not in self.globalvar2id or (attrstr in self.globalvar2id and attrstr not in self.lastglobalvar)) and
(self.curfunc == -1 or (attrstr not in self.localvar2id[self.curfunc] or (attrstr in self.localvar2id[self.curfunc] and attrstr not in self.lastlocalvar[self.curfunc])))):
typegen = TypeGenNode(".", [], [], attr = node.attr)
typegen.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.opstack.append(typegen)
self.curop += 1
self.visit(node.value)
self.opstack.pop(self.curop)
self.curop -= 1
self.addNode(typegen)
if self.curfunc == -1:
self.globalvar2id[attrstr] = 0
if self.curop != -1 and not asifcond:
symbol = SymbolNode([typegen], [self.opstack[self.curop]], attrstr, 0, scope = "global")
else:
symbol = SymbolNode([typegen], [], attrstr, 0, scope = "global")
symbol.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.lastglobalvar[attrstr] = symbol
if self.curop != -1 and not asifcond:
self.opstack[self.curop].addIns(symbol)
self.GlobalTG.addNode(symbol)
elif self.curfunc != -1:
self.localvar2id[self.curfunc][attrstr] = 0
if self.curop != -1 and not asifcond:
symbol = SymbolNode([typegen], [self.opstack[self.curop]], attrstr, 0)
else:
symbol = SymbolNode([typegen], [], attrstr, 0)
symbol.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.lastlocalvar[self.curfunc][attrstr] = symbol
if self.curop != -1 and not asifcond:
self.opstack[self.curop].addIns(symbol)
self.tgstack[self.curfunc].addNode(symbol)
typegen.addOuts(symbol)
elif type(node.ctx) == ast.Store and attrstr not in self.globalvar2id and (self.curfunc == -1 or attrstr not in self.localvar2id[self.curfunc]):
if self.curclass == -1 and self.curfunc == -1:
self.globalvar2id[attrstr] = 0
symbol = SymbolNode([self.opstack[self.curop]], [], attrstr, 0, scope = "global", ctx = "Write")
symbol.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.lastglobalvar[attrstr] = symbol
self.opstack[self.curop].addOuts(symbol)
self.GlobalTG.addNode(symbol)
elif self.curfunc != -1:
self.localvar2id[self.curfunc][attrstr] = 0
symbol = SymbolNode([self.opstack[self.curop]], [], attrstr, 0, ctx = "Write")
symbol.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.lastlocalvar[self.curfunc][attrstr] = symbol
self.opstack[self.curop].addOuts(symbol)
self.tgstack[self.curfunc].addNode(symbol)
elif attrstr in self.globalvar2id or self.curfunc == -1:
if type(node.ctx) == ast.Load or self.augassignread:
self.globalvar2id[attrstr] += 1
if self.curop != -1 and not asifcond:
if self.lastglobalvar[attrstr] == None:
symbol = SymbolNode([], [self.opstack[self.curop]], attrstr, self.globalvar2id[attrstr], scope = "global")
if self.curfunc != -1:
self.GlobalTG.addNode(symbol)
else:
symbol = SymbolNode([self.lastglobalvar[attrstr]], [self.opstack[self.curop]], attrstr, self.globalvar2id[attrstr], scope = "global")
self.lastglobalvar[attrstr].addOuts(symbol)
else:
if self.lastglobalvar[attrstr] == None:
                                symbol = SymbolNode([], [], attrstr, self.globalvar2id[attrstr], scope = "global")
if self.curfunc != -1:
self.GlobalTG.addNode(symbol)
else:
symbol = SymbolNode([self.lastglobalvar[attrstr]], [], attrstr, self.globalvar2id[attrstr], scope = "global")
self.lastglobalvar[attrstr].addOuts(symbol)
symbol.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.lastglobalvar[attrstr] = symbol
if self.curop != -1 and not asifcond:
self.opstack[self.curop].addIns(symbol)
self.addNode(symbol)
elif type(node.ctx) == ast.Del:
del self.globalvar2id[attrstr]
del self.lastglobalvar[attrstr]
elif type(node.ctx) == ast.Store:
self.globalvar2id[attrstr] += 1
symbol = SymbolNode([self.opstack[self.curop]], [], attrstr, self.globalvar2id[attrstr], scope = "global", ctx = "Write")
symbol.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.lastglobalvar[attrstr] = symbol
self.opstack[self.curop].addOuts(symbol)
self.GlobalTG.addNode(symbol)
elif attrstr in self.localvar2id[self.curfunc]:
if type(node.ctx) == ast.Load or self.augassignread:
self.localvar2id[self.curfunc][attrstr] += 1
if self.curop != -1 and not asifcond:
symbol = SymbolNode([self.lastlocalvar[self.curfunc][attrstr]], [self.opstack[self.curop]], attrstr, self.localvar2id[self.curfunc][attrstr])
else:
symbol = SymbolNode([self.lastlocalvar[self.curfunc][attrstr]], [], attrstr, self.localvar2id[self.curfunc][attrstr])
symbol.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.lastlocalvar[self.curfunc][attrstr].addOuts(symbol)
self.lastlocalvar[self.curfunc][attrstr] = symbol
if self.curop != -1 and not asifcond:
self.opstack[self.curop].addIns(symbol)
self.tgstack[self.curfunc].addNode(symbol)
elif type(node.ctx) == ast.Del:
del self.localvar2id[self.curfunc][attrstr]
del self.lastlocalvar[self.curfunc][attrstr]
elif type(node.ctx) == ast.Store:
self.localvar2id[self.curfunc][attrstr] += 1
symbol = SymbolNode([self.opstack[self.curop]], [], attrstr, self.localvar2id[self.curfunc][attrstr], ctx = "Write")
symbol.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.lastlocalvar[self.curfunc][attrstr] = symbol
self.opstack[self.curop].addOuts(symbol)
self.tgstack[self.curfunc].addNode(symbol)
#case 2: depth > 2
else:
if type(node.ctx) == ast.Load or self.augassignread:
#case 1: global variables
if (self.curclass == -1 and self.curfunc == -1) or attrstr in self.globalvar2id:
if attrstr not in self.globalvar2id:
self.globalvar2id[attrstr] = 0
if self.curop != -1 and not asifcond:
symbol = SymbolNode([], [self.opstack[self.curop]], attrstr, self.globalvar2id[attrstr], scope = "global")
else:
symbol = SymbolNode([], [], attrstr, self.globalvar2id[attrstr], scope = "global")
symbol.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
if self.curop != -1 and not asifcond:
self.opstack[self.curop].addIns(symbol)
self.lastglobalvar[attrstr] = symbol
self.GlobalTG.addNode(symbol)
else:
self.globalvar2id[attrstr] += 1
if self.curop != -1 and not asifcond:
if self.lastglobalvar[attrstr] == None:
symbol = SymbolNode([], [self.opstack[self.curop]], attrstr, self.globalvar2id[attrstr], scope = "global")
if self.curfunc != -1:
self.GlobalTG.addNode(symbol)
else:
symbol = SymbolNode([self.lastglobalvar[attrstr]], [self.opstack[self.curop]], attrstr, self.globalvar2id[attrstr], scope = "global")
self.lastglobalvar[attrstr].addOuts(symbol)
else:
if self.lastglobalvar[attrstr] == None:
symbol = SymbolNode([], [], attrstr, self.globalvar2id[attrstr], scope = "global")
if self.curfunc != -1:
self.GlobalTG.addNode(symbol)
else:
symbol = SymbolNode([self.lastglobalvar[attrstr]], [], attrstr, self.globalvar2id[attrstr], scope = "global")
self.lastglobalvar[attrstr].addOuts(symbol)
symbol.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
if self.curop != -1 and not asifcond:
self.opstack[self.curop].addIns(symbol)
self.lastglobalvar[attrstr] = symbol
self.addNode(symbol)
#case 2: local variables
elif self.curfunc != -1:
if attrstr not in self.localvar2id[self.curfunc] or attrstr not in self.lastlocalvar[self.curfunc]:
if attrstr not in self.localvar2id[self.curfunc]:
self.localvar2id[self.curfunc][attrstr] = 0
else:
self.localvar2id[self.curfunc][attrstr] += 1
if self.curop != -1 and not asifcond:
symbol = SymbolNode([], [self.opstack[self.curop]], attrstr, self.localvar2id[self.curfunc][attrstr])
else:
symbol = SymbolNode([], [], attrstr, self.localvar2id[self.curfunc][attrstr])
symbol.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
if self.curop != -1 and not asifcond:
self.opstack[self.curop].addIns(symbol)
self.lastlocalvar[self.curfunc][attrstr] = symbol
self.tgstack[self.curfunc].addNode(symbol)
else:
self.localvar2id[self.curfunc][attrstr] += 1
if self.curop != -1 and not asifcond:
symbol = SymbolNode([self.lastlocalvar[self.curfunc][attrstr]], [self.opstack[self.curop]], attrstr, self.localvar2id[self.curfunc][attrstr])
else:
symbol = SymbolNode([self.lastlocalvar[self.curfunc][attrstr]], [], attrstr, self.localvar2id[self.curfunc][attrstr])
symbol.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.lastlocalvar[self.curfunc][attrstr].addOuts(symbol)
if self.curop != -1 and not asifcond:
self.opstack[self.curop].addIns(symbol)
self.lastlocalvar[self.curfunc][attrstr] = symbol
self.tgstack[self.curfunc].addNode(symbol)
elif type(node.ctx) == ast.Del:
#case 1: global variables
if (self.curclass == -1 and self.curfunc == -1) or attrstr in self.globalvar2id:
del self.lastglobalvar[attrstr]
del self.globalvar2id[attrstr]
#case 2: local variables
elif self.curfunc != -1:
del self.lastlocalvar[self.curfunc][attrstr]
del self.localvar2id[self.curfunc][attrstr]
elif type(node.ctx) == ast.Store:
#case 1: global variables
if (self.curclass == -1 and self.curfunc == -1) or attrstr in self.globalvar2id:
if attrstr not in self.globalvar2id:
self.globalvar2id[attrstr] = 0
else:
self.globalvar2id[attrstr] += 1
symbol = SymbolNode([self.opstack[self.curop]], [], attrstr, self.globalvar2id[attrstr], scope = "global", ctx = "Write")
symbol.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.lastglobalvar[attrstr] = symbol
self.opstack[self.curop].addOuts(symbol)
self.GlobalTG.addNode(symbol)
#case 2: local variables
if self.curfunc != -1:
if attrstr not in self.localvar2id[self.curfunc]:
self.localvar2id[self.curfunc][attrstr] = 0
else:
self.localvar2id[self.curfunc][attrstr] += 1
symbol = SymbolNode([self.opstack[self.curop]], [], attrstr, self.localvar2id[self.curfunc][attrstr], ctx = "Write")
symbol.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.lastlocalvar[self.curfunc][attrstr] = symbol
self.opstack[self.curop].addOuts(symbol)
self.tgstack[self.curfunc].addNode(symbol)
def visit_IfExp(self, node):
typegen = TypeGenNode("IfExp", [], [])
typegen.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.opstack.append(typegen)
self.curop += 1
self.visit_If(node)
self.opstack.pop(self.curop)
self.curop -= 1
if self.curop != -1:
typegen.addOuts(self.opstack[self.curop])
self.opstack[self.curop].addIns(typegen)
self.addNode(typegen)
def visit_JoinedStr(self, node):
typegen = TypeGenNode("JoinedStr", [], [])
typegen.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.opstack.append(typegen)
self.curop += 1
self.generic_visit(node)
self.opstack.pop(self.curop)
self.curop -= 1
if self.curop != -1:
typegen.addOuts(self.opstack[self.curop])
self.opstack[self.curop].addIns(typegen)
self.addNode(typegen)
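    # visit_If models branching: type conditions extracted from the test become branch
    # nodes, the last-variable maps are snapshotted, the true and false bodies are visited
    # against separate copies of those maps, and MergeNodes are added afterwards for every
    # variable whose version diverged between the two branches.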
def visit_If(self, node):
###extract type conditions
branchnodes = self.extractTypeCondition(node.test)
attribute2id_ori = deepcopy(self.attribute2id)
globalvar2id_ori = deepcopy(self.globalvar2id)
localvar2id_ori = deepcopy(self.localvar2id)
removed = []
for n in branchnodes:
var = n.ins[0]
if isinstance(var, SymbolNode):
if var.scope == "global":
self.lastglobalvar[var.name] = n
                    n.branchvar = var.name + str(self.globalvar2id[var.name])
elif var.scope == "local":
self.lastlocalvar[self.curfunc][var.name] = n
n.branchvar = var.name + str(self.localvar2id[self.curfunc][var.name])
elif var.scope == "attribute":
self.lastattribute[self.curclass][var.name] = n
n.branchvar = var.name + str(self.attribute2id[self.curclass][var.name])
else:
removed.append(n)
for n in removed:
if n in branchnodes:
branchnodes.remove(n)
if self.curfunc != -1:
lastlocalvar_backup = copy(self.lastlocalvar[self.curfunc])
if self.curclass != -1:
lastattribute_backup = copy(self.lastattribute[self.curclass])
lastglobalvar_backup = copy(self.lastglobalvar)
for n in branchnodes:
var = n.ins[0]
if var.scope == "global":
self.globalvar2id[var.name] += 1
symbol = SymbolNode([n], [], var.symbol, self.globalvar2id[var.name], scope = "global", extra = True)
symbol.setNodePos(var.lineno, var.columnno, var.columnend)
n.addOuts(symbol)
self.lastglobalvar[var.name] = symbol
self.addNode(symbol)
elif var.scope == "local":
self.localvar2id[self.curfunc][var.name] += 1
symbol = SymbolNode([n], [], var.symbol, self.localvar2id[self.curfunc][var.name], extra = True)
symbol.setNodePos(var.lineno, var.columnno, var.columnend)
n.addOuts(symbol)
self.lastlocalvar[self.curfunc][var.name] = symbol
self.addNode(symbol)
elif var.scope == "attribute":
self.attribute2id[self.curclass][var.name] += 1
symbol = SymbolNode([n], [], var.symbol, self.attribute2id[self.curclass][var.name], classname = var.classname, scope = "attribute", extra = True)
symbol.setNodePos(var.lineno, var.columnno, var.columnend)
n.addOuts(symbol)
self.lastattribute[self.curclass][var.name] = symbol
self.addNode(symbol)
if isinstance(node.body, list):
#for node: If
self.visitfield(node.body)
else:
#for node: IfExp
self.visit(node.body)
if self.curfunc != -1:
lastlocalvar_true = copy(self.lastlocalvar[self.curfunc])
if self.curclass != -1:
lastattribute_true = copy(self.lastattribute[self.curclass])
lastglobalvar_true = copy(self.lastglobalvar)
attribute2id_true = deepcopy(self.attribute2id)
globalvar2id_true = deepcopy(self.globalvar2id)
localvar2id_true = deepcopy(self.localvar2id)
if self.curclass != -1:
self.lastattribute[self.curclass] = copy(lastattribute_backup)
if self.curfunc != -1:
self.lastlocalvar[self.curfunc] = copy(lastlocalvar_backup)
self.lastglobalvar = copy(lastglobalvar_backup)
for n in branchnodes:
var = n.ins[0]
if var.scope == "global":
self.globalvar2id[var.name] += 1
symbol = SymbolNode([n], [], var.symbol, self.globalvar2id[var.name], scope = "global", extra = True)
symbol.setNodePos(var.lineno, var.columnno, var.columnend)
n.addOuts(symbol)
self.lastglobalvar[var.name] = symbol
self.addNode(symbol)
elif var.scope == "local":
self.localvar2id[self.curfunc][var.name] += 1
symbol = SymbolNode([n], [], var.symbol, self.localvar2id[self.curfunc][var.name], extra = True)
symbol.setNodePos(var.lineno, var.columnno, var.columnend)
n.addOuts(symbol)
self.lastlocalvar[self.curfunc][var.name] = symbol
self.addNode(symbol)
elif var.scope == "attribute":
self.attribute2id[self.curclass][var.name] += 1
symbol = SymbolNode([n], [], var.symbol, self.attribute2id[self.curclass][var.name], classname = var.classname, scope = "attribute", extra = True)
symbol.setNodePos(var.lineno, var.columnno, var.columnend)
n.addOuts(symbol)
self.lastattribute[self.curclass][var.name] = symbol
self.addNode(symbol)
if isinstance(node.orelse, list):
#for node: If
self.visitfield(node.orelse)
else:
#for node: IfExp
self.visit(node.orelse)
#TODO branch and merge cases
for key in self.globalvar2id:
#variable created in true body and used in false body
if key not in lastglobalvar_backup and key in lastglobalvar_true and key in self.lastglobalvar:
mergenode = MergeNode([self.lastglobalvar[key], lastglobalvar_true[key]], [], self.buildmergename([self.lastglobalvar[key], lastglobalvar_true[key]]))
self.lastglobalvar[key].addOuts(mergenode)
lastglobalvar_true[key].addOuts(mergenode)
self.lastglobalvar[key] = mergenode
self.GlobalTG.addNode(mergenode)
if self.curfunc != -1:
self.tgstack[self.curfunc].addNode(mergenode)
#variable created in true body and not used in false body
elif key not in lastglobalvar_backup and key in lastglobalvar_true and key not in self.lastglobalvar:
self.lastglobalvar[key] = lastglobalvar_true[key]
#variable created in false body
elif key not in lastglobalvar_backup and key not in lastglobalvar_true:
pass
#variable created before if and used in both true and false body or used only in false body
elif key in globalvar2id_true and globalvar2id_true[key] != self.globalvar2id[key] and self.lastglobalvar[key] != None and lastglobalvar_true[key] != None:
mergenode = MergeNode([self.lastglobalvar[key], lastglobalvar_true[key]], [], self.buildmergename([self.lastglobalvar[key], lastglobalvar_true[key]]))
self.lastglobalvar[key].addOuts(mergenode)
lastglobalvar_true[key].addOuts(mergenode)
self.lastglobalvar[key] = mergenode
self.GlobalTG.addNode(mergenode)
if self.curfunc != -1:
self.tgstack[self.curfunc].addNode(mergenode)
            #variable created before if and used only in true body
elif key in globalvar2id_true and globalvar2id_true[key] == self.globalvar2id[key] and self.globalvar2id[key] > globalvar2id_ori[key] and key in lastglobalvar_backup and lastglobalvar_backup[key] != None and lastglobalvar_true[key] != None:
mergenode = MergeNode([lastglobalvar_backup[key], lastglobalvar_true[key]], [], self.buildmergename([lastglobalvar_backup[key], lastglobalvar_true[key]]))
lastglobalvar_backup[key].addOuts(mergenode)
lastglobalvar_true[key].addOuts(mergenode)
self.lastglobalvar[key] = mergenode
self.GlobalTG.addNode(mergenode)
if self.curfunc != -1:
self.tgstack[self.curfunc].addNode(mergenode)
if self.curfunc != -1:
for key in self.localvar2id[self.curfunc]:
#variable created in true body and used in false body
if key not in lastlocalvar_backup and key in lastlocalvar_true and key in self.lastlocalvar[self.curfunc]:
mergenode = MergeNode([self.lastlocalvar[self.curfunc][key], lastlocalvar_true[key]], [], self.buildmergename([self.lastlocalvar[self.curfunc][key], lastlocalvar_true[key]]))
self.lastlocalvar[self.curfunc][key].addOuts(mergenode)
lastlocalvar_true[key].addOuts(mergenode)
self.lastlocalvar[self.curfunc][key] = mergenode
self.tgstack[self.curfunc].addNode(mergenode)
#variable created in true body and not used in false body
elif key not in lastlocalvar_backup and key in lastlocalvar_true and key not in self.lastlocalvar[self.curfunc]:
self.lastlocalvar[self.curfunc][key] = lastlocalvar_true[key]
#variable created in false body
elif key not in lastlocalvar_backup and key not in lastlocalvar_true:
pass
#variable created before if and used in both true and false body or used only in false body
elif key in localvar2id_true[self.curfunc] and localvar2id_true[self.curfunc][key] != self.localvar2id[self.curfunc][key] and lastlocalvar_true[key] != None and self.lastlocalvar[self.curfunc][key] != None:
mergenode = MergeNode([self.lastlocalvar[self.curfunc][key], lastlocalvar_true[key]], [], self.buildmergename([self.lastlocalvar[self.curfunc][key], lastlocalvar_true[key]]))
self.lastlocalvar[self.curfunc][key].addOuts(mergenode)
lastlocalvar_true[key].addOuts(mergenode)
self.lastlocalvar[self.curfunc][key] = mergenode
self.tgstack[self.curfunc].addNode(mergenode)
                #variable created before if and used only in true body
elif key in localvar2id_true[self.curfunc] and localvar2id_true[self.curfunc][key] == self.localvar2id[self.curfunc][key] and key in lastlocalvar_backup and self.localvar2id[self.curfunc][key] > localvar2id_ori[self.curfunc][key] and lastlocalvar_backup[key] != None and lastlocalvar_true[key] != None:
mergenode = MergeNode([lastlocalvar_backup[key], lastlocalvar_true[key]], [], self.buildmergename([lastlocalvar_backup[key], lastlocalvar_true[key]]))
lastlocalvar_backup[key].addOuts(mergenode)
lastlocalvar_true[key].addOuts(mergenode)
self.lastlocalvar[self.curfunc][key] = mergenode
self.tgstack[self.curfunc].addNode(mergenode)
if self.curclass != -1:
for key in self.attribute2id[self.curclass]:
#variable created in true body and used in false body
if key not in lastattribute_backup and key in lastattribute_true and key in self.lastattribute[self.curclass]:
mergenode = MergeNode([self.lastattribute[self.curclass][key], lastattribute_true[key]], [], self.buildmergename([self.lastattribute[self.curclass][key], lastattribute_true[key]]))
self.lastattribute[self.curclass][key].addOuts(mergenode)
lastattribute_true[key].addOuts(mergenode)
self.lastattribute[self.curclass][key] = mergenode
self.addNode(mergenode)
#variable created in true body and not used in false body
elif key not in lastattribute_backup and key in lastattribute_true and key not in self.lastattribute[self.curclass]:
self.lastattribute[self.curclass][key] = lastattribute_true[key]
#variable created in false body
elif key not in lastattribute_backup and key not in lastattribute_true:
pass
#variable created before if and used in both true and false body or used only in false body
elif key in attribute2id_true[self.curclass] and attribute2id_true[self.curclass][key] != self.attribute2id[self.curclass][key] and self.lastattribute[self.curclass][key] != None and lastattribute_true[key] != None:
mergenode = MergeNode([self.lastattribute[self.curclass][key], lastattribute_true[key]], [], self.buildmergename([self.lastattribute[self.curclass][key], lastattribute_true[key]]))
self.lastattribute[self.curclass][key].addOuts(mergenode)
lastattribute_true[key].addOuts(mergenode)
self.lastattribute[self.curclass][key] = mergenode
self.addNode(mergenode)
elif key in attribute2id_true[self.curclass] and attribute2id_true[self.curclass][key] == self.attribute2id[self.curclass][key] and key in lastattribute_backup and self.attribute2id[self.curclass][key] > attribute2id_ori[self.curclass][key] and lastattribute_backup[key] != None and lastattribute_true[key] != None:
mergenode = MergeNode([lastattribute_backup[key], lastattribute_true[key]], [], self.buildmergename([lastattribute_backup[key], lastattribute_true[key]]))
lastattribute_backup[key].addOuts(mergenode)
lastattribute_true[key].addOuts(mergenode)
self.lastattribute[self.curclass][key] = mergenode
self.addNode(mergenode)
#branch nodes should not exist in lastvar map
for n in branchnodes:
var = n.ins[0]
if var.scope == "global" and self.lastglobalvar[var.name] == n:
self.globalvar2id[var.name] += 1
symbol = SymbolNode([n], [], var.symbol, self.globalvar2id[var.name], scope = "global")
symbol.setNodePos(var.lineno, var.columnno, var.columnend)
n.addOuts(symbol)
self.lastglobalvar[var.name] = symbol
self.addNode(symbol)
elif var.scope == "local" and self.lastlocalvar[self.curfunc][var.name] == n:
self.localvar2id[self.curfunc][var.name] += 1
symbol = SymbolNode([n], [], var.symbol, self.localvar2id[self.curfunc][var.name])
symbol.setNodePos(var.lineno, var.columnno, var.columnend)
n.addOuts(symbol)
self.lastlocalvar[self.curfunc][var.name] = symbol
self.addNode(symbol)
elif var.scope == "attribute" and self.lastattribute[self.curclass][var.name] == n:
self.attribute2id[self.curclass][var.name] += 1
symbol = SymbolNode([n], [], var.symbol, self.attribute2id[self.curclass][var.name])
symbol.setNodePos(var.lineno, var.columnno, var.columnend)
n.addOuts(symbol)
self.lastattribute[self.curclass][var.name] = symbol
self.addNode(symbol)
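    # visit_Try snapshots the last-variable maps so exception handlers are analyzed against
    # the pre-try state, restores the post-try state before the else/finally blocks, and
    # relies on addMerge4Except / addMerge4Finally to insert the corresponding merge nodes.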
def visit_Try(self, node):
if self.curfunc != -1:
lastlocalvar_ori = copy(self.lastlocalvar[self.curfunc])
if self.curclass != -1:
lastattribute_ori = copy(self.lastattribute[self.curclass])
lastglobalvar_ori = copy(self.lastglobalvar)
if self.curfunc != -1:
self.tgstack[self.curfunc].intry += 1
else:
self.GlobalTG.intry += 1
self.visitfield(node.body)
if self.curfunc != -1:
lastlocalvar_try = copy(self.lastlocalvar[self.curfunc])
if self.curclass != -1:
lastattribute_try = copy(self.lastattribute[self.curclass])
lastglobalvar_try = copy(self.lastglobalvar)
if self.curfunc != -1:
self.lastlocalvar[self.curfunc] = copy(lastlocalvar_ori)
if self.curclass != -1:
self.lastattribute[self.curclass] = copy(lastattribute_ori)
self.lastglobalvar = copy(lastglobalvar_ori)
self.addMerge4Except()
if self.curfunc != -1:
self.tgstack[self.curfunc].intry -= 1
if len(self.tgstack[self.curfunc].trybuffer) > self.tgstack[self.curfunc].intry:
self.tgstack[self.curfunc].trybuffer.pop(self.tgstack[self.curfunc].intry)
else:
self.GlobalTG.intry -= 1
if len(self.GlobalTG.trybuffer) > self.GlobalTG.intry:
self.GlobalTG.trybuffer.pop(self.GlobalTG.intry)
self.visitfield(node.handlers)
if self.curfunc != -1:
self.lastlocalvar[self.curfunc] = copy(lastlocalvar_try)
if self.curclass != -1:
self.lastattribute[self.curclass] = copy(lastattribute_try)
self.lastglobalvar = copy(lastglobalvar_try)
self.visitfield(node.orelse)
self.addMerge4Finally()
self.visitfield(node.finalbody)
if self.curfunc != -1:
self.tgstack[self.curfunc].inexcept = 0
self.tgstack[self.curfunc].exceptbuffer.clear()
else:
self.GlobalTG.inexcept = 0
self.GlobalTG.exceptbuffer.clear()
def visit_ExceptHandler(self, node):
if self.curfunc != -1:
lastlocalvar_ori = copy(self.lastlocalvar[self.curfunc])
if self.curclass != -1:
lastattribute_ori = copy(self.lastattribute[self.curclass])
lastglobalvar_ori = copy(self.lastglobalvar)
if node.name != None:
symbol = SymbolNode([], [], node.name, 0)
symbol.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
typeobject = TypeObject("Exception", 0)
symbol.types.append(typeobject)
if self.curfunc != -1:
self.localvar2id[self.curfunc][node.name] = 1
self.lastlocalvar[self.curfunc][node.name] = symbol
else:
self.globalvar2id[node.name] = 1
self.lastglobalvar[node.name] = symbol
self.addNode(symbol)
if self.curfunc != -1:
self.tgstack[self.curfunc].inexcept += 1
else:
self.GlobalTG.inexcept += 1
self.visitfield(node.body)
if self.curfunc != -1:
self.lastlocalvar[self.curfunc] = copy(lastlocalvar_ori)
if node.name != None and node.name in self.localvar2id[self.curfunc]:
del self.localvar2id[self.curfunc][node.name]
deletekeys = []
for key in self.localvar2id[self.curfunc]:
if key.startswith(node.name + "_@_"):
deletekeys.append(key)
for key in deletekeys:
del self.localvar2id[self.curfunc][key]
if self.curclass != -1:
self.lastattribute[self.curclass] = copy(lastattribute_ori)
self.lastglobalvar = copy(lastglobalvar_ori)
if node.name != None and node.name in self.globalvar2id:
del self.globalvar2id[node.name]
deletekeys = []
for key in self.globalvar2id:
if key.startswith(node.name + "_@_"):
deletekeys.append(key)
for key in deletekeys:
del self.globalvar2id[key]
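    # Loop handling (For/AsyncFor/While): the loop target is bound to the iterable through a
    # "forin" TypeGenNode, the body is visited once, and MergeNodes join the pre-loop and
    # post-body versions of every variable written in the loop, approximating the effect of
    # repeated iterations.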
def visit_AsyncFor(self, node):
self.visit_For(node)
def visit_For(self, node):
if len(node.orelse) != 0:
raise ValueError("Currently we do not support for loops with else statements.")
typegen = TypeGenNode("forin", [], [])
typegen.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.opstack.append(typegen)
self.curop += 1
self.visit(node.target)
self.visit(node.iter)
self.opstack.pop(self.curop)
self.curop -= 1
attribute2id_ori = deepcopy(self.attribute2id)
globalvar2id_ori = deepcopy(self.globalvar2id)
localvar2id_ori = deepcopy(self.localvar2id)
if self.curfunc != -1:
lastlocalvar_ori = copy(self.lastlocalvar[self.curfunc])
if self.curclass != -1:
lastattribute_ori = copy(self.lastattribute[self.curclass])
lastglobalvar_ori = copy(self.lastglobalvar)
if self.curfunc != -1:
self.tgstack[self.curfunc].inloop += 1
else:
self.GlobalTG.inloop += 1
self.visitfield(node.body)
#add merge nodes for vars after this loop
if self.curfunc != -1:
for key in self.localvar2id[self.curfunc]:
if key in localvar2id_ori[self.curfunc] and key in lastlocalvar_ori and self.localvar2id[self.curfunc][key] > localvar2id_ori[self.curfunc][key]:
mergenode = MergeNode([self.lastlocalvar[self.curfunc][key], lastlocalvar_ori[key]], [], self.buildmergename([self.lastlocalvar[self.curfunc][key], lastlocalvar_ori[key]]))
self.lastlocalvar[self.curfunc][key].addOuts(mergenode)
lastlocalvar_ori[key].addOuts(mergenode)
self.lastlocalvar[self.curfunc][key] = mergenode
self.addNode(mergenode)
elif self.curclass != -1:
for key in self.attribute2id[self.curclass]:
if key in attribute2id_ori[self.curclass] and key in lastattribute_ori and self.attribute2id[self.curclass][key] > attribute2id_ori[self.curclass][key] and self.lastattribute[self.curclass][key] != None and lastattribute_ori[key] != None:
mergenode = MergeNode([self.lastattribute[self.curclass][key], lastattribute_ori[key]], [], self.buildmergename([self.lastattribute[self.curclass][key], lastattribute_ori[key]]))
self.lastattribute[self.curclass][key].addOuts(mergenode)
lastattribute_ori[key].addOuts(mergenode)
self.lastattribute[self.curclass][key] = mergenode
self.addNode(mergenode)
for key in self.globalvar2id:
if key in globalvar2id_ori and key in lastglobalvar_ori and self.globalvar2id[key] > globalvar2id_ori[key] and self.lastglobalvar[key] != None and lastglobalvar_ori[key] != None:
mergenode = MergeNode([self.lastglobalvar[key], lastglobalvar_ori[key]], [], self.buildmergename([self.lastglobalvar[key], lastglobalvar_ori[key]]))
self.lastglobalvar[key].addOuts(mergenode)
lastglobalvar_ori[key].addOuts(mergenode)
self.lastglobalvar[key] = mergenode
self.addNode(mergenode)
#add merges for the first use of variables in this loop
self.addMergeNodes()
if self.curfunc != -1:
self.tgstack[self.curfunc].inloop -= 1
if len(self.tgstack[self.curfunc].loopbuffer) > self.tgstack[self.curfunc].inloop:
self.tgstack[self.curfunc].loopbuffer.pop(self.tgstack[self.curfunc].inloop)
else:
self.GlobalTG.inloop -= 1
if len(self.GlobalTG.loopbuffer) > self.GlobalTG.inloop:
self.GlobalTG.loopbuffer.pop(self.GlobalTG.inloop)
self.addNode(typegen)
def visit_While(self, node):
if len(node.orelse) != 0:
            raise ValueError("Currently we do not support while loops with else statements.")
self.asifcond = True
self.visit(node.test)
self.asifcond = False
attribute2id_ori = deepcopy(self.attribute2id)
globalvar2id_ori = deepcopy(self.globalvar2id)
localvar2id_ori = deepcopy(self.localvar2id)
if self.curfunc != -1:
lastlocalvar_ori = copy(self.lastlocalvar[self.curfunc])
if self.curclass != -1:
lastattribute_ori = copy(self.lastattribute[self.curclass])
lastglobalvar_ori = copy(self.lastglobalvar)
if self.curfunc != -1:
self.tgstack[self.curfunc].inloop += 1
else:
self.GlobalTG.inloop += 1
self.visitfield(node.body)
#add merges for the first use of variables in this loop
self.addMergeNodes()
#add merge nodes for vars after this loop
if self.curfunc != -1:
for key in self.localvar2id[self.curfunc]:
                if key in localvar2id_ori[self.curfunc] and key in lastlocalvar_ori and self.localvar2id[self.curfunc][key] > localvar2id_ori[self.curfunc][key]:
mergenode = MergeNode([self.lastlocalvar[self.curfunc][key], lastlocalvar_ori[key]], [], self.buildmergename([self.lastlocalvar[self.curfunc][key], lastlocalvar_ori[key]]))
self.lastlocalvar[self.curfunc][key].addOuts(mergenode)
lastlocalvar_ori[key].addOuts(mergenode)
self.lastlocalvar[self.curfunc][key] = mergenode
self.addNode(mergenode)
elif self.curclass != -1:
for key in self.attribute2id[self.curclass]:
                if key in attribute2id_ori[self.curclass] and key in lastattribute_ori and self.attribute2id[self.curclass][key] > attribute2id_ori[self.curclass][key] and self.lastattribute[self.curclass][key] != None and lastattribute_ori[key] != None:
mergenode = MergeNode([self.lastattribute[self.curclass][key], lastattribute_ori[key]], [], self.buildmergename([self.lastattribute[self.curclass][key], lastattribute_ori[key]]))
self.lastattribute[self.curclass][key].addOuts(mergenode)
lastattribute_ori[key].addOuts(mergenode)
self.lastattribute[self.curclass][key] = mergenode
self.addNode(mergenode)
for key in self.globalvar2id:
            if key in globalvar2id_ori and key in lastglobalvar_ori and self.globalvar2id[key] > globalvar2id_ori[key] and lastglobalvar_ori[key] != None and self.lastglobalvar[key] != None:
mergenode = MergeNode([self.lastglobalvar[key], lastglobalvar_ori[key]], [], self.buildmergename([self.lastglobalvar[key], lastglobalvar_ori[key]]))
self.lastglobalvar[key].addOuts(mergenode)
lastglobalvar_ori[key].addOuts(mergenode)
self.lastglobalvar[key] = mergenode
self.addNode(mergenode)
if self.curfunc != -1:
self.tgstack[self.curfunc].loopbuffer.pop(self.tgstack[self.curfunc].inloop - 1)
self.tgstack[self.curfunc].inloop -= 1
else:
self.GlobalTG.loopbuffer.pop(self.GlobalTG.inloop - 1)
self.GlobalTG.inloop -= 1
def visit_Break(self, node):
logger.warning("Break statement at line {} visited, it may change the data flow but currently HiTyper does not handle it.".format(node.lineno))
def visit_Continue(self, node):
logger.warning("Continue statement at line {} visited, it may change the data flow but currently HiTyper does not handle it.".format(node.lineno))
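    # visit_With records the with-item target names so that they, and any attribute chains
    # rooted at them, can be dropped from the variable maps once the with block ends.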
def visit_With(self, node):
self.withpos.append([node.lineno, node.col_offset, node.end_col_offset])
self.visitfield(node.items)
self.visitfield(node.body)
for item in self.withitemnames[len(self.withitemnames) - 1]:
if self.curfunc != -1 and item in self.localvar2id[self.curfunc]:
del self.localvar2id[self.curfunc][item]
del self.lastlocalvar[self.curfunc][item]
deletekeys = []
for key in self.localvar2id[self.curfunc]:
if key.startswith(item +"_@_"):
deletekeys.append(key)
for key in deletekeys:
del self.localvar2id[self.curfunc][key]
if key in self.lastlocalvar[self.curfunc]:
del self.lastlocalvar[self.curfunc][key]
elif item in self.globalvar2id:
del self.globalvar2id[item]
del self.lastglobalvar[item]
deletekeys = []
for key in self.globalvar2id:
if key.startswith(item + "_@_"):
deletekeys.append(key)
for key in deletekeys:
del self.globalvar2id[key]
if key in self.lastglobalvar:
del self.lastglobalvar[key]
self.withitemnames.pop(len(self.withitemnames) - 1)
self.withpos.pop(len(self.withpos) - 1)
def visit_AsyncWith(self, node):
self.visit_With(node)
def visit_withitem(self, node):
if node.optional_vars != None:
typegen = TypeGenNode("=", [], [])
index = len(self.withpos) - 1
typegen.setNodePos(self.withpos[index][0], self.withpos[index][1], self.withpos[index][2])
self.opstack.append(typegen)
self.curop += 1
self.visit(node.context_expr)
self.visit(node.optional_vars)
self.withitemnames.append([Attribute2Str(node.optional_vars)])
self.opstack.pop(self.curop)
self.curop -= 1
self.addNode(typegen)
else:
self.asifcond = True
self.visit(node.context_expr)
self.asifcond = False
self.withitemnames.append([])
def visit_BoolOp(self, node):
asifcond = self.asifcond
if self.asifcond:
self.asifcond = False
typegen = TypeGenNode(AST2Op[type(node.op)], [], [])
typegen.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.opstack.append(typegen)
self.curop += 1
if len(node.values) > 2:
self.visit(node.values[0])
self.visit(node.values[1])
else:
self.visitfield(node.values)
self.opstack.pop(self.curop)
self.curop -= 1
if len(node.values) > 2:
prev_typegen = typegen
for i in range(2, len(node.values)):
curnode = node.values[i]
more_typegen = TypeGenNode(AST2Op[type(node.op)], [prev_typegen], [])
prev_typegen.addOuts(more_typegen)
more_typegen.setNodePos(curnode.lineno, curnode.col_offset, curnode.end_col_offset)
self.opstack.append(more_typegen)
self.curop += 1
self.visit(curnode)
self.opstack.pop(self.curop)
self.curop -= 1
self.addNode(more_typegen)
prev_typegen = more_typegen
else:
prev_typegen = typegen
if self.curop != -1 and not asifcond:
self.opstack[self.curop].addIns(prev_typegen)
prev_typegen.addOuts(self.opstack[self.curop])
#typegen.performTypingRules()
self.addNode(typegen)
def visit_Compare(self, node):
lastcompare = None
i = -1
asifcond = self.asifcond
if self.asifcond == True:
self.asifcond = False
for op in node.ops:
i += 1
if lastcompare == None:
typegen = TypeGenNode(AST2Op[type(op)], [], [])
typegen.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.addNode(typegen)
self.opstack.append(typegen)
self.curop += 1
self.visit(node.left)
self.visit(node.comparators[0])
self.opstack.pop(self.curop)
self.curop -= 1
# typegen.performTypingRules()
lastcompare = typegen
else:
typegen = TypeGenNode(AST2Op[type(op)], [], [])
typegen.setNodePos(node.comparators[i-1].lineno, node.comparators[i-1].col_offset, node.comparators[i-1].end_col_offset)
self.addNode(typegen)
self.opstack.append(typegen)
self.curop += 1
self.visit(node.comparators[i-1])
self.visit(node.comparators[i])
self.opstack.pop(self.curop)
self.curop -= 1
typegen2 = TypeGenNode("and", [lastcompare, typegen], [])
lastcompare.addOuts(typegen2)
typegen.addOuts(typegen2)
typegen2.setNodePos(node.comparators[i].lineno, node.comparators[i].col_offset, node.comparators[i].end_col_offset)
lastcompare = typegen2
# typegen2.performTypingRules()
self.addNode(typegen2)
if self.curop != -1 and not asifcond:
lastcompare.addOuts(self.opstack[self.curop])
self.opstack[self.curop].addIns(lastcompare)
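    # Collection literals get dedicated read/write TypeGenNodes (List/Tuple/Set/Dict) whose
    # element visits become their inputs; Dict_Read also stores a splitindex separating the
    # visited keys from the values.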
def visit_List(self, node):
if type(node.ctx) == ast.Store:
typegen = TypeGenNode("List_Write", [self.opstack[self.curop]], [])
self.opstack[self.curop].addOuts(typegen)
else:
typegen = TypeGenNode("List_Read", [], [self.opstack[self.curop]])
self.opstack[self.curop].addIns(typegen)
typegen.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.opstack.append(typegen)
self.curop += 1
self.generic_visit(node)
self.opstack.pop(self.curop)
self.curop -= 1
#typegen.performTypingRules(iterable=True)
self.addNode(typegen)
def visit_Tuple(self, node):
if type(node.ctx) == ast.Store:
typegen = TypeGenNode("Tuple_Write", [self.opstack[self.curop]], [])
self.opstack[self.curop].addOuts(typegen)
else:
typegen = TypeGenNode("Tuple_Read", [], [self.opstack[self.curop]])
self.opstack[self.curop].addIns(typegen)
typegen.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.opstack.append(typegen)
self.curop += 1
self.generic_visit(node)
self.opstack.pop(self.curop)
self.curop -= 1
#typegen.performTypingRules(iterable=True)
self.addNode(typegen)
def visit_Set(self, node):
typegen = TypeGenNode("Set_Read", [], [self.opstack[self.curop]])
typegen.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.opstack[self.curop].addIns(typegen)
self.opstack.append(typegen)
self.curop += 1
self.generic_visit(node)
self.opstack.pop(self.curop)
self.curop -= 1
#typegen.performTypingRules(iterable=True)
self.addNode(typegen)
def visit_Dict(self, node):
typegen = TypeGenNode("Dict_Read", [], [self.opstack[self.curop]], splitindex = len(node.keys))
typegen.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.opstack[self.curop].addIns(typegen)
self.opstack.append(typegen)
self.curop += 1
self.visitfield(node.keys)
self.visitfield(node.values)
self.opstack.pop(self.curop)
self.curop -= 1
#typegen.performTypingRules(iterable=True)
self.addNode(typegen)
def visit_Return(self, node):
name = "Return_Value@" + self.funcstack[self.curfunc]
if self.curclass != -1:
name = name + "@" + self.classstack[self.curclass]
symbol = SymbolNode([], [], name, 0, ctx = "Return")
symbol.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.opstack.append(symbol)
self.curop += 1
self.generic_visit(node)
self.opstack.pop(self.curop)
self.curop -= 1
self.addNode(symbol)
def visit_Lambda(self, node):
        logger.warning("Lambda function at line {} visited, currently HiTyper does not support Lambda functions, which may result in incorrect inference.".format(node.lineno))
typenode = TypeNode([self.opstack[self.curop]], TypeObject("Callable", 0))
typenode.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.opstack[self.curop].addIns(typenode)
self.addNode(typenode)
def visit_Expr(self, node):
if type(node.value) != ast.Constant:
self.visit(node.value)
def visit_Await(self, node):
self.visit(node.value)
def visit_comprehension(self, node):
typegen = TypeGenNode("forin", [], [])
typegen.setNodePos(node.iter.lineno, node.iter.col_offset, node.iter.end_col_offset)
self.opstack.append(typegen)
self.curop += 1
self.visit(node.iter)
self.forin = True
self.visit(node.target)
self.forin = False
self.opstack.pop(self.curop)
self.curop -= 1
self.asifcond = True
self.visitfield(node.ifs)
self.asifcond = False
self.addNode(typegen)
def visit_DictComp(self, node):
asifcond = self.asifcond
if self.asifcond:
self.asifcond = False
typegen = TypeGenNode("DictComp", [], [])
typegen.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.visitfield(node.generators)
self.opstack.append(typegen)
self.curop += 1
self.visit(node.key)
self.visit(node.value)
self.opstack.pop(self.curop)
self.curop -= 1
if not asifcond and self.curop != -1:
self.opstack[self.curop].addIns(typegen)
typegen.addOuts(self.opstack[self.curop])
self.addNode(typegen)
def visit_ListComp(self, node):
asifcond = self.asifcond
if self.asifcond:
self.asifcond = False
typegen = TypeGenNode("ListComp", [], [])
typegen.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.visitfield(node.generators)
self.opstack.append(typegen)
self.curop += 1
self.visit(node.elt)
self.opstack.pop(self.curop)
self.curop -= 1
if not asifcond and self.curop != -1:
self.opstack[self.curop].addIns(typegen)
typegen.addOuts(self.opstack[self.curop])
self.addNode(typegen)
def visit_SetComp(self, node):
asifcond = self.asifcond
if self.asifcond:
self.asifcond = False
typegen = TypeGenNode("SetComp", [], [])
typegen.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.visitfield(node.generators)
self.opstack.append(typegen)
self.curop += 1
self.visit(node.elt)
self.opstack.pop(self.curop)
self.curop -= 1
if not asifcond and self.curop != -1:
self.opstack[self.curop].addIns(typegen)
typegen.addOuts(self.opstack[self.curop])
self.addNode(typegen)
def visit_GeneratorExp(self, node):
asifcond = self.asifcond
if self.asifcond:
self.asifcond = False
typegen = TypeGenNode("GeneratorExp", [], [])
typegen.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.visitfield(node.generators)
self.opstack.append(typegen)
self.curop += 1
self.visit(node.elt)
self.opstack.pop(self.curop)
self.curop -= 1
if not asifcond and self.curop != -1:
self.opstack[self.curop].addIns(typegen)
typegen.addOuts(self.opstack[self.curop])
self.addNode(typegen)
def visit_Yield(self, node):
typegen = TypeGenNode("yield", [], [])
typegen.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.opstack.append(typegen)
self.curop += 1
if node.value != None:
self.visit(node.value)
self.opstack.pop(self.curop)
self.curop -= 1
if len(self.opstack) == 0:
name = "Return_Value@" + self.funcstack[self.curfunc]
if self.curclass != -1:
name = name + "@" + self.classstack[self.curclass]
symbol = SymbolNode([typegen], [], name, 0, ctx = "Return")
symbol.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
typegen.addOuts(symbol)
self.addNode(symbol)
else:
self.opstack[self.curop].addIns(typegen)
typegen.addOuts(self.opstack[self.curop])
self.addNode(typegen)
def visit_YieldFrom(self, node):
typegen = TypeGenNode("yield", [], [])
typegen.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
self.opstack.append(typegen)
self.curop += 1
if node.value != None:
self.visit(node.value)
self.opstack.pop(self.curop)
self.curop -= 1
if len(self.opstack) == 0:
name = "Return_Value@" + self.funcstack[self.curfunc]
if self.curclass != -1:
name = name + "@" + self.classstack[self.curclass]
symbol = SymbolNode([typegen], [], name, 0, ctx = "Return")
symbol.setNodePos(node.lineno, node.col_offset, node.end_col_offset)
typegen.addOuts(symbol)
self.addNode(symbol)
else:
self.opstack[self.curop].addIns(typegen)
typegen.addOuts(self.opstack[self.curop])
self.addNode(typegen)
def visit_Assert(self, node):
        logger.warning("Assert statement visited at Line {}, it may change the data flow of the program, but HiTyper currently ignores it.".format(node.lineno))
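    # finalize: after a graph is built, any symbol or return-value node that ended up with
    # several input edges gets a MergeNode interposed so that each node keeps a single
    # producer.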
def finalize(self, tg):
if isinstance(tg, TypeGraph):
for key in tg.symbolnodes:
for n in tg.symbolnodes[key]:
if len(n.ins) > 1:
mergenode = MergeNode(n.ins, [n], self.buildmergename(n.ins))
for innode in n.ins:
innode.outs[innode.outs.index(n)] = mergenode
n.ins = [mergenode]
self.addNode(mergenode)
for n in tg.returnvaluenodes:
if len(n.ins) > 1:
mergenode = MergeNode(n.ins, [n], self.buildmergename(n.ins))
for innode in n.ins:
innode.outs[innode.outs.index(n)] = mergenode
n.ins = [mergenode]
self.addNode(mergenode)
else:
for key in tg.globalsymbolnodes:
for n in tg.globalsymbolnodes[key]:
if len(n.ins) > 1:
mergenode = MergeNode(n.ins, [n], self.buildmergename(n.ins))
for innode in n.ins:
innode.outs[innode.outs.index(n)] = mergenode
n.ins = [mergenode]
self.addNode(mergenode)
def canRemove(self, node):
for i in node.ins:
if isinstance(i, BranchNode):
return False
return True
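    # optimize: iteratively removes merge and branch nodes that have no consumers (or only a
    # "PlaceHolder" consumer), then reorders the remaining branch outputs so the placeholder
    # slot is dropped.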
def optimize(self, tg):
#remove redundant merge nodes (for which do not have output nodes)
if isinstance(tg, TypeGraph):
changed = True
while(changed):
removenodes = []
for n in tg.mergenodes:
if len(n.outs) == 0 and self.canRemove(n):
removenodes.append(n)
changed = False
for n in removenodes:
for innode in n.ins:
if n in innode.outs:
innode.outs.remove(n)
tg.mergenodes.remove(n)
tg.nodes.remove(n)
changed = True
else:
changed = True
while(changed):
removenodes = []
for n in tg.globalmergenodes:
if len(n.outs) == 0 and self.canRemove(n):
removenodes.append(n)
changed = False
for n in removenodes:
for innode in n.ins:
if n in innode.outs:
innode.outs.remove(n)
tg.globalmergenodes.remove(n)
tg.globalnodes.remove(n)
changed = True
#remove redundant branch nodes (those that do not have output nodes)
if isinstance(tg, TypeGraph):
changed = True
while(changed):
removenodes = []
for n in tg.branchnodes:
if (len(n.outs) == 0 and self.canRemove(n)) or (len(n.outs) == 1 and n.outs[0] == "PlaceHolder"):
removenodes.append(n)
changed = False
for n in removenodes:
for innode in n.ins:
if n in innode.outs:
innode.outs.remove(n)
tg.branchnodes.remove(n)
tg.nodes.remove(n)
changed = True
else:
changed = True
while(changed):
removenodes = []
for n in tg.globalbranchnodes:
if (len(n.outs) == 0 and self.canRemove(n)) or (len(n.outs) == 1 and n.outs[0] == "PlaceHolder"):
removenodes.append(n)
changed = False
for n in removenodes:
for innode in n.ins:
if n in innode.outs:
innode.outs.remove(n)
tg.globalbranchnodes.remove(n)
tg.globalnodes.remove(n)
changed = True
#refine branch nodes
if isinstance(tg, TypeGraph):
for n in tg.branchnodes:
if len(n.outs) == 3 and n.outs[0] == "PlaceHolder":
outs = [n.outs[2], n.outs[1]]
n.outs = outs
else:
for n in tg.globalbranchnodes:
if len(n.outs) == 3 and n.outs[0] == "PlaceHolder":
outs = [n.outs[2], n.outs[1]]
n.outs = outs
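# A hedged reading of the refinement above (inferred from the code, not from
# HiTyper documentation): a branch node whose outs are
# ["PlaceHolder", <second>, <third>] is rewritten so that outs becomes
# [<third>, <second>], i.e. the placeholder slot is dropped and the two real
# edges are reordered; merge/branch nodes whose outs stay empty were already
# deleted and unlinked from their input nodes by the loops above.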
def run(self, root):
if self.alias > 0:
logger.info("[1st Pass: Alias Analysis] Started...")
a = AliasAnalyzer(self.GlobalTG.aliasgraph)
self.GlobalTG.aliasgraph = a.run(root)
logger.info("[1st Pass: Alias Analysis] Generated {0} attribute nodes, captured {1} aliases.".format(len(self.GlobalTG.aliasgraph.nodes), self.GlobalTG.aliasgraph.getAliasNum()))
logger.info("[1st Pass: Alias Analysis] Finished!")
else:
logger.info("[1st Pass: Alias Analysis] Skipped...")
if self.repo != None:
logger.info("[2nd Pass: Call Analysis] Started...")
cg = CallGraphGenerator([self.filename], self.repo, -1, CALL_GRAPH_OP)
cg.analyze()
formatter = formats.Simple(cg)
output = formatter.generate()
self.GlobalTG.callgraph = output
num = 0
for key in output:
num += len(output[key])
logger.info("[2nd Pass: Call Analysis] Captured {0} call relationships.".format(num))
logger.info("[2nd Pass: Call Analysis] Finished!")
else:
logger.info("[2nd Pass: Call Analysis] Skipped...")
if self.alias <= 1:
logger.info("[3rd Pass: TDG Generation] Started...")
self.visit(root)
self.GlobalTG.addClassname(self.classnames)
if self.optimized:
self.optimize(self.GlobalTG)
for tg in self.GlobalTG.tgs:
self.optimize(tg)
logger.info("[3rd Pass: TDG Generation] Finished!")
else:
logger.info("[3rd Pass: TDG Generation] Skipped...")
return self.GlobalTG
```
#### File: HiTyper/hityper/typeobject.py
```python
import re
from hityper.stdtypes import stdtypes, exporttypemap, inputtypemap, typeequalmap
from hityper import logger
logger.name = __name__
class TypeObject(object):
def __init__(self, t, category, added = False):
self.type = t
#categories: 0 - builtins
#1 - standard libraries
#2 - user defined
self.category = category
self.compatibletypes = [t]
self.startnodename = None
self.startnodeorder = None
self.added = added
if t in ["bool", "int", "float", "complex"]:
self.compatibletypes = ["int", "float", "complex", "bool"]
self.elementtype = []
self.keytype = []
self.valuetype = []
def buildTuple(self, t):
self.type = "Tuple"
self.elementtype = t
def buildDict(self, key, value):
self.type = "Dict"
self.elementtype = key
self.keytype = key
self.valuetype = value
def buildList(self, t):
self.type = "List"
self.elementtype = t
def buildSet(self, t):
self.type = "Set"
self.elementtype = t
@property
def getBuiltinTypes(self):
#ref: https://docs.python.org/zh-cn/3/library/typing.html
#ref: https://docs.python.org/3/library/stdtypes.html
self.builintypes = {}
self.builintypes["element"] = ["bool", "int", "float", "None", "Any", "Text", "type", "bytes"]
self.builintypes["generic"] = [ "List", "Tuple", "Set", "Dict", "Union", "Optional", "Callable", "Iterable", "Sequence", "Generator"]
self.builintypes["rare"] = ["complex", "bytearray", "Frozenset", "memoryview", "range"]
return self.builintypes
@staticmethod
def isCompatible(l, r):
for t in l.compatibletypes:
if t == r.type:
return True
return False
@staticmethod
def existCompatible(l, listr):
for r in listr:
if TypeObject.isCompatible(l, r):
return True
if TypeObject.existSame(l, listr):
return True
return False
@staticmethod
def existNumbers(l, listr, exact = False):
#exact matching is conducted by default, so the int/float compatibility check below only runs when exact=True
if not exact:
return False
if l.type in ["int", "float"]:
for r in listr:
if r.type in ["int", "float"]:
return True
return False
#returns True if l is some type X and Optional[X] appears in listr
@staticmethod
def existOptional(l, listr):
for t in listr:
if t.type.lower() == "optional" and len(t.elementtype) == 1 and typeequalmap[t.elementtype[0].type.lower()] == typeequalmap[l.type.lower()]:
return True
return False
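# Hedged example (inferred from the check above): if l has type "int" and
# listr contains a TypeObject whose type is "Optional" with a single
# elementtype entry of type "int", existOptional(l, listr) returns True.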
@staticmethod
def existSame(l, listr):
for r in listr:
if isinstance(r, str):
if r.startswith("<") and r.endswith(">"):
continue
if TypeObject.isSimilar(l, TypeObject(r,0)):
return True
elif TypeObject.isIdentical(l, r):
return True
return False
@staticmethod
def existSimilar(l, listr):
for r in listr:
if TypeObject.isSimilar(l,r):
return True
return False
@staticmethod
def findSame(l, listr):
for r in listr:
if isinstance(r, str) and TypeObject.isSimilar(l, TypeObject(r,0)):
return r
elif isinstance(r, TypeObject) and TypeObject.isIdentical(l,r):
return r
return None
@staticmethod
def isIdentical(l, r):
if l.category != 0 and r.category != 0:
if l.type == r.type:
return True
elif l.category == r.category and l.category == 2 and (l.type.split(".")[-1] == r.type.split(".")[-1]):
return True
else:
return False
if l.category == 0 and r.category == 0:
if typeequalmap[l.type.lower()] == typeequalmap[r.type.lower()]:
if l.type.lower() not in ["list", "tuple", "set", "iterable", "optional", "union", "sequence", "generator", "dict"]:
return True
else:
if l.type.lower() == "dict" and TypeObject.isIdenticalSet(l.keytype, r.keytype) and TypeObject.isIdenticalSet(l.valuetype, r.valuetype):
return True
elif l.type.lower() in ["list", "tuple", "set", "iterable", "optional", "union", "sequence", "generator"] and TypeObject.isIdenticalSet(l.elementtype, r.elementtype):
return True
elif (l.type.lower() == "literal" and typeequalmap[r.type.lower()] <= 3) or (r.type.lower() == "literal" and typeequalmap[l.type.lower()] <= 3):
return True
elif (l.type.lower() == "iterable" and typeequalmap[r.type.lower()] <= 17 and typeequalmap[r.type.lower()] >= 11) or (r.type.lower() == "iterable" and typeequalmap[l.type.lower()] <= 17 and typeequalmap[l.type.lower()] >= 11):
return True
if l.category == 0 and r.category == 2 and l.type.lower() == "type" and len(l.elementtype) == 1:
return TypeObject.isIdentical(l.elementtype[0], r)
if r.category == 0 and l.category == 2 and r.type.lower() == "type" and len(r.elementtype) == 1:
return TypeObject.isIdentical(r.elementtype[0], l)
return False
@staticmethod
def isSimilar(l,r):
if l.category == 0 and r.category == 0 and typeequalmap[l.type.lower()] == typeequalmap[r.type.lower()]:
return True
elif l.type.lower() == r.type.lower():
return True
else:
return False
@staticmethod
def isIdenticalSet(llist, rlist):
invalidtypes = []
for l in llist:
if not isinstance(l, TypeObject):
invalidtypes.append(l)
for r in rlist:
if not isinstance(r, TypeObject):
invalidtypes.append(r)
for t in invalidtypes:
if t in llist:
llist.remove(t)
for t in invalidtypes:
if t in rlist:
rlist.remove(t)
for l in llist:
if l.type.lower() == "any":
return True
if not TypeObject.existSame(l, rlist) and l.type.lower() != "any":
return False
for r in rlist:
if r.type.lower() == "any":
return True
if not TypeObject.existSame(r, llist) and r.type.lower() != "any":
return False
return True
@staticmethod
def existType(t, listr):
for r in listr:
if isinstance(t, str):
if (r.category == 0 and typeequalmap[t.lower()] == typeequalmap[r.type.lower()]) or (r.category == 2 and r.type == t):
return True
elif isinstance(t, TypeObject):
if (r.category == 0 and t.category == 0 and typeequalmap[t.type.lower()] == typeequalmap[r.type.lower()]) or (t.type == r.type):
return True
return False
@staticmethod
def equal2type(t, typestr):
if typeequalmap[t.type.lower()] == typeequalmap[typestr.lower()]:
return True
return False
@staticmethod
def equal2onetype(t, typestrs):
for s in typestrs:
if typeequalmap[t.type.lower()] == typeequalmap[s.lower()]:
return True
return False
@staticmethod
def combineTypes(listt):
if len(listt) > 1:
typeobject = TypeObject("Union", 0)
typeobject.elementtype = listt
return typeobject
elif len(listt) == 1:
return listt[0]
else:
return None
@staticmethod
def usertypeCompare(l, rlist):
for r in rlist:
if l.category == r.category and l.category == 2 and ((l.type.split(".")[-1] == r.type.split(".")[-1])):
return True
return False
@staticmethod
def existIncluded(l, rlist):
for r in rlist:
if TypeObject.isIncluded(l,r):
return True
return False
#returns True if l is included in r; for generic types, List[a] is included in List[a, b]
@staticmethod
def isIncluded(l, r):
if r.type == "Optional" and len(r.elementtype) == 1 and l.type == r.elementtype[0].type:
return True
elif l.type != r.type:
return False
elif l.type == r.type and l.type in ["List", "Tuple", "Dict", "Set", "Iterable", "Optional", "Union", "Sequence", "Generator"]:
if l.type == "Dict":
for t in l.keytype:
if not TypeObject.existSame(t, r.keytype) and not TypeObject.existOptional(t, r.keytype) and not TypeObject.existIncluded(t, r.keytype):
return False
for t in l.valuetype:
if not TypeObject.existSame(t, r.valuetype) and not TypeObject.existOptional(t, r.valuetype) and not TypeObject.existIncluded(t, r.valuetype):
return False
return True
else:
for t in l.elementtype:
if not TypeObject.existSame(t, r.elementtype) and not TypeObject.existOptional(t, r.elementtype) and not TypeObject.existIncluded(t, r.elementtype):
return False
return True
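# Hedged example (derived from the element-wise checks above): a List whose
# elementtype is [int] is included in a List whose elementtype is [int, Text],
# and any type X is included in Optional[X].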
@staticmethod
def isSetIncluded(llist, rlist):
for r in rlist:
if TypeObject.existSame(r, llist) or TypeObject.existNumbers(r, llist) or TypeObject.usertypeCompare(r, llist):
continue
else:
included = False
for l in llist:
if TypeObject.isIncluded(r, l):
included = True
break
if included:
continue
return False
return True
@staticmethod
def isSetIncluded2(llist, rlist):
for r in rlist:
if TypeObject.existSimilar(r, llist) or TypeObject.existNumbers(r, llist, exact = True) or TypeObject.usertypeCompare(r, llist):
continue
else:
included = False
for l in llist:
if TypeObject.isIncluded(r, l):
included = True
break
if included:
continue
return False
return True
@staticmethod
def simplifyGenericType(t):
if not isinstance(t, TypeObject):
return t
if t.type in ["Set", "Tuple", "List", "Awaitable", "Iterable", "Union"]:
t.elementtype = TypeObject.removeInclusiveTypes(t.elementtype)
elif t.type == "Dict":
t.keytype = TypeObject.removeInclusiveTypes(t.keytype)
t.valuetype = TypeObject.removeInclusiveTypes(t.valuetype)
elif t.type == "Optional":
t.elementtype = TypeObject.removeRedundantTypes(t.elementtype)
rm = None
for et in t.elementtype:
if et.type == "None":
rm = et
break
if rm != None and rm in t.elementtype:
t.elementtype.remove(rm)
return t
@staticmethod
def removeRedundantTypes(listt):
outs = []
for t in listt:
typeobj = TypeObject.simplifyGenericType(t)
if not TypeObject.existSame(typeobj, outs):
outs.append(typeobj)
return outs
#Example: if List[] and List[a] exist at the same time, then List[] is removed
@staticmethod
def removeInclusiveTypes(listt):
outs = TypeObject.removeRedundantTypes(listt)
removed = True
while removed:
removed = False
for i in range(0, len(outs)):
for j in range(0, len(outs)):
if i != j and TypeObject.isIncluded(outs[i], outs[j]):
removed = True
target = outs[i]
break
if removed and target in outs:
outs.remove(target)
return outs
@staticmethod
def removeInvalidTypes(t):
if isinstance(t, TypeObject):
elementtype = []
for tt in t.elementtype:
if isinstance(tt, TypeObject):
elementtype.append(TypeObject.removeInvalidTypes(tt))
t.elementtype = elementtype
keytype = []
for tt in t.keytype:
if isinstance(tt, TypeObject):
keytype.append(TypeObject.removeInvalidTypes(tt))
t.keytype = keytype
valuetype = []
for tt in t.valuetype:
if isinstance(tt, TypeObject):
valuetype.append(TypeObject.removeInvalidTypes(tt))
t.valuetype = valuetype
return t
def __str__(self):
return TypeObject.resolveTypeName(self)
@staticmethod
def resolveTypeName(t):
if isinstance(t, TypeObject):
t = TypeObject.removeInvalidTypes(t)
if t.category != 0:
return t.type
elif t.type.lower() not in exporttypemap:
raise TypeError("Unknown type: " + t.type)
typestr = exporttypemap[t.type.lower()]
if t.type.lower() in ["dict", "callable"]:
typestr = typestr + "["
if len(t.keytype) == 0:
typestr += ", "
elif len(t.keytype) == 1:
typestr = typestr + TypeObject.resolveTypeName(t.keytype[0]) + ", "
else:
typestr += "typing.Union["
for n in t.keytype:
typestr = typestr + TypeObject.resolveTypeName(n) + ","
typestr = typestr[:-1]
typestr += "], "
if len(t.valuetype) == 0:
pass
elif len(t.valuetype) == 1:
typestr = typestr + TypeObject.resolveTypeName(t.valuetype[0])
else:
typestr += "typing.Union["
for n in t.valuetype:
typestr = typestr + TypeObject.resolveTypeName(n) + ","
typestr = typestr[:-1]
typestr += "]"
typestr += "]"
elif t.type.lower() in ["set", "tuple", "list", "awaitable", "iterable", "sequence", "generator"]:
typestr = typestr + "["
if len(t.elementtype) == 1:
typestr = typestr + TypeObject.resolveTypeName(t.elementtype[0])
elif len(t.elementtype) == 2 and (t.elementtype[0].type == "None" or t.elementtype[1].type == "None"):
typestr += "typing.Optional["
for i in t.elementtype:
if i.type != "None":
typestr = typestr + TypeObject.resolveTypeName(i)
typestr += "]"
elif len(t.elementtype) >= 2:
typestr += "typing.Union["
for n in t.elementtype:
typestr = typestr + TypeObject.resolveTypeName(n) + ","
typestr = typestr[:-1]
typestr += "]"
typestr += "]"
elif t.type.lower() == "optional":
typestr += "["
if len(t.elementtype) > 1:
typestr += "typing.Union["
for n in t.elementtype:
typestr = typestr + TypeObject.resolveTypeName(n) + ","
typestr = typestr[:-1]
typestr += "]"
elif len(t.elementtype) == 1:
typestr = typestr + TypeObject.resolveTypeName(t.elementtype[0]) + "]"
else:
typestr += "]"
elif t.type.lower() == "union":
typestr += "["
if len(t.elementtype) == 0:
typestr += "]"
if len(t.elementtype) == 1:
typestr = typestr + TypeObject.resolveTypeName(t.elementtype[0]) + "]"
elif len(t.elementtype) > 1:
for n in t.elementtype:
typestr = typestr + TypeObject.resolveTypeName(n) + ","
typestr = typestr[:-1]
typestr += "]"
return typestr
else:
raise TypeError("t should be a TypeObject.")
@staticmethod
def resolveTypeNames(tlist):
typestr = "Possible Types {"
if isinstance(tlist, list):
for i, t in enumerate(tlist):
typestr = typestr + " " + str(i+1) + "." + str(t.category) + "- " + TypeObject.resolveTypeName(t)
else:
raise TypeError("tlist must be a list of TypeObject.")
return typestr + " }"
@staticmethod
def resolveTypeNames2(tlist):
typestr = "Union["
if isinstance(tlist, list):
for i, t in enumerate(tlist):
typestr = typestr + TypeObject.resolveTypeName(t) + ","
if typestr[-1] == ",":
typestr = typestr[:len(typestr)-1]
else:
raise TypeError("tlist must be a list of TypeObject.")
return typestr + "]"
@staticmethod
def checkType(typestr):
typeobjs = TypeObject.Str2Obj(typestr)
if len(typeobjs) == 0:
return None
elif typeobjs[0].category == 0 and len(typeobjs[0].elementtype) == 0 and len(typeobjs[0].keytype) == 0 and len(typeobjs[0].valuetype) == 0:
return "simple"
elif typeobjs[0].category == 0:
return "generic"
elif typeobjs[0].category == 2:
return "user-defined"
else:
return None
@staticmethod
def Str2Obj(typestr):
strobjs = []
typestr = typestr.replace(" ", "")
typestr = typestr.replace("builtins.", "")
typestr = typestr.replace("typing_extensions.", "typing.")
if len(typestr) > 2 and typestr[0] == "[" and typestr[-1] == "]":
typestr = typestr[1:len(typestr) - 1]
if typestr == None or typestr == "":
return strobjs
if len(typestr) > 500:
#logger.warning("Type name is too long.")
return strobjs
if typestr in ["Union", "typing.Union"] and "[" not in typestr:
return strobjs
elif typestr.lower() in inputtypemap:
strobjs.append(TypeObject(inputtypemap[typestr.lower()], 0))
return strobjs
elif "[" in typestr and "]" in typestr:
typestr = typestr.replace("t.", "typing.")
index1 = typestr.index("[")
index2 = typestr.rfind("]")
innerstr = typestr[index1 + 1:index2]
if "Union" in typestr[:index1]:
strs = innerstr.split(",")
leftnum = 0
rightnum = 0
cur_str = ""
for s in strs:
cur_str += s
leftnum += s.count("[")
rightnum += s.count("]")
if leftnum == rightnum:
strobjs += TypeObject.Str2Obj(cur_str)
cur_str = ""
else:
cur_str += ","
return strobjs
elif "Optional" in typestr[:index1] or "typing.Optional" in typestr[:index1]:
strobjs += TypeObject.Str2Obj(innerstr)
strobjs.append(TypeObject("None", 0))
return strobjs
if typestr[:index1].lower() in inputtypemap:
typeobj = TypeObject(inputtypemap[typestr[:index1].lower()], 0)
if "Dict" in typestr[:index1] or "Mapping" in typestr[:index1] or "Callable" in typestr[:index1]:
if "," in innerstr:
commaindex = innerstr.split(",")
leftnum = 0
rightnum = 0
cur_str = ""
count = 0
for s in commaindex:
cur_str += s
leftnum += s.count("[")
rightnum += s.count("]")
if leftnum == rightnum:
if count == 0:
typeobj.keytype += TypeObject.Str2Obj(cur_str)
else:
typeobj.valuetype += TypeObject.Str2Obj(cur_str)
count += 1
cur_str = ""
else:
cur_str += ","
strobjs.append(typeobj)
return strobjs
else:
return strobjs
else:
strs = innerstr.split(",")
leftnum = 0
rightnum = 0
cur_str = ""
for s in strs:
cur_str += s
leftnum += s.count("[")
rightnum += s.count("]")
if leftnum == rightnum:
typeobj.elementtype += TypeObject.Str2Obj(cur_str)
cur_str = ""
else:
cur_str += ","
'''
if "[" in innerstr and "]" in innerstr:
typeobj.elementtype = TypeObject.Str2Obj(innerstr)
else:
strs = innerstr.split(",")
for s in strs:
typeobj.elementtype += TypeObject.Str2Obj(s)
'''
strobjs.append(typeobj)
return strobjs
else:
typeobj = TypeObject(typestr.replace("[typing.Any]", ""), 2)
strobjs.append(typeobj)
return strobjs
elif typestr.startswith("typing") and "[" not in typestr and typestr.lower() in inputtypemap:
typeobj = TypeObject(inputtypemap[typestr.lower()], 0)
strobjs.append(typeobj)
return strobjs
else:
typeobj = TypeObject(typestr, 2)
strobjs.append(typeobj)
return strobjs
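# Hedged example of the parser above (assuming inputtypemap contains the usual
# "dict", "str"/"text" and "int" entries): Str2Obj("Dict[str,int]") yields one
# Dict TypeObject whose keytype holds the str object and whose valuetype holds
# the int object; names not found in inputtypemap fall through to category 2
# (user-defined).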
@staticmethod
def DumpObject(typeobj):
print("Type: " + typeobj.type)
print("Element Type:" + TypeObject.resolveTypeNames(typeobj.elementtype))
print("Key Type:" + TypeObject.resolveTypeNames(typeobj.keytype))
print("Value Type:" + TypeObject.resolveTypeNames(typeobj.valuetype))
@staticmethod
def DumpOriObject(typeobj):
elementtypestr = ""
for t in typeobj.elementtype:
elementtypestr += TypeObject.DumpOriObject(t) + " [SEP] "
keytypestr = ""
for t in typeobj.keytype:
keytypestr += TypeObject.DumpOriObject(t) + " [SEP] "
valuetypestr = ""
for t in typeobj.valuetype:
valuetypestr += TypeObject.DumpOriObject(t) + " [SEP] "
return "@Type: {}, Element Type: [{}], Key Type: [{}], Value Type: [{}]@".format(typeobj.type, elementtypestr, keytypestr, valuetypestr)
@staticmethod
def DumpOriObjects(typeobjs):
typestr = ""
for i, obj in enumerate(typeobjs):
typestr += "{} - {} \n".format(i, TypeObject.DumpOriObject(obj))
return typestr
def dump(self):
obj = {"type": self.type, "category": self.category, "added": self.added, "compatibletypes": self.compatibletypes, "startnodename": self.startnodename, "startnodeorder": self.startnodeorder}
elementtype = []
for i in self.elementtype:
elementtype.append(i.dump())
obj["elementtype"] = elementtype
keytype = []
for i in self.keytype:
keytype.append(i.dump())
obj["keytype"] = keytype
valuetype = []
for i in self.valuetype:
valuetype.append(i.dump())
obj["valuetype"] = valuetype
return obj
@staticmethod
def load(dictobj):
obj = TypeObject(dictobj["type"], dictobj["category"], added = dictobj["added"])
obj.compatibletypes = dictobj["compatibletypes"]
obj.startnodename = dictobj["startnodename"]
obj.startnodeorder = dictobj["startnodeorder"]
for i in dictobj["elementtype"]:
obj.elementtype.append(TypeObject.load(i))
for i in dictobj["keytype"]:
obj.keytype.append(TypeObject.load(i))
for i in dictobj["valuetype"]:
obj.valuetype.append(TypeObject.load(i))
return obj
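# --- Illustrative usage sketch (not part of the original HiTyper source). ---
# A minimal round trip, assuming inputtypemap/exporttypemap in hityper.stdtypes
# contain the usual entries for "list" and "int".
if __name__ == "__main__":
    objs = TypeObject.Str2Obj("List[int]")       # parse an annotation string
    print(TypeObject.resolveTypeNames(objs))     # pretty-print the parsed objects
    combined = TypeObject.combineTypes(objs)     # single TypeObject (Union if several)
    print(TypeObject.resolveTypeName(combined))  # e.g. "typing.List[int]"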
```
#### File: HiTyper/hityper/typerule.py
```python
from hityper.typeobject import TypeObject
import hityper.tdg
from hityper import logger
from copy import copy, deepcopy
from hityper.stdtypes import builtin_method_properties, special_types, builtin_method
logger.name = __name__
class TypingRule(object):
def __init__(self):
pass
def check_failed(self, ori, rej):
if len(ori) == len(rej):
logger.warning("All types are rejected.")
def sub_act(self, operands , op, func, attr, usertypes, iterable=False, curnode = None):
if not iterable:
if len(operands) > 0:
left = operands[0]
else:
left = None
right = None
if(len(operands)>1):
right = operands[1]
else:
right = None
if(left != None and (not isinstance(left, hityper.tdg.GraphBaseNode)) or (right != None and not isinstance(right, hityper.tdg.GraphBaseNode))):
raise ValueError("Operands must be a graph node")
if (op in ["and", "or"]):
return self.binop_and_or(left, right)
elif (op == "not"):
return self.unop_not(left, right)
elif (op in ["<", "<=", ">", ">="]):
return self.binop_compare_neq(left, right)
elif (op in ["==", "!=", "is", "is not"]):
return self.binop_compare_eq(left, right)
elif (op == "+" and right != None):
return self.binop_add(operands)
elif (op == "*"):
return self.binop_mul(left, right)
elif (op in ["-", "/", "//", "%", "**", "pow"] and right != None):
return self.binop_num_op(left, right, op)
elif (op in ["+", "-", "abs"] and right == None):
return self.NumRemainSame(left, right)
elif (op == "int" and right == None):
return self.unop_int(left, right)
elif (op == "float" and right == None):
return self.unop_float(left, right)
elif (op == "divmod"):
return self.binop_divmod(left, right)
elif (op in ["|", "^", "&", "<<", ">>"]):
return self.binop_int_op(operands, func, attr, usertypes)
elif (op == "~" and right == None):
return self.unop_int_op(left, right)
elif (op == "bytes" and right == None):
return self.unop_bytes(left, right)
elif (op == "str" and right == None):
return self.unop_str(left, right)
elif (op == "tuple" and right == None):
return self.unop_tuple(left, right)
elif (op == "list" and right == None):
return self.unop_list(left, right)
elif (op == "set" and right == None):
return self.unop_set(left, right)
elif (op == "dict" and right == None):
return self.unop_dict(left, right)
elif (op == "type" and right == None):
return self.unop_type(left, right)
elif (op in ["in", "not in"] ):
return self.binop_in(left, right)
elif (op == "forin" and right == None):
return self.unop_forin(left, right)
elif (op == "append"):
return self.binop_append(left, right)
elif (op == "Subscript_Write"):
return self.triop_subscript(operands, func, attr, usertypes)
elif (op == "Subscript_Read"):
return self.binop_subscript(operands, func, attr, usertypes)
elif (op == "=" and right == None):
return self.unop_assign(left, right)
elif (op == "call"):
return self.call(operands, func, attr, usertypes, curnode)
elif (op == "List_Read"):
return self.List_Read(operands)
elif( op == "List_Write"):
return self.List_Write(operands)
elif(op == "Tuple_Read"):
return self.Tuple_Read(operands)
elif(op == "Tuple_Write"):
return self.Tuple_Write(operands)
elif(op == "Set_Read"):
return self.Set_Read(operands)
elif(op =="Dict_Read"):
return self.Dict_Read(operands)
elif(op == "JoinedStr"):
return self.JoinedStr(operands)
elif(op=="."):
return self.Attribution_Return(operands, existstype=None)
elif(op=="ListComp"):
return self.listcomp_Return(operands)
elif(op=="SetComp"):
return self.setcomp_Return(operands)
elif(op=="DictComp"):
return self.dictcomp_Retrun(operands)
elif(op=="GeneratorExp"):
return self.GeneratorExp_Return(operands)
elif(op=="yield"):
return self.yieldop(operands)
elif(op=="IfExp"):
return self.IfExp(operands)
else:
return self.unknown_op(operands, op)
#raise TypeError("Unknown Operation: " + op)
def act(self, operands , op, func, attr, usertypes, iterable=False, curnode = None):
#only the non-iterable case is handled here
res = self.sub_act(operands , op, func, attr, usertypes, iterable=False, curnode = curnode)
if res==None:
logger.warning("POSIIBLE NONE RETURN op is:", op)
return res
def unknown_op(self, operands, op):
logger.warning("Unknown Operation: " + op)
rej_types = []
outs = []
for i in operands:
rej_types.append([])
return rej_types, outs
def binop_and_or(self, left, right):
#rule: left and right can have arbitrary types
#out: Union[left, right]
#if one is user-defined, the return type can only be True/False
ltypes = left.types
rtypes = right.types
rej_ltypes = []
rej_rtypes = []
outs = TypeObject.removeRedundantTypes(ltypes + rtypes)
return [rej_ltypes, rej_rtypes], outs
def unop_not(self, left, right):
#rule: left can have an arbitrary type
#out: bool(including user-defined)
rej_ltypes = []
outs = TypeObject("bool", 0)
return [rej_ltypes], outs
def binop_compare_neq(self, left, right):
#rule: left and right must have the same type, excluding user-defined types, type, and Callable
#out: bool(including user-defined)
ltypes = left.types
rtypes = right.types
rej_ltypes = []
rej_rtypes = []
outs = TypeObject("bool", 0)
for t in ltypes:
if (not TypeObject.existCompatible(t, rtypes)) or t.category != 0 or TypeObject.existSame(t, ["type", "Callable", "Dict", "None"]):
rej_ltypes.append(t)
for t in rtypes:
if (not TypeObject.existCompatible(t, ltypes)) or t.category != 0 or TypeObject.existSame(t, ["type", "Callable", "Dict", "None"]):
rej_rtypes.append(t)
return [rej_ltypes, rej_rtypes], outs
def binop_compare_eq(self, left, right):
#rule: left and right can have arbitrary types
#out: bool(including user-defined)
rej_ltypes = []
rej_rtypes = []
outs = TypeObject("bool", 0)
return [rej_ltypes, rej_rtypes], outs
def binop_add(self, operands):
#rule: left and right must have the same type
#out: left/right
left = operands[0]
right = operands[1]
ltypes = left.types
rtypes = right.types
rej_ltypes = []
rej_rtypes = []
outs = []
# if ltypes or rtypes are None, add rej
# if len(ltypes)==0:
# rej_ltypes.append()
for t in ltypes:
if TypeObject.existSame(t, ["Tuple", "List"]):
temp = TypeObject(t.type, 0)
temp.elementtype += t.elementtype
rexist = False
for rt in rtypes:
if rt.type == t.type:
temp.elementtype += rt.elementtype
rexist = True
if rexist:
outs.append(temp)
elif ((not TypeObject.existSame(t, rtypes)) and (not TypeObject.existCompatible(t, rtypes))) or t.category != 0 or TypeObject.existSame(t, ["type", "Callable", "Set", "Dict", "None"]):
rej_ltypes.append(t)
elif (not TypeObject.existSame(t, outs)):
outs.append(t)
for t in rtypes:
if ((not TypeObject.existSame(t, ltypes)) and (not TypeObject.existCompatible(t, ltypes))) or t.category != 0 or TypeObject.existSame(t, ["type", "Callable", "Set", "Dict", "None"]):
rej_rtypes.append(t)
elif not TypeObject.existSame(t, outs):
outs.append(t)
if len(operands)==2:
return [rej_ltypes, rej_rtypes], outs
elif len(operands)>2:
rej = []
rej.append(rej_ltypes)
rej.append(rej_rtypes)
for i in range(2,len(operands)):
rej.append([])
return rej,outs
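# Hedged example (following the branches above): if the left operand may be
# List[int] and the right operand List[Text], "+" keeps List and the result
# collects both element types; an int left with a Text right is rejected on
# both sides because neither has a compatible counterpart.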
def binop_mul(self, left, right):
#rule: one operand must be a number, the other cannot be user-defined, type, or Callable
#out: left (numbers need extra consideration)
ltypes = left.types
rtypes = right.types
rej_ltypes = []
rej_rtypes = []
outs = []
if not TypeObject.existType("int", ltypes) and not TypeObject.existType("bool", ltypes):
for t in rtypes:
if not TypeObject.existSame(t, ["bool", "float", "int"]):
rej_rtypes.append(t)
if not TypeObject.existType("int", rtypes) and not TypeObject.existType("bool", rtypes):
for t in ltypes:
if not TypeObject.existSame(t, ["bool", "float", "int"]):
rej_ltypes.append(t)
for t in ltypes:
if TypeObject.existSame(t, ["type", "Callable", "None", "Set", "Dict"]):
rej_ltypes.append(t)
for t in rtypes:
if TypeObject.existSame(t, ["type", "Callable", "None", "Set", "Dict"]):
rej_rtypes.append(t)
#self.check_failed(ltypes, rej_ltypes)
#self.check_failed(rtypes, rej_rtypes)
if TypeObject.existType("float", ltypes) or TypeObject.existType("float", rtypes):
outs.append(TypeObject("float", 0))
if (TypeObject.existType("int", ltypes) or TypeObject.existType("bool", ltypes)) and (TypeObject.existType("int", rtypes) or TypeObject.existType("bool", rtypes)):
outs.append(TypeObject("int", 0))
if TypeObject.existType("int", ltypes) or TypeObject.existType("bool", ltypes):
for t in rtypes:
if (t not in rej_rtypes) and (not TypeObject.existSame(t, outs)):
outs.append(t)
if TypeObject.existType("int", rtypes) or TypeObject.existType("bool", rtypes):
for t in ltypes:
if (t not in rej_ltypes) and (not TypeObject.existSame(t, outs)):
outs.append(t)
return [rej_ltypes, rej_rtypes], outs
def binop_num_op(self, left, right, op):
#rule: left and right must be numbers
#out: numbers
#note that False % True returns int
#note that % is also used for string formatting, e.g. b = 'hello %s %s' % (a, a)
ltypes = left.types
rtypes = right.types
rej_ltypes = []
rej_rtypes = []
outs = []
level = 0
# for joinedstr
strtemp = TypeObject("Text",0)
if op=='%' and TypeObject.existSame(strtemp,ltypes):
outs = [TypeObject("Text", 0)]
# left can only be Text
for idx in range(len(ltypes)):
if ltypes[idx].type!="Text":
rej_ltypes.append(ltypes[idx])
return [rej_ltypes, rej_rtypes], outs
for idx in range(len(rtypes)):
if rtypes[idx].type=="bool":
rtypes[idx] = TypeObject("int", 0) # raise the type
elif not TypeObject.existSame(rtypes[idx], special_types["@number@"]):
rej_rtypes.append(rtypes[idx])
elif special_types["@number@"].index(rtypes[idx].type.lower()) > level:
level = special_types["@number@"].index(rtypes[idx].type.lower())
for idx in range(len(ltypes)):
if ltypes[idx].type=="bool":
ltypes[idx] = TypeObject("int", 0) # raise the type
for t in ltypes:
if not TypeObject.existSame(t, special_types["@number@"]):
rej_ltypes.append(t)
elif TypeObject.existSame(t, ["int", "float", "complex"]) and TypeObject.existSame(t, rtypes) and not TypeObject.existSame(t, outs):
outs.append(t)
elif TypeObject.existSame(t, ["int", "float", "complex"]) and not TypeObject.existSame(t, rtypes) and special_types["@number@"].index(t.type.lower()) > level and not TypeObject.existSame(t, outs):
outs.append(t)
for t in rtypes:
if TypeObject.existSame(t, ["int", "float", "complex"]) and special_types["@number@"].index(t.type.lower()) == level and not TypeObject.existSame(t, outs):
outs.append(t)
if op == "/" and len(ltypes) > len(rej_ltypes) and len(rtypes) > len(rej_rtypes):
outs = [TypeObject("float", 0)]
return [rej_ltypes, rej_rtypes], outs
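# Hedged examples (following the rules above): Text % <anything> yields Text
# (string formatting), mixing int and float keeps float among the results, and
# "/" always produces float as long as both sides keep at least one accepted
# numeric type.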
def NumRemainSame(self, left, right):
#rule: accept numbers
#out: the original number type
#user-defined types are not supported
ltypes = left.types
rej_ltypes = []
rej_rtypes = []
outs = []
for t in ltypes:
if not TypeObject.existSame(t, special_types["@number@"]):
rej_ltypes.append(t)
elif not TypeObject.existSame(t, outs):
outs.append(t)
return [rej_ltypes, rej_rtypes], outs
def unop_int(self, left, right):
# NO LONGER USED
ltypes = left.types
rej_ltypes = []
outs = TypeObject("int", 0)
for t in ltypes:
if not TypeObject.existSame(t, ["int", "float", "complex", "bytes", "Text", "bool"]):
rej_ltypes.append(t)
return [rej_ltypes], outs
def unop_float(self, left, right):
#NO LONGER USED
ltypes = left.types
rej_ltypes = []
outs = TypeObject("float", 0)
for t in ltypes:
if not TypeObject.existSame(t, ["int", "float", "complex", "bytes", "Text", "bool"]):
rej_ltypes.append(t)
return [rej_ltypes], outs
def binop_divmod(self, left, right):
# NO LONGER USED
#rule: just the combination of / and % on numbers
#out: numbers
#user-defined types are not supported
# if both left and right are int => a//b, a%b: int/int
# if one is float => math.floor(a / b), a % b: float/float
ltypes = left.types
rtypes = right.types
basic_type = TypeObject("int", 0)
llevel = rlevel = 1
for lt in ltypes:
if TypeObject.isCompatible(lt, basic_type):
llevel = max(llevel, special_types["@number@"].index(lt.type.lower()))
for rt in rtypes:
if TypeObject.isCompatible(rt,basic_type):
rlevel = max(rlevel, special_types["@number@"].index(rt.type.lower()))
if llevel < 2 and rlevel < 2:
(rej_ltypes, rej_rtypes), outs = self.binop_num_op(left, right, "%")
else:
(rej_ltypes, rej_rtypes), outs = self.binop_num_op(left, right, "/")
finalouts = TypeObject("Tuple", 0)
finalouts.buildTuple(outs)
return [rej_ltypes, rej_rtypes], finalouts
def binop_int_op(self,operands, func, attr, usertypes):
#rule: can accept int and bool
#out: int
#user-defined types are not supported
if len(operands)==2:
left = operands[0]
right = operands[1]
ltypes = left.types
rtypes = right.types
rej_ltypes = []
rej_rtypes = []
outs = []
temp1 = TypeObject("int", 0)
temp2 = TypeObject("bool", 0)
for t in ltypes:
if not TypeObject.existSame(t, ["int", "bool","Any", "Set"]) and t.category==0:
rej_ltypes.append(t)
for t in rtypes:
if not TypeObject.existSame(t, ["int", "bool","Any", "Set"]) and t.category==0:
rej_rtypes.append(t)
if TypeObject.existSame(TypeObject("Set", 0), ltypes) and TypeObject.existSame(TypeObject("Set", 0), rtypes):
outs.append(TypeObject.findSame(TypeObject("Set", 0), ltypes))
outs.append(TypeObject.findSame(TypeObject("Set", 0), rtypes))
outs += [temp1,temp2]
if len(ltypes) == len(rej_ltypes) or len(rtypes) == len(rej_rtypes):
return [rej_ltypes, rej_rtypes], []
else:
return [rej_ltypes, rej_rtypes], outs
elif len(operands)>2:
temp1 = TypeObject("int", 0)
temp2 = TypeObject("bool", 0)
rej = []
for inode in operands:
itypes = inode.types
rej_types = []
for t in itypes:
if not TypeObject.existSame(t, ["int", "bool","Any"]) and t.category == 0:
rej_types.append(t)
rej.append(rej_types)
return rej,[temp1,temp2]
def unop_int_op(self, left, right):
#rule: can accept int and bool
#out: int
#user-defined types are not supported
ltypes = left.types
rej_ltypes = []
outs = TypeObject("int", 0)
for t in ltypes:
if not TypeObject.existSame(t, ["int", "bool"]):
rej_ltypes.append(t)
return [rej_ltypes], outs
def unop_bytes(self, left, right):
# no longer used here
ltypes = left.types
rej_ltypes = []
outs = TypeObject("bytes", 0)
for t in ltypes:
if TypeObject.existSame(t, ["List", "Tuple", "Set"]):
rej_elementtypes = []
if isinstance(t.elementtype, list):
for it in t.elementtype:
if not TypeObject.existSame(it, ["int", "bool"]):
rej_elementtypes.append(it)
elif not TypeObject.existSame(t.elementtype, ["int", "bool"]):
rej_elementtypes.append(t.elementtype)
rej_type = TypeObject(t.type, 0)
rej_type.elementtype = rej_elementtypes
rej_ltypes.append(rej_type)
elif t.type == "Dict":
rej_keytypes = []
if isinstance(t.keytype, list):
for it in t.keytype:
if not TypeObject.existSame(it, ["int", "bool"]):
rej_keytypes.append(it)
elif t.keytype not in ["int", "bool"]:
rej_keytypes.append(t.keytype)
rej_valuetypes = []
rej_type = TypeObject("Dict", 0)
rej_type.keytype = rej_keytypes
rej_type.valuetype = rej_valuetypes
rej_ltypes.append(rej_type)
elif not TypeObject.existSame(t, ["int", "bool"]):
rej_ltypes.append(t)
return [rej_ltypes], outs
def unop_str(self, left, right):
#rule: can accept any type
#out: string(including user-define)
rej_ltypes = []
outs = TypeObject("Text", 0)
return [rej_ltypes], outs
def unop_tuple(self, left, right):
#rule: can accept iterable types
#out: tuple
# user-defined types are not supported
ltypes = left.types
rej_ltypes = []
outs = TypeObject("Tuple", 0)
for t in ltypes:
if not TypeObject.existSame(t, special_types["@iterable@"]):
rej_ltypes.append(t)
elif TypeObject.existSame(t, ["Dict"]):
if isinstance(t.keytype, list):
outs.elementtype += t.elementtype
else:
outs.elementtype.append(t.elementtype)
elif TypeObject.existSame(t, ["Text"]):
outs.elementtype.append(TypeObject("Text", 0))
else:
if isinstance(t.elementtype, list):
outs.elementtype += t.elementtype
else:
outs.elementtype.append(t.elementtype)
return [rej_ltypes], outs
def unop_list(self, left, right):
#rule: can accept iterable types
#out: list
# user-defined types are not supported
ltypes = left.types
rej_ltypes = []
outs = TypeObject("List", 0)
for t in ltypes:
if not TypeObject.existSame(t, special_types["@iterable@"]):
rej_ltypes.append(t)
elif TypeObject.existSame(t, ["List"]):
if isinstance(t.keytype, list):
outs.elementtype += t.elementtype
else:
outs.elementtype.append(t.elementtype)
elif TypeObject.existSame(t, ["Text"]):
outs.elementtype.append(TypeObject("Text", 0))
else:
if isinstance(t.elementtype, list):
outs.elementtype += t.elementtype
else:
outs.elementtype.append(t.elementtype)
return [rej_ltypes], outs
def unop_set(self, left, right):
#rule: can accept iterable types
#out: set
# user-defined types are not supported
ltypes = left.types
rej_ltypes = []
outs = TypeObject("Set", 0)
for t in ltypes:
if not TypeObject.existSame(t, special_types["@iterable@"]):
rej_ltypes.append(t)
elif TypeObject.existSame(t, ["Set"]):
if isinstance(t.keytype, list):
outs.elementtype += t.elementtype
else:
outs.elementtype.append(t.elementtype)
elif TypeObject.existSame(t, ["Text"]):
outs.elementtype.append(TypeObject("Text", 0))
else:
if isinstance(t.elementtype, list):
outs.elementtype += t.elementtype
else:
outs.elementtype.append(t.elementtype)
return [rej_ltypes], outs
def unop_dict(self, left, right):
# NOT USED
#rule: can accept list, tuple, dict and set
#out: dict
# user-defined types are not supported
###TODO!!!!
ltypes = left.types
rej_ltypes = []
rej_rtypes = []
outs = TypeObject("Dict", 0)
# 1. dict() -> new empty dictionary
if(len(ltypes)==0):
outs = TypeObject("Dict", 0)
# 2. dict(**kwargs)
return [rej_ltypes, rej_rtypes], outs
def unop_type(self, left, right):
# NOT USED
#rule: can accept an arbitrary type
#out: type(including user-define)
ltypes = left.types
rej_ltypes = []
outs = TypeObject("type", 0)
return [rej_ltypes], outs
def binop_in(self, left, right):
#rule: right must be tuple, dict, list, set and string
#out: bool
# user-defined types are not supported
rtypes = right.types
rej_ltypes = []
rej_rtypes = []
outs = TypeObject("bool", 0)
for t in rtypes:
if not TypeObject.existSame(t, special_types["@iterable@"]):
rej_rtypes.append(t)
return [rej_ltypes, rej_rtypes], outs
def unop_forin(self, left, right):
#rule: left must be tuple, dict, list, set and string
#out: element type of left
ltypes = left.types
rej_ltypes = []
outs = []
for t in ltypes:
if not TypeObject.existSame(t, special_types["@iterable@"]):
rej_ltypes.append(t)
elif TypeObject.existSame(t, ["Text"]):
outs.append(TypeObject("Text", 0))
elif TypeObject.existSame(t, ["Dict"]):
if isinstance(t.keytype, list):
outs += t.keytype
else:
outs.append(t.keytype)
else:
if isinstance(t.elementtype, list):
outs += t.elementtype
else:
outs.append(t.elementtype)
return [rej_ltypes], outs
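# Hedged example (per the branches above): iterating over a Dict yields its
# key types, iterating over Text yields Text, and iterating over a List yields
# its element types.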
def binop_append(self, left, right):
#rule: left must be list
#out: list
ltypes = left.types
rtypes = right.types
rej_ltypes = []
rej_rtypes = []
outs = TypeObject("List", 0)
for t in ltypes:
if not TypeObject.existSame(t, ["List"]):
rej_ltypes.append(t)
else:
if isinstance(t.elementtype, list):
outs.elementtype += t.elementtype
else:
outs.elementtype.append(t.elementtype)
for t in rtypes:
outs.elementtype.append(t)
return [rej_ltypes, rej_rtypes], outs
def triop_subscript(self,operands, func, attr, usertypes):
#rule: target must be dict, list
#index must be int (for list, tuple, set) and arbitrary (for dict)
#value can be arbitrary
#out: updated target
#TODO slices
if len(operands)==2:
# the first one is target, the second one is not used
target = operands[0]
index = operands[1]
ttypes = target.types
itypes = index.types
rej_ttypes = []
rej_itypes = []
outs = []
if TypeObject.existType("List", ttypes):
for it in itypes:
if not TypeObject.existSame(it, ["int", "bool"]):
rej_itypes.append(it)
for t in ttypes:
if not TypeObject.existSame(t, ["Dict", "List"]):
rej_ttypes.append(t)
elif TypeObject.existSame(t, ["List"]):
otype = TypeObject("List", 0)
outs.append(otype)
elif TypeObject.existSame(t, ["Dict"]):
otype = TypeObject("Dict", 0)
outs.append(otype)
return [rej_ttypes, rej_itypes], outs
elif len(operands)>2:
target = operands[0]
index = operands[1]
value = operands[2]
ttypes = target.types
itypes = index.types
vtypes = value.types
rej_ttypes = []
rej_itypes = []
rej_vtypes = []
outs = []
if TypeObject.existType("List", ttypes):
for it in itypes:
if not TypeObject.existSame(it, ["int", "bool"]):
rej_itypes.append(it)
for t in ttypes:
if t.type not in ["Dict", "List"] and t.category != 2:
rej_ttypes.append(t)
elif TypeObject.existSame(t, ["List"]):
otype = TypeObject("List", 0)
if isinstance(t.elementtype, list):
otype.elementtype += t.elementtype
else:
otype.elementtype.append(t.elementtype)
for vt in vtypes:
if not TypeObject.existSame(vt, otype.elementtype):
otype.elementtype.append(vt)
outs.append(otype)
elif TypeObject.existSame(t, ["Dict"]):
otype = TypeObject("Dict", 0)
if isinstance(t.keytype, list):
otype.keytype += t.keytype
else:
otype.keytype.append(t.keytype)
if isinstance(t.valuetype, list):
otype.valuetype += t.valuetype
else:
otype.valuetype.append(t.valuetype)
for it in itypes:
if not TypeObject.existSame(it, otype.keytype):
otype.keytype.append(it)
for vt in vtypes:
if not TypeObject.existSame(vt, otype.valuetype):
otype.valuetype.append(vt)
outs.append(otype)
elif t.category == 2:
outs.append(t)
rej = [rej_ttypes, rej_itypes, rej_vtypes]
for i in range(3,len(operands)):
rej.append([])
return rej, outs
def binop_subscript(self, operands, func, attr, usertypes):
#rule: target must be dict, list, tuple, text and bytes
#index must be int (for list, tuple) and arbitrary (for dict)
#out: elementtype
if len(operands)==1:
target = operands[0]
# if there is just the target, we won't check the rest 2
ttypes = target.types
rej_ttypes = []
outs = []
for t in ttypes:
if not TypeObject.existSame(t, special_types["@subscriptable@"]):
rej_ttypes.append(t)
elif TypeObject.existSame(t, ["Text", "bytes", "bytearray"]):
outs.append(t)
elif TypeObject.existSame(t, ["Dict"]):
if isinstance(t.valuetype, list):
outs += t.valuetype
else:
outs.append(t.valuetype)
else:
if isinstance(t.elementtype, list):
outs += t.elementtype
else:
outs.append(t.elementtype)
# we simplify this one
outs = TypeObject.removeRedundantTypes(outs)
return [rej_ttypes], outs
elif len(operands)==2:
target = operands[0]
index = operands[1]
ttypes = target.types
itypes = index.types
rej_ttypes = []
rej_itypes = []
outs = []
# if target is dict, just add [] to rej.
if TypeObject.existType("Dict", ttypes):
rej_itypes = []
elif TypeObject.existType("List", ttypes) or TypeObject.existType("Tuple", ttypes) or TypeObject.existType(
"Text", ttypes):
# to check the rest 1 or 2
for it in itypes:
if not TypeObject.existSame(it, ["int", "bool"]):
rej_itypes.append(it)
for t in ttypes:
if not TypeObject.existSame(t, special_types["@subscriptable@"]):
rej_ttypes.append(t)
elif TypeObject.existSame(t, ["Text", "bytes", "bytearray"]):
outs.append(t)
elif TypeObject.existSame(t, ["Dict"]):
if isinstance(t.valuetype, list):
outs += t.valuetype
else:
outs.append(t.valuetype)
else:
if isinstance(t.elementtype, list):
outs += t.elementtype
else:
outs.append(t.elementtype)
outs = TypeObject.removeRedundantTypes(outs)
return [rej_ttypes, rej_itypes], outs
elif len(operands)==3:
target = operands[0]
index = operands[1]
index2 = operands[2]
ttypes = target.types
itypes = index.types
itypes2 = index2.types
rej_ttypes = []
rej_itypes = []
rej_itypes2 = []
outs = []
# if target is dict, just add [] to rej.
if TypeObject.existType("Dict", ttypes):
rej_itypes = []
rej_itypes2 = []
elif TypeObject.existType("List", ttypes) or TypeObject.existType("Tuple", ttypes) or TypeObject.existType(
"Text", ttypes):
# to check the rest 1 or 2
for it in itypes:
if not TypeObject.existSame(it, ["int", "bool"]):
rej_itypes.append(it)
for it in itypes2:
if not TypeObject.existSame(it, ["int", "bool"]):
rej_itypes2.append(it)
for t in ttypes:
if not TypeObject.existSame(t, special_types["@subscriptable@"]):
rej_ttypes.append(t)
elif TypeObject.existSame(t, ["Text", "bytes", "bytearray"]):
outs.append(t)
elif TypeObject.existSame(t, ["Dict"]):
if isinstance(t.valuetype, list):
outs += t.valuetype
else:
outs.append(t.valuetype)
else:
if isinstance(t.elementtype, list):
outs += t.elementtype
else:
outs.append(t.elementtype)
outs = TypeObject.removeRedundantTypes(outs)
return [rej_ttypes, rej_itypes,rej_itypes2], outs
elif len(operands)==4:
target = operands[0]
index = operands[1]
index2 = operands[2]
index3 = operands[3]
ttypes = target.types
itypes = index.types
itypes2 = index2.types
itypes3 = index3.types
rej_ttypes = []
rej_itypes = []
rej_itypes2 = []
rej_itypes3 = []
outs = []
# if target is dict, just add [] to rej.
if TypeObject.existType("Dict", ttypes):
rej_itypes = []
rej_itypes2 = []
rej_itypes3 = []
elif TypeObject.existType("List", ttypes) or TypeObject.existType("Tuple", ttypes) or TypeObject.existType(
"Text", ttypes):
# to check the rest 1 or 2
for it in itypes:
if not TypeObject.existSame(it, ["int", "bool"]):
rej_itypes.append(it)
for it in itypes2:
if not TypeObject.existSame(it, ["int", "bool"]):
rej_itypes2.append(it)
for it in itypes3:
if not TypeObject.existSame(it, ["int", "bool"]):
rej_itypes3.append(it)
for t in ttypes:
if not TypeObject.existSame(t, special_types["@subscriptable@"]):
rej_ttypes.append(t)
elif TypeObject.existSame(t, ["Text"]):
outs.append(TypeObject("Text", 0))
elif TypeObject.existSame(t, ["Dict"]):
if isinstance(t.valuetype, list):
outs += t.valuetype
else:
outs.append(t.valuetype)
else:
if isinstance(t.elementtype, list):
outs += t.elementtype
else:
outs.append(t.elementtype)
outs = TypeObject.removeRedundantTypes(outs)
return [rej_ttypes, rej_itypes,rej_itypes2,rej_itypes3], outs
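# Hedged example (per the subscript-read rules above): reading a subscript of
# a Dict yields its value types with no constraint on the index, while List,
# Tuple and Text only accept int/bool indices; Text, bytes and bytearray yield
# themselves, other containers yield their element types.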
def unop_assign(self, left, right):
if left!= None:
ltypes = left.types
rej_ltypes = []
return [rej_ltypes], ltypes
else:
logger.error("Cannot find the right value in assignment. This happens because you use a feature that is not supported by HiTyper.")
raise ValueError("Cannot find the right value in assignment. This happens because you use a feature that is not supported by HiTyper.")
def call(self, operands, func, attr, usertypes, curnode):
#====================================================================
# case 1: Class instance and user-defined types
#====================================================================
if attr==None and func in usertypes:
rej_types = []
for i in range(0, len(operands)):
rej_types.append([])
typeobject = TypeObject(func, 2)
return rej_types, [typeobject]
else:
if func in usertypes:
rej_types = []
for i in range(0, len(operands)):
rej_types.append([])
typeobject = TypeObject(func, 2)
return rej_types, [typeobject]
#====================================================================
# case 2: built-in function
#====================================================================
#====================================================================
# case 2.1: regular function
#====================================================================
if func not in builtin_method_properties["self-changable"]["overall"] and func not in builtin_method_properties["special-return"]["overall"]:
#member functions
if attr != None:
target = operands[0]
rej_types = []
rej_target_types = []
rej_arg_types = []
outs = []
accpetable_targettypes = []
returntypes = []
for i in range(1, len(operands)):
rej_arg_types.append([])
for k in builtin_method:
if func in builtin_method[k]:
accpetable_targettypes.append(k)
for t in target.types:
if TypeObject.existSame(t, accpetable_targettypes):
rule = builtin_method[TypeObject.findSame(t, accpetable_targettypes)][func]
if len(rule) == 2:
if len(rule[0]) == 0:
for o in operands:
rej_types.append([])
if "@" not in rule[1]:
returntypes = TypeObject.Str2Obj(rule[1])
else:
logger.warning("Unhandled return value for built-in function {}".format(func))
elif len(rule[0]) < len(operands) - 1:
rej_target_types.append(t)
continue
else:
for index in range(1, len(operands)):
if rule[0][index - 1][1] == "Any" or isinstance(rule[0][index - 1], list):
rej_types.append([])
continue
elif rule[0][index - 1][1].startswith("@") and rule[0][index - 1][1] in special_types:
validtypes = []
for i in special_types[rule[0][index - 1][1]]:
validtypes += TypeObject.Str2Obj(i)
elif rule[0][index - 1][1] == "@elementtype@":
validtypes = t.elementtype
elif rule[0][index - 1][1] == "@keytype@":
validtypes = t.keytype
elif rule[0][index - 1][1] == "@valuetype@":
validtypes = t.valuetype
else:
validtypes = TypeObject.Str2Obj(rule[0][index - 1][1])
for ot in operands[index].types:
if not TypeObject.existType(ot, validtypes):
rej_arg_types[index - 1].append(ot)
if "@" not in rule[1]:
returntypes = TypeObject.Str2Obj(rule[1])
else:
logger.warning("Unhandled return value for built-in function {}".format(func))
elif len(rule) > 2:
found = False
for r in rule:
if isinstance(r, list) and len(r) == len(operands) - 1:
found = True
for index in range(1, len(operands)):
if r[index - 1][1] == "Any" or isinstance(r[index - 1], list):
rej_types.append([])
continue
elif r[index - 1][1].startswith("@") and r[index - 1][1] in special_types:
validtypes = []
for i in special_types[r[index - 1][1]]:
validtypes += TypeObject.Str2Obj(i)
elif r[index - 1][1] == "@elementtype@":
validtypes = t.elementtype
elif r[index - 1][1] == "@keytype@":
validtypes = t.keytype
elif r[index - 1][1] == "@valuetype@":
validtypes = t.valuetype
else:
validtypes = TypeObject.Str2Obj(r[index - 1][1])
rej_optypes = []
for ot in operands[index].types:
if not TypeObject.existType(ot, validtypes):
rej_arg_types[index - 1].append(ot)
if "@" not in rule[-1]:
returntypes = TypeObject.Str2Obj(rule[-1])
else:
logger.warning("Unhandled return value for built-in function {}".format(func))
if found == False:
for r in rule:
if isinstance(r, list) and len(r) > len(operands) - 1:
found = True
for index in range(1, len(operands)):
if r[index - 1][1] == "Any" or isinstance(r[index - 1], list):
rej_types.append([])
continue
elif r[index - 1][1].startswith("@") and r[index - 1][1] in special_types:
validtypes = []
for i in special_types[r[index - 1][1]]:
validtypes += TypeObject.Str2Obj(i)
elif r[index - 1][1] == "@elementtype@":
validtypes = t.elementtype
elif r[index - 1][1] == "@keytype@":
validtypes = t.keytype
elif r[index - 1][1] == "@valuetype@":
validtypes = t.valuetype
else:
validtypes = TypeObject.Str2Obj(r[index - 1][1])
rej_optypes = []
for ot in operands[index].types:
if not TypeObject.existType(ot, validtypes):
rej_arg_types[index - 1].append(ot)
if "@" not in rule[-1]:
returntypes = TypeObject.Str2Obj(rule[-1])
else:
logger.warning("Unhandled return value for built-in function {}".format(func))
else:
rej_target_types.append(t)
rej_types = [rej_target_types] + rej_arg_types
outs = returntypes
return rej_types, outs
#standalone functions
else:
if func in builtin_method["standalone"]:
rej_types = []
outs = []
rule = builtin_method["standalone"][func]
for i in range(0, len(operands)):
rej_types.append([])
if len(rule) == 2:
if len(rule[0]) < len(operands):
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
for i in range(0, len(operands)):
if len(rule[0]) == 0 or rule[0][i][1] == "Any" or isinstance(rule[0][i], list):
rej_types.append([])
continue
elif rule[0][i][1].startswith("@") and rule[0][i][1] in special_types:
validtypes = []
for t in special_types[rule[0][i][1]]:
validtypes += TypeObject.Str2Obj(t)
else:
validtypes = TypeObject.Str2Obj(rule[0][i][1])
for ot in operands[i].types:
if not TypeObject.existType(ot, validtypes):
rej_types[i].append(ot)
if "@" not in rule[-1]:
returntypes = TypeObject.Str2Obj(rule[-1])
else:
logger.warning("Unhandled return value for built-in function {}".format(func))
elif len(rule) > 2:
found = False
for r in rule:
if isinstance(r, list) and len(r) == len(operands):
found = True
for i in range(0, len(operands)):
if r[i][1] == "Any" or isinstance(r[i], list):
rej_types.append([])
continue
elif r[i][1].startswith("@") and r[i][1] in special_types:
validtypes = []
for t in special_types[r[i][1]]:
validtypes += TypeObject.Str2Obj(t)
else:
validtypes = TypeObject.Str2Obj(r[i][1])
for ot in operands[i].types:
if not TypeObject.existType(ot, validtypes):
rej_types[i].append(ot)
if "@" not in rule[-1]:
returntypes = TypeObject.Str2Obj(rule[-1])
else:
logger.warning("Unhandled return value for built-in function {}".format(func))
if found == False:
for r in rule:
if isinstance(r, list) and len(r) > len(operands) - 1:
found = True
for i in range(0, len(operands)):
if r[i][1] == "Any" or isinstance(r[i], list):
rej_types.append([])
continue
elif r[i][1].startswith("@") and r[i][1] in special_types:
validtypes = []
for t in special_types[r[i][1]]:
validtypes += TypeObject.Str2Obj(t)
else:
validtypes = TypeObject.Str2Obj(r[i][1])
for ot in operands[i].types:
if not TypeObject.existType(ot, validtypes):
rej_types[i].append(ot)
if "@" not in rule[-1]:
returntypes = TypeObject.Str2Obj(rule[-1])
else:
logger.warning("Unhandled return value for built-in function {}".format(func))
outs = returntypes
return rej_types, outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
#====================================================================
# case 2.2: self-changable function
#====================================================================
#list.append()
elif func == "append":
if attr!=None:
failed = False
rej_types = []
target = operands[0]
rej_target_types = []
temp = TypeObject("List", 0)
outs = []
for t in target.types:
if not TypeObject.existSame(t, ["List"]):
rej_target_types.append(t)
elif TypeObject.existSame(t, ["List"]):
for types_t in t.elementtype:
temp.elementtype.append(types_t)
if len(rej_target_types) == len(target.types):
outs += target.types
failed = True
rej_types.append(rej_target_types)
rej_target_types = []
for i in range(1, len(operands)):
for t in operands[i].types:
if not TypeObject.existType(t, temp.elementtype):
temp.elementtype.append(t)
rej_types.append(rej_target_types)
if failed == False:
outs.append(temp)
return rej_types, outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
#list,set,dict.clear()
elif func=="clear":
if attr!=None:
# note that clear() will remove all the existing element types
#TODO how to deal with this situation?
rej_types = []
target = operands[0]
rej_target_types = []
outs = []
for t in target.types:
if not TypeObject.existSame(t, ["List","Dict","set"]):
rej_target_types.append(t)
else:
temp = TypeObject(t.type, 0)
outs.append(temp)
rej_types.append(rej_target_types)
for i in range(1, len(operands)):
rej_types.append([])
return rej_types, outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
#list.extend()
elif func == "extend":
# only 2 operands
if attr!=None:
rej_types = []
target = operands[0]
rej_target_types = []
temp = TypeObject("List", 0)
outs = []
for t in target.types:
if not TypeObject.existSame(t, ["List"]):
rej_target_types.append(t)
elif TypeObject.existSame(t, ["List"]):
for types_t in t.elementtype:
temp.elementtype.append(types_t)
rej_types.append(rej_target_types)
rej_target_types = []
for i in range(1, len(operands)):
for t in operands[i].types:
if not TypeObject.existSame(t, ["List"]):
rej_target_types.append(t)
for types_t in t.elementtype:
if not TypeObject.existType(types_t, temp.elementtype):
temp.elementtype.append(types_t)
rej_types.append(rej_target_types)
outs.append(temp)
return rej_types, outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
#list.insert()
elif func == "insert":
# 3 operands here: 1. list / 2. int index / 3. object to insert
if attr != None:
rej_types = []
target = operands[0]
rej_target_types = []
temp = TypeObject("List", 0)
outs = []
for t in target.types:
if not TypeObject.existSame(t, ["List"]):
rej_target_types.append(t)
elif TypeObject.existSame(t, ["List"]):
for types_t in t.elementtype:
temp.elementtype.append(types_t)
rej_types.append(rej_target_types)
# second one is int
if len(operands) > 1:
rej_target_types = []
sub_temp = operands[1]
for t in sub_temp.types:
if not TypeObject.existSame(t, ["int","bool"]):
rej_target_types.append(t)
rej_types.append(rej_target_types)
rej_target_types = []
for i in range(1, len(operands)):
for t in operands[i].types:
if not TypeObject.existType(t, temp.elementtype):
temp.elementtype.append(t)
rej_types.append(rej_target_types)
outs.append(temp)
return rej_types, outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
#list,set,dict.pop()
elif func == "pop":
#list1.pop(1) or just pop()
# pop(key[,default])
if attr!=None:
rej_types = []
target = operands[0]
rej_target_types = []
outs = []
for t in target.types:
if not TypeObject.existSame(t, ["List","Dict", "Set"]):
rej_target_types.append(t)
else:
# here we don't change the possible types in it because it's hard to say
outs.append(t)
rej_types.append(rej_target_types)
# the second one has to be int(for list),if there is
for i in range(1, len(operands)):
rej_types.append([])
return rej_types, outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
#list.remove(), set.remove()/discard()
elif func=="remove" or func=="discard":
if attr!=None:
#set.discard(value)
#aList.remove('xyz');
rej_types = []
target = operands[0]
rej_target_types = []
outs = []
temp = None
for t in target.types:
if not TypeObject.existSame(t, ["List", "Set"]):
rej_target_types.append(t)
else:
# here we don't change the possible types in it because it's hard to say
temp =deepcopy(t)
outs.append(temp)
rej_types.append(rej_target_types)
# the second one has to be int(for list),if there is
for i in range(1, len(operands)):
rej_types.append([])
if temp!= None:
outs.append(temp)
else:
outs= []
return rej_types, outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
elif func=="update":
if attr!=None:
# dict.update(dict2)
# it is also possible that it's a user-defined function
if len(operands)>0:
rej_types = []
target = operands[0]
rej_target_types = []
outs = []
temp = TypeObject("Dict",0)
for t in target.types:
if not TypeObject.existSame(t, ["Dict"]):
rej_target_types.append(t)
else:
temp = deepcopy(t)
rej_types.append(rej_target_types)
# the second one has to be int(for list),if there is
if len(operands) > 1:
target = operands[1]
rej_target_types = []
for t in target.types:
if not TypeObject.existSame(t, ["Dict"]):
rej_target_types.append(t)
else:
temp.keytype += t.keytype
temp.valuetype += t.valuetype
temp.elementtype = temp.keytype
rej_types.append(rej_target_types)
if len(operands)>2:
for i in range(2, len(operands)):
rej_types.append([])
if temp!= None:
outs.append(temp)
else:
outs = []
return rej_types, outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
elif func=="intersection" or func=="intersection_update" or func== "union":
#set.intersection(set1, set2 ... etc)
#set.union(set1, set2...)
if attr != None:
rej_types = []
target = operands[0]
rej_target_types = []
temp = None
outs = []
for t in target.types:
if not TypeObject.existSame(t, ["Set"]):
rej_target_types.append(t)
elif TypeObject.existSame(t, ["Set"]):
temp = deepcopy(t)
rej_types.append(rej_target_types)
if func!="union":
# TODO! maybe we can infer a more specific one? but too consuming now.
if len(operands) > 1:
for i in range(1, len(operands)):
target = operands[i]
rej_target_types = []
for t in target.types:
if not TypeObject.existSame(t, ["Set"]):
rej_target_types.append(t)
rej_types.append(rej_target_types)
if temp != None:
outs.append(temp)
else:
outs = []
return rej_types, outs
else:
if len(operands) > 1:
for i in range(1, len(operands)):
target = operands[i]
rej_target_types = []
for t in target.types:
if not TypeObject.existSame(t, ["Set"]):
rej_target_types.append(t)
else:
for eletype in t.elementtype:
if not TypeObject.existType(eletype.type, temp.elementtype):
temp.elementtype.append(eletype)
rej_types.append(rej_target_types)
if temp != None:
outs.append(temp)
else:
outs = []
return rej_types, outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
elif func=="difference" or func=="difference_update":
# z = x.difference(y)
# it equals to x-y
if attr != None:
rej_types = []
target = operands[0]
rej_target_types = []
temp = None
outs = []
for t in target.types:
if not TypeObject.existSame(t, ["Set"]):
rej_target_types.append(t)
elif TypeObject.existSame(t, ["Set"]):
temp = deepcopy(t)
rej_types.append(rej_target_types)
target = operands[1]
rej_target_types = []
for t in target.types:
if not TypeObject.existSame(t, ["Set"]):
rej_target_types.append(t)
rej_types.append(rej_target_types)
if len(operands)>2:
for i in range(2,len(operands)):
rej_types.append([])
if temp!= None:
outs.append(temp)
else:
outs= []
return rej_types, outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
elif func=="add":
#fruits.add("orange")
if attr != None:
rej_types = []
target = operands[0]
rej_target_types = []
temp = None
outs = []
for t in target.types:
if not TypeObject.existSame(t, ["Set"]):
rej_target_types.append(t)
elif TypeObject.existSame(t, ["Set"]):
temp = deepcopy(t)
rej_types.append(rej_target_types)
for i in range(1, len(operands)):
rej_types.append([])
target = operands[i]
# add the possible types in it
for intypes in target.types:
if temp!=None:
if not TypeObject.existType(intypes,temp.elementtype):
temp.elementtype.append(intypes)
if temp!= None:
outs.append(temp)
else:
outs= []
return rej_types, outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
elif func=="symmetric_difference" or func=="symmetric_difference_update":
# set.symmetric_difference(set)
if attr != None:
rej_types = []
target = operands[0]
rej_target_types = []
temp = None
outs = []
for t in target.types:
if not TypeObject.existSame(t, ["Set"]):
rej_target_types.append(t)
elif TypeObject.existSame(t, ["Set"]):
temp = deepcopy(t)
rej_types.append(rej_target_types)
target = operands[1]
rej_target_types = []
for t in target.types:
if not TypeObject.existSame(t, ["Set"]):
rej_target_types.append(t)
else:
for eletype in t.elementtype:
if not TypeObject.existType(eletype.type,temp.elementtype):
temp.elementtype.append(eletype)
rej_types.append(rej_target_types)
if len(operands) > 2:
for i in range(2, len(operands)):
rej_types.append([])
if temp != None:
outs.append(temp)
else:
outs = []
return rej_types, outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
elif func=="popitem":
# dict.popitem()
if attr!=None:
rej_types = []
target = operands[0]
rej_target_types = []
outs = []
temp = TypeObject("Tuple",0)
for t in target.types:
if not TypeObject.existSame(t, ["Dict"]):
rej_target_types.append(t)
else:
temp.elementtype += t.keytype
temp.elementtype += t.valuetype
rej_types.append(rej_target_types)
# the second one has to be int(for list),if there is
for i in range(1, len(operands)):
rej_types.append([])
outs.append(temp)
return rej_types, outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
elif func=="setdefault":
# setdefault is similar to get, but slightly different
# a.setdefault('Sex', "Never")
if attr!=None:
rej_types = []
target = operands[0]
rej_target_types = []
outs = []
for t in target.types:
if not TypeObject.existSame(t, ["Dict"]):
rej_target_types.append(t)
else:
for item in t.valuetype:
if item!=None:
outs.append(item)
rej_types.append(rej_target_types)
# the second one has to be int(for list),if there is
if len(operands)==2:
rej_types.append([])
elif len(operands)==3:
rej_types.append([])
rej_types.append([])
for itypes in operands[2].types:
if not TypeObject.existSame(itypes,outs):
outs.append(itypes)
else:
for i in range(1, len(operands)):
rej_types.append([])
return rej_types, outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
#====================================================================
# case 2.3: special-return function
#====================================================================
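# "Special-return" functions produce a result type derived from the receiver
# instead of mutating it, e.g. in the "items" branch below a Dict receiver
# yields a set whose element is a list of (key, value) tuples built from the
# dict's keytype and valuetype.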
elif func=="copy":
if attr!=None:
rej_types = []
target = operands[0]
rej_target_types = []
outs = []
for t in target.types:
if not TypeObject.existSame(t, ["List", "Dict", "Set"]):
rej_target_types.append(t)
elif TypeObject.existSame(t, ["List", "Dict", "Set"]):
outs.append(t)
rej_types.append(rej_target_types)
for i in range(1, len(operands)):
rej_types.append([])
return rej_types, outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
elif func == "items":
if attr != None:
rej_types = []
target = operands[0]
rej_target_types = []
outs = []
for t in target.types:
if not TypeObject.existSame(t, ["Dict"]):
rej_target_types.append(t)
else:
temp = TypeObject("set", 0)
innerlist = TypeObject("List", 0)
for k in t.keytype:
for v in t.valuetype:
innertuple = TypeObject("Tuple", 0)
innertuple.elementtype.append(k)
innertuple.elementtype.append(v)
innerlist.elementtype.append(innertuple)
temp.elementtype.append(innerlist)
outs.append(temp)
rej_types.append(rej_target_types)
for i in range(1, len(operands)):
rej_types.append([])
return rej_types, outs
elif func == "keys":
if attr != None:
rej_types = []
target = operands[0]
rej_target_types = []
outs = []
for t in target.types:
if not TypeObject.existSame(t, ["Dict"]):
rej_target_types.append(t)
else:
temp = TypeObject("set", 0)
innerlist = TypeObject("List", 0)
for k in t.keytype:
innerlist.elementtype.append(k)
temp.elementtype.append(innerlist)
outs.append(temp)
rej_types.append(rej_target_types)
for i in range(1, len(operands)):
rej_types.append([])
return rej_types, outs
elif func == "values":
if attr != None:
rej_types = []
target = operands[0]
rej_target_types = []
outs = []
for t in target.types:
if not TypeObject.existSame(t, ["Dict"]):
rej_target_types.append(t)
else:
temp = TypeObject("set", 0)
innerlist = TypeObject("List", 0)
for k in t.valuetype:
innerlist.elementtype.append(k)
temp.elementtype.append(innerlist)
outs.append(temp)
rej_types.append(rej_target_types)
for i in range(1, len(operands)):
rej_types.append([])
return rej_types, outs
elif func == "abs":
# as for NOTAttribute function, we have to make sure self.attr==None
if attr == None:
ltypes = operands[0].types
rej_target_types = []
rej_ltypes = []
outs = []
for t in ltypes:
if not TypeObject.existSame(t, special_types["@number@"]):
rej_target_types.append(t)
elif not TypeObject.existSame(t, outs):
if t !=None:
outs.append(t)
rej_ltypes.append(rej_target_types)
for i in range(1, len(operands)):
rej_ltypes.append([])
return rej_ltypes, outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
elif func == "divmod":
# as for NOTAttribute function, we have to make sure self.attr==None
if attr == None:
left = operands[0]
right = operands[1]
ltypes = left.types
rtypes = right.types
basic_type = TypeObject("int", 0)
llevel = rlevel = 1
for lt in ltypes:
if TypeObject.isCompatible(lt, basic_type):
llevel = max(llevel, special_types["@number@"].index(lt.type))
for rt in rtypes:
if TypeObject.isCompatible(rt, basic_type):
rlevel = max(rlevel, special_types["@number@"].index(rt.type))
if llevel < 2 and rlevel < 2:
[rej_ltypes, rej_rtypes], outs = self.binop_num_op(left, right, "%")
else:
[rej_ltypes, rej_rtypes], outs = self.binop_num_op(left, right, "/")
finalouts = TypeObject("Tuple", 0)
finalouts.buildTuple(outs)
return [rej_ltypes, rej_rtypes], finalouts
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
elif func == "enumerate":
if len(operands) <= 2 and len(operands) >= 1:
rej_types = []
outs = []
elementtype = []
for i in operands:
rej_types.append([])
for t in operands[0].types:
if not TypeObject.existSame(t, special_types["@iterable@"]):
rej_types[0].append(t)
elif TypeObject.existSame(t, ["Dict"]):
elementtype += t.keytype
elif TypeObject.existSame(t, ["Text", "bytes", "bytearray"]):
elementtype.append(t)
else:
elementtype += t.elementtype
if len(operands) == 2:
for t in operands[1].types:
if not TypeObject.existSame(t, ["bool", "int"]):
rej_types[1].append(t)
temp = TypeObject("Generator", 0)
for e in elementtype:
innertuple = TypeObject("Tuple", 0)
innertuple.elementtype = [TypeObject("int", 0), e]
temp.elementtype.append(innertuple)
outs.append(temp)
return rej_types, outs
else:
rej_types = []
outs = []
for i in range(0, len(operands)):
rej_types.append([])
return rej_types, outs
elif func == "round":
if attr == None:
# if one ,return int
if len(operands)==1:
ltypes = operands[0].types
rej_target_types = []
rej_ltypes = []
temp = TypeObject("int", 0)
outs = []
for t in ltypes:
if not TypeObject.existSame(t, special_types["@number@"]):
rej_target_types.append(t)
rej_ltypes.append(rej_target_types)
for i in range(1, len(operands)):
rej_ltypes.append([])
outs.append(temp)
return rej_ltypes, outs
# if two operands, return float (variable naming in this branch could be improved later)
elif len((operands))==2:
ltypes = operands[0].types
rtypes = operands[1].types
rej_target_types, rej_rtarget_types = [], []  # separate lists, to avoid aliasing the same object
rej_ltypes = []
temp = TypeObject("float", 0)
outs = []
for t in ltypes:
if not TypeObject.existSame(t, special_types["@number@"]):
rej_target_types.append(t)
rej_ltypes.append(rej_target_types)
for t in rtypes:
if not TypeObject.existSame(t, ["bool", "int"]):
rej_rtarget_types.append(t)
rej_ltypes.append(rej_rtarget_types)
outs.append(temp)
return rej_ltypes, outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
elif func == "sorted":
# the first has to be iterable
# list path tested; dict path not yet verified
if attr == None:
first = operands[0].types
rej_target_types = []
rej_ltypes = []
temp = TypeObject("List", 0)
outs = []
for t in first:
if not TypeObject.existSame(t, special_types["@iterable@"]):
rej_target_types.append(t)
elif TypeObject.existSame(t, ["Dict"]):
temp.elementtype += t.keytype
elif TypeObject.existSame(t, ["Text", "bytes", "bytearray"]):
temp.elementtype.append(t)
else:
if isinstance(t.elementtype, list):
temp.elementtype += t.elementtype
else:
temp.elementtype.append(t.elementtype)
rej_ltypes.append(rej_target_types)
for i in range(1, len(operands)):
rej_ltypes.append([])
outs.append(temp)
return rej_ltypes, outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
elif func == "sum":
if attr == None:
# the first has to be iterable
first = operands[0].types
rej_target_types = []
rej_ltypes = []
isinitial = True
outs = []
simplelevel_up = 0
for t in first:
if not TypeObject.existSame(t, special_types["@iterable@"]):
rej_target_types.append(t)
for elet in t.elementtype:
if isinitial:
isinitial = False
if TypeObject.existSame(elet, special_types["@number@"]):
simplelevel_up = special_types["@number@"].index(elet.type)
else:
simplelevel_up = 0
else:
if TypeObject.existSame(elet, special_types["@number@"]):
simplelevel_up = max(special_types["@number@"].index(elet.type), simplelevel_up)
else:
simplelevel_up = max(1,simplelevel_up)
temp = TypeObject(special_types["@number@"][simplelevel_up], 0)
rej_ltypes.append(rej_target_types)
for i in range(1, len(operands)):
rej_ltypes.append([])
outs.append(temp)
return rej_ltypes, outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
elif func == "tuple":
# as for NOTAttribute function, we have to make sure self.attr==None
if attr == None:
if len(operands)==0:
rej_ltypes = []
outs = TypeObject("Tuple", 0)
return [], [outs]
else:
ltypes = operands[0].types
rej_ltypes = []
outs = TypeObject("Tuple", 0)
for t in ltypes:
if not TypeObject.existSame(t, special_types["@iterable@"]):
rej_ltypes.append(t)
elif TypeObject.existSame(t, ["Dict"]):
outs.elementtype = t.keytype
elif TypeObject.existSame(t, ["Text", "bytes", "bytearray"]):
outs.elementtype.append(t)
else:
outs.elementtype = t.elementtype
return [rej_ltypes], outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
elif func == "list":
# as for NOTAttribute function, we have to make sure self.attr==None
if attr == None:
if len(operands)==0:
rej_ltypes = []
outs = TypeObject("List", 0)
return [], [outs]
elif len(operands) == 1:
ltypes = operands[0].types
rej_ltypes = []
outs = TypeObject("List", 0)
for t in ltypes:
if not TypeObject.existSame(t, special_types["@iterable@"]):
rej_ltypes.append(t)
elif TypeObject.existSame(t, ["Dict"]):
outs.elementtype = t.keytype
elif TypeObject.existSame(t, ["Text", "bytes", "bytearray"]):
outs.elementtype.append(t)
else:
outs.elementtype = t.elementtype
return [rej_ltypes], outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
elif func=="set":
# as for NOTAttribute function, we have to make sure self.attr==None
rej_ltypes = []
if attr == None:
if len(operands)==0:
outs = TypeObject("Set", 0)
return [], [outs]
elif len(operands)==1:
ltypes = operands[0].types
outs = TypeObject("Set", 0)
for t in ltypes:
if not TypeObject.existSame(t, special_types["@iterable@"]):
rej_ltypes.append(t)
elif TypeObject.existSame(t, ["Dict"]):
outs.elementtype = t.keytype
elif TypeObject.existSame(t, ["Text", "bytes", "bytearray"]):
outs.elementtype.append(t)
else:
outs.elementtype = t.elementtype
return [rej_ltypes], [outs]
else:
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
else:
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
elif func=="frozenset":
# as for NOTAttribute function, we have to make sure self.attr==None
rej_ltypes = []
if attr == None:
if len(operands)==0:
outs = TypeObject("frozenset", 0)
return [], [outs]
else:
ltypes = operands[0].types
outs = TypeObject("Set", 0)
for t in ltypes:
if not TypeObject.existSame(t, special_types["@iterable@"]):
rej_ltypes.append(t)
elif TypeObject.existSame(t, ["Dict"]):
outs.elementtype = t.keytype
elif TypeObject.existSame(t, ["Text", "bytes", "bytearray"]):
outs.elementtype.append(t)
else:
outs.elementtype = t.elementtype
return [rej_ltypes], [outs]
else:
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
elif func == "type":
# as for NOTAttribute function, we have to make sure self.attr==None
if attr == None:
if len(operands) == 1:
rej_types = [[]]
outs = []
temp =TypeObject("type", 0)
temp.elementtype = operands[0].types
outs.append(temp)
return rej_types, outs
elif len(operands) == 3:
rej_types = [[], [], []]
outs = []
for t in operands[0].types:
if not TypeObject.existSame(t, ["str"]):
rej_types[0].append(t)
for t in operands[1].types:
if not TypeObject.existSame(t, ["tuple"]):
rej_types[1].append(t)
for t in operands[2].types:
if not TypeObject.existSame(t, ["dict"]):
rej_types[2].append(t)
temp = TypeObject("type", 0)
outs.append(temp)
return rej_types, outs
else:
rej_types = []
outs = []
for i in range(0, len(operands)):
rej_types.append([])
return rej_types, outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
elif func == "next":
if attr == None:
if len(operands) == 1:
rej_types = []
outs = []
elementtype = []
for i in range(0, len(operands)):
rej_types.append([])
for t in operands[0].types:
if not TypeObject.existSame(t, ["Generator"]):
rej_types[0].append(t)
else:
elementtype = t.elementtype
outs += elementtype
return rej_types, outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
elif func == "max" or func=="min":
# the input can be iterable or some argument
# if only one iterable e.g b = max([1,2,4]) / b = max({'a':1,'b':2})
if attr==None:
ltypes = operands[0].types
rej_target_types = []
rej_ltypes = []
outs = []
if len(operands)==1:
temp = []
for t in ltypes:
if not TypeObject.existSame(t, special_types["@iterable@"]):
rej_target_types.append(t)
elif TypeObject.existSame(t, ["Dict"]):
if isinstance(t.keytype, list):
outs += t.keytype
else:
outs.append(t.keytype)
elif TypeObject.existSame(t, ["Text", "bytes", "bytearray"]):
outs.append(t)
else:
if t.elementtype!=[]:
if t.elementtype[0]!= None:
outs.append(t.elementtype[0]) # for list/dict is also okay as it returns keytype
rej_ltypes.append(rej_target_types)
for i in range(1, len(operands)):
rej_ltypes.append([])
# outs.append(temp)
return rej_ltypes, outs
# with many arguments e.g b = max(1,2,3.1) b = max([1,2],[0,4])
else:
ifsimple = False
first = operands[0]
simplelevel_up =simplelevel_down = 0
isinitial = True
for indexop in operands:
for ftypes in indexop.types:
if not TypeObject.existSame(ftypes, special_types["@iterable@"]):
ifsimple = True
if isinitial:
isinitial = False
if TypeObject.existSame(ftypes, special_types["@number@"]):
simplelevel_up = simplelevel_down = special_types["@number@"].index(ftypes.type)
else:
simplelevel_up = simplelevel_down = 1
else:
if TypeObject.existSame(ftypes, special_types["@number@"]):
simplelevel_up = max(special_types["@number@"].index(ftypes.type),simplelevel_up)
simplelevel_down = min(special_types["@number@"].index(ftypes.type), simplelevel_down)
else:
simplelevel_up = max(1,simplelevel_up)
simplelevel_down = max(1,simplelevel_down)
# if it's like b = max([1,2],[0,4])
elif TypeObject.existSame(ftypes, ["Dict"]):
outs += ftypes.keytype
for i in range(0, len(operands)):
rej_ltypes.append([])
return rej_ltypes, outs
else:
if len(outs)==0:
outs.append(ftypes)
elif not TypeObject.existSame(ftypes,outs):
outs.append(ftypes)
# add all the possible types
if ifsimple:
for i in range(simplelevel_down,simplelevel_up+1):
temp = TypeObject(special_types["@number@"][i],"0")
outs.append(temp)
for i in range(0, len(operands)):
rej_ltypes.append([])
return rej_ltypes, outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
elif func == "get":
if attr!=None:
# a.get('Sex', "Never")
rej_types = []
target = operands[0]
rej_target_types = []
outs = []
for t in target.types:
if not TypeObject.existSame(t, ["Dict"]):
rej_target_types.append(t)
else:
for item in t.valuetype:
if item!=None:
outs.append(item)
rej_types.append(rej_target_types)
# the second one has to be int(for list),if there is
for i in range(1, len(operands)):
rej_types.append([])
return rej_types, outs
else:
rej_ltypes = []
for i in range(0, len(operands)):
rej_ltypes.append([])
outs = []
return rej_ltypes, outs
#====================================================================
# case 3: inter-procedural Analysis
#====================================================================
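# Inter-procedural case: the callee is looked up in the global type graph
# (curnode.tg.globaltg); if a type graph with the same class name and the
# callee's name exists (and the callee is not the current function itself),
# its inferred return types are reused as outs, with nothing rejected.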
elif curnode.tg != None and curnode.tg.globaltg != None:
rej_types = []
for i in range(0, len(operands)):
rej_types.append([])
outs = []
for t in curnode.tg.globaltg.tgs:
if t.classname == curnode.tg.classname and t.name == func and func != curnode.tg.name:
returntype = t.getReturnType()
if len(returntype) == 0:
return rej_types, outs
else:
outs += returntype
return rej_types, outs
return rej_types, outs
#====================================================================
# case 4: unrecognized functions
#====================================================================
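# Unrecognized callables: nothing can be inferred, so every operand gets an
# empty rejection list and outs stays empty.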
else:
rej_types = []
for i in range(0, len(operands)):
rej_types.append([])
outs = []
return rej_types, outs
def List_Read(self, operands):
# here we do not consider user-defined element types yet, e.g. list1 = [1, '2', user-defined instance]
# note that the depth here is 2, e.g. List = [1,1,'2',[1,2,3,4],[1,2,'hello']] -> the resulting out will
# only be TypeObject(List) with elementtype [int, int, text, list, list]
# we do not merge identical types, because keeping them helps List_Write inference, e.g. a = [1,2,'3']; [x,y,z] = a
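# Illustrative example (values are hypothetical): for `[1, 'a']` the two
# operands might carry types [int] and [Text]; the result is a single
# TypeObject("List") whose elementtype is [int, Text].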
outs = []
temp = TypeObject("List",0)
for i in range(len(operands)):
# add the type not exists in the elementtype
if isinstance(operands[i].types, list):
temp.elementtype += operands[i].types
else:
temp.elementtype.append(operands[i].types)
outs.append(temp)
rej_types = []
for i in range(0, len(operands)):
rej_types.append([])
return rej_types,outs
def List_Write(self,operands):
# input: operands output:[[]], [outs_types]
# in this func, we first find the possible types from the input, then we add all the possible types into them
# e.g. c= [1,'2',3] [a,a1,a2]= c
# because types keep propagating after this node, we will infer that out = [int, text]
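# Illustrative example (values are hypothetical): if the incoming SymbolNode
# carries List with elementtype [int, Text, int], inputtypes collects
# [int, Text, int] and outs becomes the de-duplicated [int, Text].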
rej_types = []
outs = []
inputtypes = []
if len(operands) != 1:
logger.error("The length of input nodes for ListWrite should be 1, we get {} here.".format(len(operands)))
raise ValueError("The length of input nodes for ListWrite should be 1")
'''
elif len(operands[0].outs) != 1:
logger.error("The operand's out length should be 1, we get {} here.".format(len(operands[0].outs)))
raise ValueError("The operand's out length should be 1")
'''
for insnode in operands[0].ins:
if isinstance(insnode, hityper.tdg.SymbolNode):
if isinstance(insnode.types, list):
# here we add all the elementtype rather than types
for eacheletype in insnode.types:
if not isinstance(eacheletype.elementtype, list):
inputtypes.append(eacheletype.elementtype)
elif isinstance(eacheletype.elementtype, list):
inputtypes += eacheletype.elementtype
outs = TypeObject.removeRedundantTypes(inputtypes)
return [rej_types], outs
def Tuple_Read(self,operands):
# similar to List_Read
outs = []
temp = TypeObject("Tuple", 0)
for i in range(len(operands)):
if isinstance(operands[i].types, list):
temp.elementtype += operands[i].types
else:
temp.elementtype.append(operands[i].types)
outs.append(temp)
rej_types = []
for i in range(0, len(operands)):
rej_types.append([])
return rej_types,outs
def Tuple_Write(self,operands):
# input: operands output:[[]], [outs_types]
# in this func, we first find the possible types from the input, then we add all the possible types into them
# e.g. c= (1,'2',3) (a,a1,a2)= c => we will infer out=[int, text].
rej_types = []
outs = []
inputtypes = []
if len(operands) != 1:
logger.error("The length of input nodes for TupleWrite should be 1, we get {} here.".format(len(operands)))
raise ValueError("The length of input nodes for TupleWrite should be 1")
# here we do not constrain the out length because it can be like below:
# for i, (setting_value, setting_type) in enumerate(zip(all_values, all_types)):
# elif len(operands[0].outs) != 1:
# raise ValueError("The operand's out length should be 1")
if operands[0].name != "forin":
for insnode in operands[0].ins:
if isinstance(insnode, hityper.tdg.SymbolNode):
if isinstance(insnode.types, list):
# here we add all the elementtype rather than types
for eacheletype in insnode.types:
if not isinstance(eacheletype.elementtype, list):
inputtypes.append(eacheletype.elementtype)
elif isinstance(eacheletype.elementtype, list):
inputtypes += eacheletype.elementtype
elif isinstance(insnode, hityper.tdg.TypeGenNode): # like forin node
if isinstance(insnode.types, list):
# here we add all the elementtype rather than types
for eacheletype in insnode.types:
if not isinstance(eacheletype.elementtype, list):
inputtypes.append(eacheletype.elementtype)
elif isinstance(eacheletype.elementtype, list):
inputtypes += eacheletype.elementtype
outs = TypeObject.removeRedundantTypes(inputtypes)
return [rej_types], outs
# if it's realized by forin
else:
for insnode in operands[0].types:
if isinstance(insnode, hityper.tdg.TypeObject):
if not isinstance(insnode.elementtype, list):
# here we add all the elementtype rather than types
inputtypes.append(insnode.elementtype)
elif isinstance(insnode.elementtype, list):
inputtypes += insnode.elementtype
outs = TypeObject.removeRedundantTypes(inputtypes)
return [rej_types], outs
def Set_Read(self,operands):
# similar to List_Read
rej_types = []
outs = []
temp = TypeObject("Set", 0)
for i in range(len(operands)):
if isinstance(operands[i].types, list):
temp.elementtype += operands[i].types
else:
temp.elementtype.append(operands[i].types)
outs.append(temp)
rej_types = []
for i in range(0, len(operands)):
rej_types.append([])
return rej_types,outs
def Dict_Read(self,operands):
# similar to List_Read, but also fills keytype and valuetype
rej_types = []
outs = []
temp = TypeObject("Dict", 0)
# according to the rules, the first half of the operands are key types and the second half are value types
if(len(operands)%2!=0):
logger.warning('len(operands) is odd. case a: lambda case b: {**kw}' )
for i in range(int(len(operands)/2)):
if isinstance(operands[i].types, list):
temp.elementtype += operands[i].types
else:
temp.elementtype.append(operands[i].types)
temp.keytype = temp.elementtype
for i in range(int(len(operands)/2),len(operands)):
if isinstance(operands[i].types, list):
temp.valuetype += operands[i].types
else:
temp.valuetype.append(operands[i].types)
outs.append(temp)
rej_types = []
for i in range(0, len(operands)):
rej_types.append([])
return rej_types,outs
def JoinedStr(self,operands):
rej_types = []
outs = []
for i in range(0, len(operands)):
rej_types.append([])
outs = [TypeObject("Text", 0)]
return rej_types, outs
def Attribution_Return(self,operands,existstype=None):
outs = []
'''
if existstype==None:
# it means no existstype here
temp = TypeObject("Any",0)
outs.append(temp)
'''
rej_types = []
for i in range(0, len(operands)):
rej_types.append([])
return rej_types, outs
def dictcomp_Retrun(self,operands):
temp = TypeObject("Dict", 0)
outs = []
rej_types = []
# there are 2 operands: one is the symbol of the element (key), the other is the symbol of the value
if len(operands) == 2:
rej_target_types = []
# element types
ltypes = operands[0].types
if isinstance(ltypes, list):
temp.elementtype += ltypes
else:
temp.elementtype.append(ltypes)
temp.keytype = temp.elementtype
rej_types.append(rej_target_types)
# value types
rej_target_types = []
ltypes = operands[1].types
if isinstance(ltypes, list):
temp.valuetype += ltypes
else:
temp.valuetype.append(ltypes)
rej_types.append(rej_target_types)
outs.append(temp)
return rej_types, outs
def listcomp_Return(self,operands):
temp = TypeObject("List", 0)
outs = []
rej_types = []
# there is 1 operand: the symbol of the element
if len(operands) == 1:
rej_target_types = []
# element types
ltypes = operands[0].types
if isinstance(ltypes, list):
temp.elementtype += ltypes
else:
temp.elementtype.append(ltypes)
rej_types.append(rej_target_types)
# value types
outs.append(temp)
return rej_types, outs
else:
for i in range(len(operands)):
rej_types.append([])
return rej_types, outs
def setcomp_Return(self,operands):
temp = TypeObject("Set", 0)
outs = []
rej_types = []
# there is 1 operand: the symbol of the element
if len(operands) == 1:
rej_target_types = []
# element types
ltypes = operands[0].types
if isinstance(ltypes, list):
temp.elementtype += ltypes
else:
temp.elementtype.append(ltypes)
rej_types.append(rej_target_types)
# value types
outs.append(temp)
return rej_types, outs
else:
for i in range(len(operands)):
rej_types.append([])
return rej_types, outs
def GeneratorExp_Return(self,operands):
temp = TypeObject("Generator", 0)
outs = []
rej_types = []
# there is 1 operand: the symbol of the element
if len(operands) == 1:
rej_target_types = []
# element types
ltypes = operands[0].types
if isinstance(ltypes, list):
temp.elementtype += ltypes
else:
temp.elementtype.append(ltypes)
rej_types.append(rej_target_types)
# value types
outs.append(temp)
return rej_types, outs
else:
for i in range(len(operands)):
rej_types.append([])
return rej_types, outs
def yieldop(self, operands):
temp = TypeObject("Generator", 0)
outs = []
rej_types = []
for o in operands:
for t in o.types:
if not TypeObject.existSame(t, temp.elementtype):
temp.elementtype.append(t)
rej_types.append([])
outs.append(temp)
return rej_types, outs
def IfExp(self, operands):
if len(operands) != 2:
logger.warning("IfExp requires 2 arguments, currently got {}".format(len(operands)))
raise ValueError("IfExp requires 2 arguments, currently got {}".format(len(operands)))
outs = []
rej_types = [[], []]
for o in operands:
for i in o.types:
if not TypeObject.existSame(i, outs):
outs.append(i)
return rej_types, outs
``` |
{
"source": "JohnnyPeng18/MatasanoCrypto",
"score": 3
} |
#### File: MatasanoCrypto/matasano/attacker.py
```python
import abc
import time
import math
import random
import functools
import itertools
import collections
import string
import matasano.oracle
import matasano.blocks
import matasano.stats
import matasano.prng
import matasano.stream
import matasano.util
import matasano.hash
import matasano.public
import matasano.mac
import matasano.math
__author__ = 'aldur'
class Attacker(object):
"""The generic, abstract, attacker."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def __init__(self, oracle: matasano.oracle.Oracle):
self.oracle = oracle
@abc.abstractmethod
def attack(self) -> bool:
"""
Perform the attack against the oracle.
The default implementation does nothing.
:return: True if the attack was successful.
"""
return False
class Eavesdropper(object):
"""The generic, abstract, eavesdropper."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def attack(self) -> bool:
"""
Perform the eavesdrop attack.
The default implementation does nothing.
:return: True if the attack was successful.
"""
return False
class AttackerProfileForUser(Attacker):
"""
An admin-user forger.
:param oracle: An instance of OracleProfileForUser.
"""
def __init__(self, oracle: matasano.oracle.OracleProfileForUser):
super().__init__(oracle)
"""
A block, whose decryption ends perfectly with the last "="
of the kv string.
"""
self._base_user = None
self._role = None
def get_base_user_profile(self):
"""
Ask the oracle for a specific profile, such that it is of the form:
user=...&uid=...&role= || ...
As you can see, we want the role to be isolated on a single block.
"""
email_suffix = "@foo.com"
fixed = sum(
len(s) for s
in (
"email", "uid", "role", # The keys of the string
"&", "&", "=", "=", "=", # The meta characters
"1", # The user ID (1-9 is fine)
)
)
email = "a" * (32 - fixed - len(email_suffix)) + email_suffix
assert len(email) + fixed == 32
email = email.encode("ascii")
self._base_user = self.oracle.experiment(email)[0:32]
def get_role_block(self):
"""
Ask the oracle for a block of the form:
... || admin | padding || ...
We can provide the oracle only emails.
Let's build an ad-hoc trap.
"""
fixed = sum(
len(s) for s
in (
"email", # The keys of the string
"=", # The meta characters
)
)
email = b"a" * (16 - fixed - len(b"@")) + b"@"
assert len(email) + fixed == 16
email += matasano.blocks.pkcs_7(b"admin", 16)
assert len(email) + fixed == 32
email += b".com"
self._role = self.oracle.experiment(email)[16:32]
def attack(self) -> bool:
"""
Perform the attack.
        Get the base block, add the role, and ask the oracle to verify the result.
"""
assert self._base_user
assert self._role
user = self._base_user + self._role
return self.oracle.guess(user)
class AttackerBitFlippingCBC(Attacker):
"""
The attacker against the Bit Flipping CBC Oracle.
Forge a byte buffer such that, once encrypted,
will be manipulated in order to create a user
having admin rights (i.e. containing the string
";admin=true;"
)
We know for sure that the oracle escapes the meta
characters ";" and "=".
As a consequence, we won't use them, and we'll
manipulate the CBC cipher-text.
"""
def __init__(self, oracle: matasano.oracle.OracleBitflipping):
super().__init__(oracle)
self.prefix_len = 32 # The len of the string prefixed
def attack(self) -> bool:
"""
Perform the attack against the oracle.
:return: True if the attack was successful.
"""
# The prefix string is exactly 32 bytes,
# so we can simply ignore it
# We'll use the first block in order to manipulate the next one.
trap = b"\x00" * 16
# XOR the meta chars, in order to hide them
trap += bytes((ord(";") ^ 1,)) # 1-st
trap += b"admin"
trap += bytes((ord("=") ^ 1,)) # 7-th
trap += b"true"
cipher = bytearray(self.oracle.experiment(trap))
for i in (0, 6):
cipher[self.prefix_len + i] ^= 1
return self.oracle.guess(bytes(cipher))
class AttackerByteAtATimeEcb(Attacker):
"""
The attacker against the One Byte at a Time Ecb Oracle.
The oracle holds an unknown string.
The attacker's goal are:
- guess the block size of encryption, as used by the oracle (16)
- guess the AES encryption mode (ECB)
- discover the unknown fixed string, one byte at a time.
"""
def __init__(self, oracle: matasano.oracle.OracleByteAtATimeEcb):
super().__init__(oracle)
self.block_size = -1
self.unhidden_string = b""
@staticmethod
def get_fill_bytes_len(i: int, block_size: int, prefix_len: int = 0) -> int:
"""
We want the i-th byte after the input to be the last of a block.
i.e. i equal to 0 means we want the first byte after the input,
and that this byte is the last of a block.
Return the number of bytes to send to the oracle,
knowing that it will prefix them with prefix_len bytes.
... | fill_bytes | ....i || ...
:param i: The index of the interested byte.
:param block_size: The block size.
:param prefix_len: The len of the string prefixed to the attacker's input.
"""
assert i >= 0
assert prefix_len >= 0
fill_bytes_len = \
block_size - (i % block_size) - 1
if prefix_len:
fill_bytes_len -= prefix_len % block_size
if fill_bytes_len < 0:
fill_bytes_len %= block_size
if not fill_bytes_len:
fill_bytes_len += block_size
assert 0 < fill_bytes_len <= 16, \
"Got wrong fill_bytes_len: {}".format(fill_bytes_len)
return fill_bytes_len
def attack(self) -> bool:
"""
Perform the attack against the oracle.
:return: True if the attack was successful.
"""
self.discover_block_size()
is_ecb = self.discover_encryption_mode()
if not is_ecb:
# We don't know how to do it!
return False
# The len of the hidden string.
# Excluding padding.
hidden_string_len = len(
self.oracle.experiment(b"0" * self.block_size)
) - self.block_size
for i in range(hidden_string_len):
byte = self.byte_discovery(i)
self.unhidden_string += byte
self.unhidden_string = self.unhidden_string.rstrip(b"\x00")
self.unhidden_string = matasano.blocks.un_pkcs_7(
self.unhidden_string, self.block_size
)
return self.oracle.guess(self.unhidden_string)
def byte_discovery(self, i: int) -> bytes:
"""
Attack the oracle in order to know the ith
byte of the hidden string.
:param i: byte of interest position
:return: The ith byte of the hidden string.
"""
assert self.block_size > 0, \
"Please discover the block size before calling me!"
assert 0 <= i <= len(self.unhidden_string), \
"You're missing the string prefix!"
"""
The byte we want to discover,
must be the last of a block.
We need to submit at least a byte to the oracle.
Thus we can prepend 1 to block_len bytes of buffer to be encrypted.
"""
"""
The total number of bytes that we must supply to the oracle,
for the byte at index i to be last of a block.
"""
fill_bytes_len = AttackerByteAtATimeEcb.get_fill_bytes_len(
i, self.block_size
)
"""
The bytes that we will be comparing.
"""
slice_to_ith_block = matasano.blocks.bytes_to_block(
self.block_size,
matasano.blocks.ith_byte_block(self.block_size, i + 1)
)
"""
The string we will send the oracle while building
the comparison map.
"""
trap = b"\x00" * fill_bytes_len + self.unhidden_string
comparison = {
self.oracle.experiment(
trap + c
)[slice_to_ith_block]: c
for c in (bytes(chr(c), "ascii") for c in range(0, 128))
}
"""
Now we simply remove the already unhidden string from the trap.
"""
trap = b"\x00" * fill_bytes_len
cipher = self.oracle.experiment(
trap
)[slice_to_ith_block]
# assert cipher in comparison, \
# "Returned cipher is not in previous comparison dictionary. " \
# "Something went wrong!"
# When we get to padding bytes, we can't decrypt anymore.
return comparison.get(cipher, b"\x00")
def discover_block_size(self) -> int:
"""
Discover the block size used by the oracle,
by feeding it a byte at the time.
When the size of the cipher will change,
we'll have found our block size!
:return: The block size used by the oracle.
"""
i = 1
b = b"A" * i
block_size = len(self.oracle.experiment(b))
while True:
i += 1
b = b"A" * i
t_block_size = len(self.oracle.experiment(b))
if block_size != t_block_size:
self.block_size = t_block_size - block_size
return self.block_size
def discover_encryption_mode(self) -> bool:
"""
Try guessing the encryption mode of the oracle.
As usual, finding equal blocks means that the encryption
mode is probably stateless (ECB).
:return: True if the oracle is using ECB.
"""
assert self.block_size > 0, \
"Please discover the block size before calling me!"
b = b"\x00" * self.block_size * 3
cipher = self.oracle.experiment(b)
return matasano.blocks.any_equal_block(cipher)
class AttackerHarderByteAtATimeEcb(AttackerByteAtATimeEcb):
"""
The attacker against the Harder One Byte at a Time Ecb Oracle.
The oracle holds an unknown string.
The attacker's goal are:
- guess the block size of encryption, as used by the oracle (16)
- guess the AES encryption mode (ECB)
- discover the unknown fixed string, one byte at a time.
It's harder respect to One Byte at a Time because the oracle,
before encrypting, prefix the attacker's input with a Random,
static string.
"""
def __init__(self, oracle: matasano.oracle.OracleHarderByteAtATimeEcb):
super().__init__(oracle)
self.prefix_len = -1
def discover_fixed_string_len(self) -> int:
"""
Discover the length of the fixed string prefix.
First of all, discover the last block containing the prefix.
        How? Let the oracle encrypt two different single-byte inputs.
        The first block in which the encryptions differ is the
last one of the prefix.
As a special case, the prefix length could be a multiple of the
block size. We'll handle this case later.
Now, we know the last block size.
Start letting the oracle encrypt a growing number of bytes:
0, 00, 000, 0000, 00000
Confront the result.
When we found that the current result for the last block,
is equal to the previous one, we've found the length.
... || prefix | 0000 || ... == ... || prefix | 0000 || 0..
:return: The length of the fixed string prefix.
"""
assert self.block_size > 0, \
"Please discover the block size before calling me!"
a = self.oracle.experiment(b"a")
b = self.oracle.experiment(b"b")
last_prefix_block = -1 # The prefix string lies in those blocks
block_slice = None
assert len(a) == len(b)
for i in range(0, len(a) // self.block_size):
block_slice = matasano.blocks.bytes_in_block(self.block_size, i)
if a[block_slice] != b[block_slice]:
last_prefix_block = i
break
assert last_prefix_block != -1, \
"Something went wrong while finding the last prefix block."
previous = a
for i in range(2, self.block_size + 1):
new = self.oracle.experiment(b"a" * i)
if previous[block_slice] == new[block_slice]:
prefix_len = self.block_size - i + 1
break
else:
previous = new
else:
prefix_len = 0
prefix_len += self.block_size * last_prefix_block
self.prefix_len = prefix_len
return prefix_len
def byte_discovery(self, i: int) -> bytes:
"""
Attack the oracle in order to know the ith
byte of the hidden string.
:param i: byte of interest position
:return: The ith byte of the hidden string.
"""
assert self.block_size > 0, \
"Please discover the block size before calling me!"
assert self.prefix_len > 0, \
"Please discover the prefix len before calling me!"
assert 0 <= i <= len(self.unhidden_string), \
"You're missing the string prefix!"
"""
The byte we want to discover,
must be the last of a block.
We need to submit at least a byte to the oracle.
Thus we can prepend 1 to block_len bytes of buffer to be encrypted.
"""
"""
The total number of bytes that we must supply to the oracle,
for the byte at index i to be last of a block.
"""
fill_bytes_len = AttackerByteAtATimeEcb.get_fill_bytes_len(
i, self.block_size, self.prefix_len
)
"""
The bytes that we will be comparing.
"""
slice_to_ith_block = matasano.blocks.bytes_to_block(
self.block_size,
matasano.blocks.ith_byte_block(
self.block_size, self.prefix_len + i + 1
)
)
"""
The string we will send the oracle while building
the comparison map.
"""
trap = b"\x00" * fill_bytes_len + self.unhidden_string
comparison = {
self.oracle.experiment(
trap + c
)[slice_to_ith_block]: c
for c in (bytes(chr(c), "ascii") for c in range(0, 128))
}
"""
Now we simply remove the already unhidden string from the trap.
"""
trap = b"\x00" * fill_bytes_len
cipher = self.oracle.experiment(
trap
)[slice_to_ith_block]
# assert cipher in comparison, \
# "Returned cipher is not in previous comparison dictionary. " \
# "Something went wrong!"
# When we get to padding bytes, we can't decrypt anymore.
return comparison.get(cipher, b"\x00")
def attack(self) -> bool:
"""
Perform the attack against the oracle.
:return: True if the attack was successful.
"""
self.discover_block_size()
is_ecb = self.discover_encryption_mode()
if not is_ecb:
# We don't know how to do it!
return False
self.discover_fixed_string_len()
# The len of the hidden string.
# Excluding padding.
hidden_string_len = len(
self.oracle.experiment(b"0" * self.block_size)
) - (self.prefix_len % self.block_size) - self.block_size
for i in range(hidden_string_len):
byte = self.byte_discovery(i)
self.unhidden_string += byte
self.unhidden_string = self.unhidden_string.rstrip(b"\x00")
self.unhidden_string = matasano.blocks.un_pkcs_7(
self.unhidden_string,
self.block_size
)
return self.oracle.guess(self.unhidden_string)
class AttackerCBCPadding(Attacker):
"""
The attacker against the CBC padding oracle.
The oracle holds an unknown string.
The attacker's goal is to discover such string.
The oracle provides a method to check whether
the plaintext related to a given ciphertext has been
correctly padded.
This is a side-channel and we'll use it.
"""
def __init__(self, oracle: matasano.oracle.OracleCBCPadding):
super().__init__(oracle)
self.discovered_string = b""
def attack(self) -> bool:
"""
The oracle provides a padding check method.
An attacker can exploit such method to discover
the encrypted string, while ignoring the encryption key.
How?
For each block of the ciphertext, reveal bytes from last to first.
To do this, create a custom IV, whose byte of interest is set
to values from 0 to 255.
Send the block and the IV to the decryption oracle and check
if the padding is correct.
If it is correct, then AES_CBC_D(block) ^ custom IV produces
a correct padding.
As a consequence, the byte of interest ^ its position
(i.e. the padding) ^ the same byte of the previous block reveals
the original plaintext.
"""
ciphertext, iv = self.oracle.challenge()
previous = iv
for b in range(len(ciphertext) // 16):
discovered = [] # Store already discovered bytes of the block
block = ciphertext[
matasano.blocks.bytes_in_block(
16, b
)
]
for i in reversed(range(16)):
padding_value = 16 - i
trap = matasano.util.random_bytes_range(i)
for j in range(256):
_trap = trap + bytes((j,))
if padding_value > 1:
suffix = bytes((
padding_value ^ previous_value
for previous_value
in discovered
))
_trap += suffix
assert len(_trap) == 16, \
"Got bad _trap len {}".format(len(_trap))
if self.oracle.experiment(
_trap,
block
):
discovered.insert(0, j ^ padding_value)
break
else:
raise Exception(
"Something went wrong while attacking the padding oracle - "
"block #{}, byte #{}".format(b, i)
)
assert len(discovered) == 16
self.discovered_string += bytes((
previous[i] ^ v for i, v in enumerate(discovered)
))
previous = block
return self.oracle.guess(self.discovered_string)
class AttackerFixedNonceCTR(Attacker):
"""
The attacker against the fixed nonce CTR oracle.
The oracle holds a tuple of unknown strings.
The attacker's goal is to discover such strings.
"""
def __init__(self, oracle: matasano.oracle.OracleFixedNonceCTR):
super().__init__(oracle)
self.discovered_strings = tuple()
def attack(self) -> bool:
"""
Employ text analysis tools to discover the encrypted strings.
All the strings have been encrypted by using the same key-space.
So the attack methodology is similar to the one used against
Vigenere.
Specifically, split in buckets the oracle's challenge.
Attack each bucket by guessing its XOR key char.
Iterate until the result is satisfying (i.e. all chars are ASCII).
"""
buffers = self.oracle.challenge()
buffers = [bytearray(b) for b in buffers]
max_len = len(max(buffers, key=lambda b: len(b)))
buckets = [
[
b[i] if len(b) > i else None # String len differs
for b
in buffers
]
for i in range(max_len)
]
# Guess the key
key = [
tuple((
ord(c) for c in
matasano.stats.most_likely_xor_chars(
bytes([byte for byte in b if byte is not None]),
3
)
))
for b in buckets
]
assert len(key) == len(buckets)
k_used = {
k: 0 for k in range(len(key))
}
k = 0
while k < len(key):
v = key[k]
new_buffers = buffers[:]
for buffer in new_buffers:
if len(buffer) <= k:
continue # Skip completed buffers
buffer[k] ^= v[k_used[k]]
if buffer[k] >= 128 and k_used[k] < len(v) - 1:
k_used[k] += 1
break
else:
buffers = new_buffers
k += 1
self.discovered_strings = tuple(
bytes(b) for b in buffers
)
return self.oracle.guess(self.discovered_strings)
class AttackerMT19937Seed(Attacker):
"""
Guess the oracle's seed by brute-force.
Try the possible combinations of seed/output
after calling the oracle.
:param oracle: The oracle to be attacked.
"""
def __init__(self, oracle: matasano.oracle.OracleMT19937Seed):
super().__init__(oracle)
self.discovered_seed = None
def attack(self) -> bool:
"""
Guess the oracle's seed.
:return: The attack result.
"""
start_time = int(time.time())
challenge = self.oracle.challenge()
outputs = {
matasano.prng.MT19937(seed).extract_number(): seed
for seed in range(
start_time + self.oracle.sleep_min,
start_time + self.oracle.sleep_max + 1
)
}
assert challenge in outputs, \
"Something went wrong, can't find challenge in outputs."
self.discovered_seed = outputs[challenge]
return self.oracle.guess(self.discovered_seed)
class AttackerMT19937Clone(Attacker):
"""
Clone the MT PRNG hold by the Oracle,
by inverting the tempering function
for each of the values output by the oracle,
and passing the result to a newly created MT clone.
:param oracle: The oracle to be attacked.
"""
def __init__(self, oracle: matasano.oracle.OracleMT19937Clone):
super().__init__(oracle)
self.next_random_numbers = []
@staticmethod
def untemper_one(y: int):
"""
Reverse the first tempering transformation:
y = x ^ x >> 11
:param y: The tempering result.
:return: The value x that produced y.
"""
prefix = (y >> 21) & 0x07ff # The first 11 MSB do not change
middle = (y >> 10) & 0x07ff
middle ^= prefix
suffix = y & 0x03ff
suffix ^= middle >> 1
x = 0x00
x |= prefix << 21
x |= middle << 10
x |= suffix
return x
@staticmethod
def untemper_two(y: int):
"""
Reverse the second tempering transformation:
y = x ^ x << 7 & 2636928640
:param y: The tempering result.
:return: The value x that produced y.
"""
suffix = y & 0x7f # Last 7 bits are copied
middle_one = (y >> 7) & 0x7f
middle_one ^= ((2636928640 >> 7) & 0x7f) & suffix
middle_two = (y >> 14) & 0x7f
middle_two ^= ((2636928640 >> 14) & 0x7f) & middle_one
middle_three = (y >> 21) & 0x7f
middle_three ^= ((2636928640 >> 21) & 0x7f) & middle_two
prefix = (y >> 28) & 0x0f
prefix ^= ((2636928640 >> 28) & 0x0f) & middle_three
x = 0x00
x |= prefix << 28
x |= middle_three << 21
x |= middle_two << 14
x |= middle_one << 7
x |= suffix
return x
@staticmethod
def untemper_three(y: int):
"""
Reverse the second-last tempering transformation:
y = x ^ x << 15 & 4022730752
:param y: The tempering result.
:return: The value x that produced y.
"""
suffix = y & 0x7fff # Last 15 bits are copied
middle = (y >> 15) & 0x7fff
middle ^= ((4022730752 >> 15) & 0x7fff) & suffix
prefix = middle & 0x03 # MSB bits of 4022730752 are set so & is ignored
prefix ^= (y >> 30) & 0x03
x = 0x00
x |= prefix << 30
x |= middle << 15
x |= suffix
return x
@staticmethod
def untemper_four(y: int):
"""
Reverse the last tempering transformation.
y = x ^ x >> 18
:param y: The tempering result.
:return: The value x that produced y.
"""
return y ^ (y >> 18)
@staticmethod
def untemper(y: int):
"""Invert the tempering function applied to n from the Oracle's PRNG.
We're interested in finding x, given y.
temper(x) = y
untemper(y) = x
:param y: The tempering result.
:return: The value x that produced y.
"""
x = AttackerMT19937Clone.untemper_four(y)
x = AttackerMT19937Clone.untemper_three(x)
x = AttackerMT19937Clone.untemper_two(x)
x = AttackerMT19937Clone.untemper_one(x)
return x
def attack(self) -> bool:
"""
Clone the oracle's PRNG.
:return: True whether the attacks is successful.
"""
challenge = self.oracle.challenge()
challenge = [
AttackerMT19937Clone.untemper(y)
for y in challenge
]
mt_prng = matasano.prng.MT19937(0)
mt_prng.mt = challenge
self.next_random_numbers = list(
mt_prng.extract_number()
for _ in range(10)
)
return self.oracle.guess(self.next_random_numbers)
class AttackerMT19937Stream(Attacker):
"""
Guess the oracle's seed (i.e. the encryption key).
:param oracle: The oracle to be attacked.
"""
def __init__(self, oracle: matasano.oracle.OracleMT19937Stream):
super().__init__(oracle)
self.key = None
def attack(self) -> bool:
"""
Clone the oracle's PRNG.
:return: True whether the attacks is successful.
"""
challenge = self.oracle.challenge()
for seed in range(0, 2 ** 16):
if (matasano.stream.mt19937_stream(
seed,
challenge
))[-len(self.oracle.known_plaintext):] == self.oracle.known_plaintext:
self.key = seed
break
else:
raise Exception("Something went wrong while brute-forcing the seed.")
return self.oracle.guess(self.key)
class AttackerRandomAccessCTR(Attacker):
"""
Guess the Oracle's hidden plaintext.
:param oracle: The oracle to be attacked.
"""
def __init__(self, oracle: matasano.oracle.OracleRandomAccessCTR):
super().__init__(oracle)
self.discovered_plaintext = None
def attack(self) -> bool:
"""
Replace the plaintext with 0s.
Once done, you get the exact key, and can recover the hidden plaintext.
"""
challenge = self.oracle.challenge()
key = bytes(
self.oracle.experiment(i, 0)[i]
for i in range(len(challenge))
)
assert len(key) == len(challenge)
self.discovered_plaintext = matasano.util.xor(
challenge,
key
)
return self.oracle.guess(bytes(self.discovered_plaintext))
class AttackerBitFlippingCTR(Attacker):
"""
The attacker against the Bit Flipping CTR Oracle.
Forge a byte buffer such that, once encrypted,
will be manipulated in order to create a user
having admin rights (i.e. containing the string
";admin=true;"
)
We know for sure that the oracle escapes the meta
characters ";" and "=".
As a consequence, we won't use them, and we'll
manipulate the CTR cipher-text.
"""
def __init__(self, oracle: matasano.oracle.OracleBitflipping):
super().__init__(oracle)
self.prefix_len = 32
def attack(self) -> bool:
"""
Perform the attack against the oracle.
:return: True if the attack was successful.
"""
trap = bytearray(b"foo;admin=true")
indexes = (trap.index(b";"), trap.index(b"="))
for i in indexes:
trap[i] ^= 1
trap = bytes(trap)
cipher = bytearray(self.oracle.experiment(trap))
for i in indexes:
cipher[self.prefix_len + i] ^= 1
return self.oracle.guess(bytes(cipher))
class AttackerCBCKeyIV(Attacker):
"""
The oracle is using AES CBC.
The IV is the same key.
The attacker's goal is to discover the key.
:param oracle:
"""
def __init__(self, oracle: matasano.oracle.OracleCBCKeyIV):
super().__init__(oracle)
def attack(self) -> bool:
"""
Acquire the challenge.
Tamper it.
Ask the oracle to decrypt and decode it.
On error the oracle will raise a proper exception,
containing the decoded plaintext.
XORring two blocks of such plaintext will
reveal the key.
"""
challenge = bytearray(self.oracle.challenge())
r = matasano.blocks.bytes_in_block(16, 1)
for i in range(r.start, r.stop):
challenge[i] = 0
r = matasano.blocks.bytes_in_block(16, 2)
for i in range(r.start, r.stop):
challenge[i] = challenge[i - 16 * 2]
try:
self.oracle.experiment(
bytes(challenge)
)
except matasano.oracle.BadAsciiPlaintextException as e:
p = e.recovered_plaintext
first = p[matasano.blocks.bytes_in_block(16, 0)]
third = p[matasano.blocks.bytes_in_block(16, 2)]
return self.oracle.guess(
matasano.util.xor(
first,
third
)
)
assert False, \
"Something went wrong while attacking the oracle."
class AttackerKeyedMac(Attacker):
"""
Attack a MD-based keyed MAC.
Ask the oracle for the MAC of a message.
Clone the inner hash function status,
and glue pad the message.
Forge a new MAC for any new message whose prefix
is the previously built string.
:param oracle: The oracle to be attacked.
:param hash_function: The hash function used to generate the MAC.
:param padding_function: The function used to pad the message.
:param hash_to_state: The function to retrieve the state from the hash digest.
"""
def __init__(
self,
oracle: matasano.oracle.OracleKeyedMac,
hash_function,
padding_function,
hash_to_state
):
super().__init__(oracle)
self.hash_function = hash_function
self.padding_function = padding_function
self.hash_to_state = hash_to_state
self.forged_message = None
self.forged_mac = None
def attack(self) -> bool:
"""
        Attack the oracle by forging a MAC
for an unseen string.
:return: True if the attack succeeds.
"""
message = (
b"comment1=cooking%20MCs;userdata=foo;"
b"comment2=%20like%20a%20pound%20of%20bacon"
)
trap = b";admin=true"
challenge = self.oracle.challenge(message)
state = self.hash_to_state(challenge)
# Get the padded message and add the trap suffix.
padded_message = self.padding_function(
b"0" * self.oracle.key_len + message
)
padded_message += trap
# Remove unknown key.
self.forged_message = padded_message[self.oracle.key_len:]
# Forge the MAC.
self.forged_mac = self.hash_function(
message=trap,
state=state,
fake_byte_len=len(padded_message)
)
return self.oracle.guess(
self.forged_message,
self.forged_mac
)
class AttackerSHA1KeyedMac(AttackerKeyedMac):
"""
Attack a SHA1-keyed-MAC oracle.
"""
def __init__(self, oracle: matasano.oracle.OracleSHA1KeyedMac):
super().__init__(
oracle,
matasano.hash.SHA1,
matasano.hash.sha1_pad,
matasano.util.from_big_endian_unsigned_ints
)
class AttackerMD4KeyedMac(AttackerKeyedMac):
"""
    Attack an MD4-keyed-MAC oracle.
"""
def __init__(self, oracle: matasano.oracle.OracleMD4KeyedMac):
super().__init__(
oracle,
matasano.hash.MD4,
matasano.hash.md4_pad,
matasano.util.from_little_endian_unsigned_ints
)
class AttackerRemoteSHA1HMac(Attacker):
"""
Exploit the timing of the remote server checks
to brute-force the oracle's secret key.
"""
def __init__(self, oracle: matasano.oracle.OracleRemoteSHA1HMac):
super().__init__(oracle)
self.forged_mac = None
def attack(self) -> bool:
"""
The oracle is up and running.
We query the remote server and keep
        the byte whose average response time has been the worst.
        This code is not perfect, but further improvements
        would not be meaningful.
"""
message = b"a"
self.forged_mac = bytearray(
len(
matasano.oracle.OracleRemoteSHA1HMac.mac_function(b"key", message)
)
)
sleep_time = matasano.oracle.OracleRemoteSHA1HMac.sleep_time
for i, _ in enumerate(self.forged_mac):
times = dict()
for b in range(256):
self.forged_mac[i] = b
start = time.time()
self.oracle.experiment(message, self.forged_mac)
stop = time.time()
times[b] = stop - start
times = {
k: v for k, v in times.items()
if v >= (i + 1) * sleep_time
}
while len(times) > 1:
_times = dict()
for b in times.keys():
self.forged_mac[i] = b
start = time.time()
self.oracle.experiment(message, self.forged_mac)
stop = time.time()
_times[b] = stop - start
times = {
k: v for k, v in _times.items()
if v >= (i + 1) * sleep_time
}
time.sleep(5)
self.forged_mac[i] = list(times.keys())[0]
time.sleep(5)
return self.oracle.guess(
message,
self.forged_mac
)
class EavesdropperDH(Eavesdropper, matasano.public.DHEntity):
"""
MITM attack against an instance of the DH protocol.
Replace Alice's and Bob's public keys with the default p.
"""
def __init__(
self,
alice: matasano.public.DHEntity,
bob: matasano.public.DHEntity
):
super(EavesdropperDH, self).__init__()
self.alice, self.bob = alice, bob
self.alice_pub, self.bob_pub = None, None
self.eavesdropped_message = None
def dh_protocol_respond(self, p: int, g: int, pub_a: int):
"""
The usual response to the DH protocol.
Forward this response to Bob,
after replacing the public key with p.
:param p: The group modulo.
:param g: A primitive root of p.
:param pub_a: Alice's DH public key.
"""
self.alice_pub = pub_a
self.bob_pub = self.bob.dh_protocol_respond(p, g, p)
self._session_key = 0
return p
def receive_and_send_back(self, ciphertext: bytes) -> bytes:
"""
Receive the message from Alice,
forward it to Bob.
:param ciphertext: The message from Alice to Bob.
"""
bob_answer = self.bob.receive_and_send_back(ciphertext)
self.eavesdropped_message = matasano.public.DHEntity.decipher_received_message(
matasano.public.DHEntity.session_key_to_16_aes_bytes(self._session_key),
ciphertext
)
return bob_answer
def attack(self) -> bool:
"""
Trigger the protocol and perform the MITM attack.
"""
message = b"MessageInABottle" # Make sure it's a multiple of 16
assert len(message) % 16 == 0
self.alice.dh_protocol(self)
self.alice.send_and_receive(self, message)
return self.eavesdropped_message == message
class EavesdropperAckDH(Eavesdropper, matasano.public.DHAckEntity):
"""
MITM attack against an instance of the DH protocol (with ACK).
Replace the group parameter g with:
* 1
* p
* p -1
"""
def __init__(
self,
alice: matasano.public.DHAckEntity,
bob: matasano.public.DHAckEntity,
g: int = 1
):
super(EavesdropperAckDH, self).__init__()
self.alice, self.bob = alice, bob
self.eavesdropped_message = None
self.malicious_g = g
def set_group_parameters(self, p: int, g: int):
"""
Replace the g parameter and forward to Bob.
:param p: The group modulo.
:param g: A primitive root of p.
:return: True.
"""
self.bob.set_group_parameters(p, self.malicious_g)
return super().set_group_parameters(p, g)
def dh_protocol_respond(self, p: int, g: int, pub_a: int):
"""
The usual response to the DH protocol.
Forward this response to Bob,
after replacing the public key with p.
:param p: The group modulo.
:param g: A primitive root of p.
:param pub_a: Alice's DH public key.
"""
pub_bob = self.bob.dh_protocol_respond(p, self.malicious_g, pub_a)
if self.malicious_g == 1:
# Both Public keys are always gonna be equal to 1.
self._session_key = 1
elif self.malicious_g == p - 1:
"""
((p - 1) ^ (a * b)) mod p
can produce either 1 or -1.
It depends on whether a * b is even or not.
For the same reason,
if pub_a == p - 1, then priv_a was not even.
So all we need is to compare the public keys to
discover the session key.
"""
self._session_key = p - 1 \
if pub_bob == p - 1 and pub_a == p - 1 \
else 1
elif self.malicious_g == p:
# Both Public keys are always gonna be equal to 0.
self._session_key = 0
else:
assert False, \
"Something went wrong while MITMing Alice and Bob, bad G value."
return pub_bob
def receive_and_send_back(self, ciphertext: bytes) -> bytes:
"""
Receive the message from Alice,
forward it to Bob.
:param ciphertext: The message from Alice to Bob.
"""
bob_answer = self.bob.receive_and_send_back(ciphertext)
self.eavesdropped_message = matasano.public.DHEntity.decipher_received_message(
matasano.public.DHEntity.session_key_to_16_aes_bytes(self._session_key),
ciphertext
)
return bob_answer
def attack(self) -> bool:
"""
Trigger the protocol and perform the MITM attack.
"""
message = b"MessageInABottle" # Make sure it's a multiple of 16
assert len(message) % 16 == 0
self.alice.dh_protocol(self, matasano.public.dh_nist_p, self.malicious_g)
self.alice.send_and_receive(self, message)
return self.eavesdropped_message == message
class EavesdropperSimplifiedSRPServer(
matasano.public.SimplifiedSRPServer,
Eavesdropper
):
"""
Brute-force the client's signature in order to discover its password.
"""
def __init__(self):
super(Eavesdropper, self).__init__()
super(matasano.public.SimplifiedSRPServer, self).__init__(password=bytes())
self.b = 1
self.B = self.g
self.u = 1
self._salt = matasano.util.bytes_for_int(256)
self.A = -1
self.client_signature = None
self.client_password = None
def srp_protocol_one(self, A: int) -> tuple:
"""
Complete the phase one of the protocol, responding to the client.
:param A: The client's public key.
"""
self.A = A
return self._salt, self.B, self.u
def srp_protocol_two(self, signature: bytes) -> bool:
"""
Return True.
:param signature: The client's produced MAC.
:return: Whether the signature is correct.
"""
self.client_signature = signature
return True
def attack(self) -> bool:
"""
Offline brute-force the client's password from the HMAC signature.
"""
for password in matasano.util.get_password_wordlist():
digest = matasano.hash.SHA256(self._salt + password)
x = int.from_bytes(digest, 'little')
v = pow(self.g, x, self.N)
s = pow(
self.A * pow(v, self.u, self.N),
self.b,
self.N
)
self._K = matasano.hash.SHA256(
matasano.util.bytes_for_int(s)
)
if matasano.mac.hmac_sha256(
self._K,
self._salt
) == self.client_signature:
self.client_password = password
return True
else:
return False
class AttackerRSABroadcast:
"""
Perform the Coppersmith's attack.
https://en.wikipedia.org/wiki/Coppersmith%27s_Attack
All the following ciphertexts are encryption of the same message.
The public key value is fixed to 3.
The modulus of each key is different from the others.
:param ciphertext_one: The first encryption.
:param pub_one: The first public key.
:param ciphertext_two: The second encryption.
:param pub_two: The second public key.
:param ciphertext_three: The third encryption.
:param pub_three: The third public key.
"""
def __init__(
self,
ciphertext_one: int,
pub_one: matasano.public.RSA_Pub,
ciphertext_two: int,
pub_two: matasano.public.RSA_Pub,
ciphertext_three: int,
pub_three: matasano.public.RSA_Pub,
):
self.c_0, self.n_0 = ciphertext_one, pub_one.n
self.c_1, self.n_1 = ciphertext_two, pub_two.n
self.c_2, self.n_2 = ciphertext_three, pub_three.n
def attack(self) -> bytes:
"""
Perform the attack and return the discovered secret.
:return: The discovered secret plaintext.
"""
m_s_0 = self.n_1 * self.n_2
m_s_1 = self.n_0 * self.n_2
m_s_2 = self.n_0 * self.n_1
n = self.n_0 * self.n_1 * self.n_2
result = sum([
self.c_0 * m_s_0 * matasano.math.modinv(m_s_0, self.n_0),
self.c_1 * m_s_1 * matasano.math.modinv(m_s_1, self.n_1),
self.c_2 * m_s_2 * matasano.math.modinv(m_s_2, self.n_2),
])
result %= n
return matasano.util.bytes_for_int(
int(math.ceil(pow(result, 1 / 3.0)))
)
class AttackerUnpaddedRSARecovery(Attacker):
"""
Exploit RSA's homomorphic encryption property
and trick the oracle into decrypting the secret.
:param oracle: The oracle to be attacked.
"""
def __init__(self, oracle: matasano.oracle.OracleUnpaddedRSARecovery):
super(AttackerUnpaddedRSARecovery, self).__init__(oracle)
def attack(self) -> bool:
"""
Multiply the oracle's challenge with a new
ciphertext (trap), whose plaintext value is known
(s).
After the oracle's decryption the resulting
plaintext will be multiplied by s.
:return: True if the attack is successful.
"""
cipher, pub = self.oracle.challenge()
s = random.randint(1, pub.n - 1)
inv_s = matasano.math.modinv(s, pub.n)
trap = (pow(s, pub.e, pub.n) * cipher) % pub.n
plaintext = self.oracle.experiment(trap)
plaintext = int.from_bytes(plaintext, byteorder="little")
plaintext = (plaintext * inv_s) % pub.n
return self.oracle.guess(
matasano.util.bytes_for_int(
plaintext
)
)
class AttackerRSAPaddedSignatureVerifier(Attacker):
"""
Forge a signature by creating a perfect cube,
that respects the PKCS1.5 padding rules.
:param oracle: The oracle's to be attacked.
"""
def __init__(self, oracle: matasano.oracle.OracleRSAPaddedSignatureVerifier):
super(AttackerRSAPaddedSignatureVerifier, self).__init__(oracle)
def attack(self) -> bool:
"""
Find a perfect cube that, when represented in binary form,
has the prefix matching the expected padding.
A working signature can be forged by using such cube.
"""
message, pub = self.oracle.challenge()
assert pub.e == 3, \
"Can't attack if Oracle public key is not 3."
pad_f = matasano.oracle.OracleRSAPaddedSignatureVerifier.pad_function
hash_f = matasano.oracle.OracleRSAPaddedSignatureVerifier.hash_function
block_size = matasano.oracle.OracleRSAPaddedSignatureVerifier.block_size
byteorder = 'big' # Because we're messing with the suffix
padded_message = pad_f(
hash_f(message)
)
padded_message += b"\x00" * block_size * 2
n = int.from_bytes(padded_message, byteorder=byteorder)
forged_signature = matasano.math.integer_cube_root(n) + 1
forged_message = pow(forged_signature, 3)
assert forged_message < pub.n
forged_message = matasano.util.bytes_for_int(
forged_message,
length=len(padded_message),
byteorder=byteorder
)
assert padded_message[:block_size] == forged_message[:block_size]
return self.oracle.guess(forged_signature)
class AttackerDSA(Attacker):
"""
Generic DSA attacker.
:param oracle: The oracle to be attacked.
"""
def __init__(self, oracle: matasano.oracle.OracleDSA):
super().__init__(oracle)
self.private_key_x = -1
@staticmethod
def compute_private_key(
public_key: matasano.public.DSA_Pub,
signature: matasano.public.DSA_Signature,
digest: int,
r_inv: int,
k: int
):
"""
Compute the private key given following parameters.
:param public_key: The signer's public key.
:param signature: A signature.
:param digest: The digest of the message producing the signature.
:param r_inv: The inverse mod q of the r value of the signature.
:param k: The signature's nonce.
:return: The private key.
"""
return (((signature.s * k) - digest) * r_inv) % public_key.q
@staticmethod
def is_private_key_valid(
public_key: matasano.public.DSA_Pub,
x: int
) -> bool:
"""
Check whether a computed private key is valid.
:param public_key: The oracle's public key.
:param x: A computed private key.
:return: True if the computed private key is valid.
"""
return pow(public_key.g, x, public_key.p) == public_key.y
def attack(self) -> bool:
"""
Subclasses should implement attack.
"""
return False
class AttackerDSAKeyFromNonce(AttackerDSA):
"""
Recover the DSA private key by brute-forcing the nonce value.
:param oracle: The oracle to be attacked.
"""
def __init__(self, oracle: matasano.oracle.OracleDSAKeyFromNonce):
super(AttackerDSAKeyFromNonce, self).__init__(oracle)
def attack(self) -> bool:
"""
Brute-force the nonce value and discover the Oracle's private key.
"""
oracle_type = type(self.oracle)
y, p, q, g = self.oracle.public_key
r, s = oracle_type.signature
r_inv = matasano.math.modinv(r, q)
digest = oracle_type.hash_to_int(
oracle_type.hash_function(oracle_type.message)
)
compute_private_key = functools.partial(
type(self).compute_private_key,
public_key=self.oracle.public_key,
signature=oracle_type.signature,
digest=digest,
r_inv=r_inv
)
for k in oracle_type.k_range:
x = compute_private_key(
k=k
)
if type(self).is_private_key_valid(
self.oracle.public_key,
x
):
break
else:
assert False, \
"Something went wrong while brute-forcing the nonce."
self.private_key_x = x
return self.oracle.guess(self.private_key_x)
class AttackerDSAKeyFromRepeatedNonce(AttackerDSA):
"""
Recover the DSA private key by finding those signatures
generated by using the same nonce more than once.
:param oracle: The oracle to be attacked.
"""
def __init__(self, oracle: matasano.oracle.OracleDSAKeyFromRepeatedNonce):
super().__init__(oracle)
def attack(self) -> bool:
"""
Find duplicated nonces and discover the oracle's private key.
"""
signatures_and_messages = self.oracle.challenge()
y, p, q, g = self.oracle.public_key
for pair in itertools.combinations(signatures_and_messages, 2):
first, second = pair
k = (
(
(first.h - second.h) % q
) * (
matasano.math.modinv((first.s - second.s) % q, q)
)
) % q
x = type(self).compute_private_key(
self.oracle.public_key,
signature=matasano.public.DSA_Signature(first.r, first.s),
digest=first.h,
r_inv=matasano.math.modinv(first.r, q),
k=k
)
if type(self).is_private_key_valid(self.oracle.public_key, x):
break
else:
assert False, \
"Something went wrong while discovering the oracle's private key."
self.private_key_x = x
return self.oracle.guess(self.private_key_x)
class AttackerRSAParity(Attacker):
"""
Exploit the parity function in order to apply
a binary search on the message space.
:param oracle: The oracle to be attacked.
"""
def __init__(self, oracle: matasano.oracle.OracleRSAParity):
super().__init__(oracle)
self.message = None
def attack(self) -> bool:
"""
Attack the oracle by repeatedly checking the parity
of a tampered ciphertext.
"""
cipher, public = self.oracle.challenge()
e, n = public
lower = 0
upper = 1
denominator = 1
multiplier = pow(2, e, n)
for _ in range(n.bit_length()):
half = upper - lower
assert half >= 0, \
"Got bad half value {}.".format(half)
lower *= 2
upper *= 2
denominator *= 2
cipher = (cipher * multiplier) % n
if self.oracle.experiment(cipher):
upper -= half
else:
lower += half
self.message = matasano.util.bytes_for_int(n * upper // denominator)
print(self.message)
assert upper >= lower, \
"Something wrong while updating values ({} vs {}).".format(
upper,
lower
)
return self.oracle.guess(self.message)
class AttackerRSAPadding(Attacker):
"""
Exploit the RSA padding oracle in order to apply
the Bleichenbacher [98] adaptive CC attack.
(http://archiv.infsec.ethz.ch/education/fs08/secsem/bleichenbacher98.pdf)
:param oracle: The oracle to be attacked.
"""
def __init__(self, oracle: matasano.oracle.OracleRSAPadding):
super().__init__(oracle)
self.message = None
def attack(self) -> bool:
"""
Attack the oracle by repeatedly checking the parity
of a tampered ciphertext.
"""
cipher, public = self.oracle.challenge()
assert self.oracle.experiment(cipher)
e, n = public
immutable_slice = collections.namedtuple(
"immutable_slice", ["start", "stop"]
)
k = n.bit_length()
B = pow(2, k - 16)
B2 = 2 * B
B3 = 3 * B
s_i = -1
c0 = cipher
M = {immutable_slice(B2, B3 - 1), }
def _is_done() -> bool:
"""
Check whether the attack should stop.
:return: True or false.
"""
if len(M) != 1:
return False
_M = M.copy()
inner_slice = _M.pop()
return inner_slice.start == inner_slice.stop
def _update_m() -> set:
"""
After finding a new value for s_i,
update the M set.
"""
new_m = set()
for a, b in M:
for r in range(
matasano.math.int_division_ceil(a * s_i - B3 + 1, n),
matasano.math.int_division_floor(b * s_i - B2, n) + 1
):
new_a = max(a, matasano.math.int_division_ceil(B2 + r * n, s_i))
new_b = min(b, matasano.math.int_division_floor(B3 - 1 + r * n, s_i))
if new_a <= new_b:
new_m.add(
immutable_slice(
new_a,
new_b
)
)
return new_m
def _iterate(s_minus_1: int) -> int:
"""
Iterate for a new round.
:param s_minus_1: The previous s_i-1 value.
:return: The new s_i value.
"""
assert len(M) != 0, \
"M should contain at least one element."
if len(M) > 1:
for inner_s in range(s_minus_1 + 1, n):
if self.oracle.experiment(c0 * pow(inner_s, e, n)):
return inner_s
else: # len(M) == 1
a, b = M.copy().pop()
for r in range(
((b * s_minus_1 - B2) * 2) // n,
n
):
for inner_s in range(
(B2 + r * n) // b,
((B3 - 1 + r * n) // a) + 1
):
if self.oracle.experiment(c0 * pow(inner_s, e, n)):
return inner_s
assert False, \
"Something went wrong while finding s_i"
# Initial round, for i == 1
for new_s in range(matasano.math.int_division_ceil(n, B3), n):
if self.oracle.experiment(c0 * pow(new_s, e, n)):
s_i = new_s
M = _update_m()
break
else:
assert False, \
"Something went wrong while finding s_1"
while not _is_done():
s_i = _iterate(s_i)
M = _update_m()
a = M.pop().start
self.message = matasano.util.bytes_for_int(
a
)
return self.oracle.guess(a)
class AttackerCBCMacForge(Attacker):
"""
Forge a CBC-MAC by using a length extension attack.
:param oracle: The oracle to be attacked.
"""
def __init__(self, oracle: matasano.oracle.OracleCBCMac):
super().__init__(oracle)
self.forged_mac = None
self.message = None
def attack(self) -> bool:
"""
Forge a valid MAC for an extended message as follows:
- Get the original message and its MAC.
- Ask the Oracle for a new MAC (m), for a valid message.
- Now, apply length extension:
m is going to be a valid MAC for the original message
plus the exclusive OR between the original MAC and the
first block of the attacker's message plus the remainder
of the attacker's message.
:return: True on success.
"""
message, mac = self.oracle.challenge()
trap = b'from=0_E&tx_list=0_B:10;0_C:1000;0_E:10000000000'
forged_mac = self.oracle.experiment(trap)
trap = message + matasano.util.xor(mac, trap[0:16]) + trap[16:]
self.forged_mac = forged_mac
self.message = trap
return self.oracle.guess(trap, forged_mac)
class AttackerCBCMacHash(Attacker):
"""
Find a CBC-MAC as hash collision.
:param oracle: The oracle to be attacked.
"""
def __init__(self, oracle: matasano.oracle.OracleCBCMacHash):
super().__init__(oracle)
self.message = None
self.digest = None
self.collision = None
def attack(self) -> bool:
"""
Another length extension attack, done in reverse.
Now we first get the extension, and then we append it
to something we generated.
This will result in a collision, because the CBC-MAC
of the suffix will be the same of the whole.
"""
self.message, key, self.digest = self.oracle.challenge()
trap = b"alert('Ayo, the Wu is back!'); //" # The // will let the JS engine ignore the rest.
assert len(trap) % 16 == 0, len(trap)
self.collision = trap + matasano.util.xor(
matasano.mac.aes_cbc_mac(key, trap), self.message[0:16]
) + self.message[16:]
return self.oracle.guess(self.collision)
class AttackerCompress(Attacker):
"""
Retrieve a compressed and then encrypted session id.
:param oracle: The oracle to be attacked.
"""
def __init__(self, oracle: matasano.oracle.OracleCompress):
super().__init__(oracle)
self.session_id = b''
def attack(self) -> bool:
"""
We have a compression oracle and we have partial knowledge of the transmitted plaintext.
We can build a message that is exactly equal to the transmitted message,
minus the session id inside the cookie.
This way, each letter is going to end up being duplicated,
except for those in the session id.
        Finding each letter thus reduces to finding the messages that compress to the smallest size.
In the second version of the challenge things get more complicated, because the
oracle is using a block-cipher and thus we may have the real length of the message
hidden behind a padding.
        In that case, we use additional characters (which we are sure never appear inside the plaintext)
        until we find a unique candidate.
"""
charset = (string.ascii_letters + string.digits + "+/=").encode('ascii')
padding = '!"#$%&\'()*,;<>?@[\\]^_`{|}~' # No Base64 and not inside the plaintext
encrypted_s_id = self.oracle.challenge()
for _ in range(len(encrypted_s_id)):
for j in range(0, len(padding)):
counts = collections.defaultdict(list)
for c in charset:
l = self.oracle.experiment(
self.oracle.REQUEST_FORMATTER.format(
(self.session_id.decode('ascii') + chr(c) + padding[:j]) * 3,
len(self.session_id), self.session_id.decode('ascii')
).encode('ascii')
)
counts[l].append(c)
m = min(counts)
if len(counts[m]) == 1:
self.session_id += bytes([counts[m][0]])
break
else:
assert False, "Something went wrong while guessing next character"
print(self.session_id)
return self.oracle.guess(self.session_id)
class AttackerRC4Cookie(Attacker):
"""
    Retrieve a hidden cookie from the oracle by exploiting the byte biases of the RC4 cipher.
:param oracle: The oracle to be attacked.
"""
def __init__(self, oracle: matasano.oracle.OracleCompress):
super().__init__(oracle)
self.cookie = None
def attack(self) -> bool:
"""
        We know that the RC4 cipher's output is biased at some fixed offsets.
        We'll exploit this in order to find a hidden cookie that the oracle appends to every request
        we make.
"""
challenge = self.oracle.challenge()
self.cookie = bytearray([0] * len(challenge))
# Sorry, this code is really really slow.
# I've put it here more as a PoC than as a real attack.
# A faster language is definitely advised.
prefix = b'A' * (len(challenge) // 16)
for i in range(0, (len(challenge) // 2) + 1):
print("Progress: {}/{}.".format(i, (len(challenge) // 2) + 1))
prefix += b'A'
bias_15 = collections.defaultdict(lambda: 0)
bias_31 = collections.defaultdict(lambda: 0)
for _ in range(2 ** 24): # should be something around 2 ** 30
cipher = self.oracle.experiment(prefix)
assert len(cipher) >= 32
bias_15[cipher[15] ^ 240] += 2
bias_15[cipher[15] ^ 0] += 1
bias_15[cipher[15] ^ 16] += 1
bias_31[cipher[31] ^ 224] += 2
bias_31[cipher[31] ^ 0] += 1
bias_31[cipher[31] ^ 32] += 1
k = 15 - i - (len(challenge) // 16) - 1
if k >= 0:
most_biased = sorted(bias_15, key=lambda k: bias_15[k], reverse=True)
for b in most_biased:
if chr(b).isalpha():
self.cookie[k] = b
else:
self.cookie[k] = most_biased[0]
k = 31 - i - (len(challenge) // 16) - 1
most_biased = sorted(bias_31, key=lambda k: bias_31[k], reverse=True)
for b in most_biased:
if chr(b).isalpha():
self.cookie[k] = b
else:
self.cookie[k] = most_biased[0]
self.cookie = bytes(self.cookie)
return self.oracle.guess(self.cookie)
``` |
{
"source": "JohnnyPeng18/memsource-wrap",
"score": 2
} |
#### File: memsource/api_rest/job.py
```python
import io
import json
import os
import uuid
from typing import Any, Dict, List
from memsource import api_rest, constants, exceptions, models
class Job(api_rest.BaseApi):
# Document: https://cloud.memsource.com/web/docs/api#tag/Job
def _create(
self,
project_id: int,
target_langs: List[str],
files: Dict[str, Any]
) -> List[models.JobPart]:
"""Common process of creating job.
        If the returned JSON has `unsupportedFiles`,
        this method raises MemsourceUnsupportedFileException
:param project_id: New job will be in this project.
        :param files: Dict containing the source file of the job.
:param target_langs: List of translation target languages.
:return: List of models.JobPart
"""
if isinstance(files["file"], tuple):
            # If the value is a tuple, this function was called from create_from_text.
            # We need to create a temporary file in order to raise the exception.
file_name, text = files["file"]
file_path = os.path.join("/", "tmp", file_name)
else:
file_name = file_path = files["file"].name
job_create_extra_headers = {
"Content-Type": "application/octet-stream",
"Content-Disposition": "inline; filename=\"{}\"".format(file_name),
"Memsource": json.dumps({"targetLangs": target_langs})
}
self.add_headers(job_create_extra_headers)
result = self._post("v1/projects/{}/jobs".format(project_id), {
"targetLangs": target_langs,
}, files)
        # An unsupported file count of 0 means success.
unsupported_files = result.get("unsupportedFiles", [])
if len(unsupported_files) == 0:
return [models.JobPart(job_parts) for job_parts in result["jobs"]]
if isinstance(files["file"], tuple):
with open(file_path, "w+") as f:
f.write(text)
raise exceptions.MemsourceUnsupportedFileException(
unsupported_files,
file_path,
self.last_url,
self.last_params
)
def create(
self, project_id: int, file_path: str, target_langs: List[str]
) -> List[models.JobPart]:
"""Create a job.
        If the returned JSON has `unsupportedFiles`,
        this method raises MemsourceUnsupportedFileException
:param project_id: New job will be in this project.
:param file_path: Source file of job.
:param target_langs: List of translation target languages.
:return: List of models.JobPart
"""
with open(file_path, 'r') as f:
return self._create(project_id, target_langs, {'file': f})
def create_from_text(
self,
project_id: int,
text: str,
target_langs: List[str],
file_name: str=None,
) -> List[models.JobPart]:
"""You can create a job without a file.
See: Job.create
        If the returned JSON has `unsupportedFiles`,
        this method raises MemsourceUnsupportedFileException
:param project_id: New job will be in this project.
:param text: Source text of job.
:param target_langs: List of translation target languages.
        :param file_name: Name of the file; generated with uuid1() when this parameter is None.
:return: List of models.JobPart
"""
return self._create(project_id, target_langs, {
'file': ('{}.txt'.format(uuid.uuid1().hex) if file_name is None else file_name, text),
})
def list_by_project(
self,
project_id: int,
page: int = 0,
) -> List[models.JobPart]:
jobs = self._get("v2/projects/{}/jobs".format(project_id), {"page": page})
return [models.JobPart(job_part) for job_part in jobs["content"]]
def pre_translate(
self,
project_id: int,
job_parts: List[Dict[str, str]],
translation_memory_threshold: float=constants.TM_THRESHOLD,
callback_url: str=None,
) -> models.AsynchronousRequest:
"""Call async pre translate API.
:param job_parts: Dictionary of job_part id with format `{"uid": string}`.
:param translation_memory_threshold: If matching score is higher than this, it is filled.
:return: models.AsynchronousRequest
"""
response = self._post("v1/projects/{}/jobs/preTranslate".format(project_id), {
"jobs": [{"uid": job_part} for job_part in job_parts],
"translationMemoryTreshold": translation_memory_threshold,
"callbackUrl": callback_url,
})
return models.AsynchronousRequest(response["asyncRequest"])
def get_completed_file_text(self, project_id: int, job_uid: str) -> bytes:
"""Download completed file and return it.
:param job_uid: job UID.
"""
data_stream = self._get_stream(
"v1/projects/{}/jobs/{}/targetFile".format(project_id, job_uid)
).iter_content(constants.CHUNK_SIZE)
buffer = io.BytesIO()
for chunk in data_stream:
buffer.write(chunk)
return buffer.getvalue()
def get_segments(
self,
project_id: int,
job_uid: str,
begin_index: int=0,
end_index: int=0,
) -> List[models.Segment]:
"""Call get segments API.
NOTE: I don't know why this endpoint returns list of list.
        It seems there is always exactly one item in the outer list.
:param project_id: ID of the project
:param job_uid: UID of the job
:param begin_index
:param end_index
:return: List of models.Segment
"""
segments = self._get("v1/projects/{}/jobs/{}/segments".format(project_id, job_uid), {
"beginIndex": begin_index,
"endIndex": end_index,
})
return [
models.Segment(segment) for segment in segments["segments"]
]
def get(self, project_id: int, job_uid: str) -> models.Job:
"""Get the job data.
:param job_uid: ID of the job.
:return: The retrieved job.
"""
response = self._get("v1/projects/{}/jobs/{}".format(project_id, job_uid))
return models.Job(response)
def list(self, project_id: int) -> List[models.Job]:
"""Get the jobs data.
:param project_id: ID of the project
:return: The retrieved jobs.
"""
response = self._get("v2/projects/{}/jobs".format(project_id))
return [models.Job(i) for i in response["content"]]
def delete(
self,
project_id: int,
job_uids: List[int],
purge: bool=False
) -> None:
"""Delete a job
:param job_uids: ids of job you want to delete.
:param purge:
"""
self._delete(
path="v1/projects/{}/jobs/batch".format(project_id),
params={"purge": purge},
data={"jobs": [{"uid": job_uid} for job_uid in job_uids]},
)
def set_status(
self,
project_id: int,
job_uid: str,
status: constants.JobStatusRest,
) -> None:
"""Update job status
JobStatus: New, Emailed, Assigned, Declined_By_Linguist,
Completed_By_Linguist, Completed, Cancelled
        :param job_uid: UID of the job you want to update.
:param status: status of job to update. Acceptable type is JobStatus constant.
"""
self._post("v1/projects/{}/jobs/{}/setStatus".format(project_id, job_uid), {
'requestedStatus': status.value
})
def delete_all_translations(
self,
project_id: int,
job_uids: List[int]
) -> None:
"""Delete all translations from a job
        :param job_uids: UIDs of the jobs whose translations will be deleted.
"""
self._delete("v1/projects/{}/jobs/translations".format(project_id), {}, {
'jobs': [{"uid": job_uid} for job_uid in job_uids],
})
```
#### File: memsource-wrap/memsource/memsource.py
```python
from . import api
from memsource.api_rest import (
auth,
client,
domain,
language,
term_base,
project,
job,
bilingual,
tm,
analysis,
)
class Memsource(object):
def __init__(self, user_name=None, password=<PASSWORD>, token=None, headers=None, use_rest=False):
if use_rest:
self._init_rest(
user_name=user_name,
password=password,
token=token,
headers=headers,
)
return
"""
If token is given, use the token.
Otherwise authenticate with user_name and password, and get token.
"""
if user_name and password and not token and not headers:
token = api.Auth().login(user_name, password).token
# make api class instances
self.auth = api.Auth(token, headers)
self.client = api.Client(token, headers)
self.domain = api.Domain(token, headers)
self.project = api.Project(token, headers)
self.job = api.Job(token, headers)
self.translation_memory = api.TranslationMemory(token, headers)
self.asynchronous = api.Asynchronous(token, headers)
self.language = api.Language(token, headers)
self.analysis = api.Analysis(token, headers)
self.term_base = api.TermBase(token, headers)
def _init_rest(self, user_name, password, token, headers):
"""
If token is given, use the token.
Otherwise authenticate with user_name and password, and get token.
"""
if user_name and password and not token and not headers:
token = auth.Auth().login(user_name, password).token
# make api class instances
self.auth = auth.Auth(token, headers)
self.client = client.Client(token, headers)
self.domain = domain.Domain(token, headers)
self.project = project.Project(token, headers)
self.job = job.Job(token, headers)
self.translation_memory = tm.TranslationMemory(token, headers)
self.language = language.Language(token, headers)
self.analysis = analysis.Analysis(token, headers)
self.term_base = term_base.TermBase(token, headers)
self.bilingual = bilingual.Bilingual(token, headers)
```
#### File: JohnnyPeng18/memsource-wrap/setup.py
```python
from setuptools import setup
import memsource
def parse_requirements():
with open('requirements.txt') as f:
return [l.strip() for l in f.readlines() if not l.startswith('#')]
def readme():
with open('README.rst') as f:
return f.read()
setup(
name='Memsource-wrap',
version=memsource.__version__,
description=readme(),
license=memsource.__license__,
author=memsource.__author__,
author_email='<EMAIL>',
url='https://github.com/gengo/memsource-wrap',
keywords='Memsource API',
packages=('memsource', 'memsource.lib', 'memsource.api_rest'),
install_requires=parse_requirements(),
)
```
#### File: test/api_rest/test_auth.py
```python
import requests
import unittest
from unittest.mock import patch
from memsource import models
from memsource.api_rest import auth
class TestAuth(unittest.TestCase):
@patch.object(requests.Session, "request")
def test_login(self, mock_request: unittest.mock):
ms_response = unittest.mock.Mock(status_code=200)
ms_response.json.return_value = {
"user": {
"lastName": "mock-last-name",
"email": "<EMAIL>",
"firstName": "mock-first-name",
"id": "1234",
"userName": "mock-tm",
"role": "ADMIN",
"uid": "QWERTY"
},
"token": "mock-token",
"expires": "2020-06-19T07:31:23+0000"
}
mock_request.return_value = ms_response
response = auth.Auth().login(user_name="mock-user", password="<PASSWORD>")
self.assertIsInstance(response, models.Authentication)
self.assertIsInstance(response["user"], models.User)
self.assertDictEqual(response["user"], {
"lastName": "mock-last-name",
"email": "<EMAIL>",
"firstName": "mock-first-name",
"id": "1234",
"userName": "mock-tm",
"role": "ADMIN",
"uid": "QWERTY"
})
```
#### File: test/api_rest/test_bilingual.py
```python
import os
import requests
import unittest
import uuid
from unittest.mock import patch, PropertyMock
from memsource import constants, models
from memsource.api_rest.bilingual import Bilingual
class TestBilingual(unittest.TestCase):
@patch("builtins.open")
@patch.object(requests.Session, "request")
def test_get_bilingual_file(
self,
mock_request: unittest.mock.Mock,
mock_open: unittest.mock.Mock
):
type(mock_request()).status_code = PropertyMock(return_value=200)
mxliff_contents = ['test mxliff content', 'second']
mock_request().iter_content.return_value = [
bytes(content, 'utf-8') for content in mxliff_contents]
project_id = 1234
job_uids = [1, 2]
Bilingual(token="mock-token").get_bilingual_file(
project_id, job_uids, "test.xlf"
)
mock_request.assert_called_with(
constants.HttpMethod.post.value,
"https://cloud.memsource.com/web/api2/v1/projects/1234/jobs/bilingualFile",
params={"token": "mock-token"},
json={"jobs": [{"uid": 1}, {"uid": 2}]},
timeout=60,
)
@patch.object(requests.Session, "request")
def test_get_bilingual_file_xml(self, mock_request):
type(mock_request()).status_code = PropertyMock(return_value=200)
mxliff_contents = ['test mxliff content', 'second']
mock_request().iter_content.return_value = [
bytes(content, 'utf-8') for content in mxliff_contents]
project_id = 1234
job_uids = [1, 2]
returned_value = Bilingual(token="mock-token").get_bilingual_file_xml(project_id, job_uids)
self.assertEqual(''.join(mxliff_contents).encode(), returned_value)
mock_request.assert_called_with(
constants.HttpMethod.post.value,
"https://cloud.memsource.com/web/api2/v1/projects/1234/jobs/bilingualFile",
params={
"token": "<PASSWORD>",
},
json={"jobs": [{"uid": 1}, {"uid": 2}]},
timeout=60,
)
@patch.object(requests.Session, 'request')
def test_get_bilingual_as_mxliff_units(self, mock_request):
type(mock_request()).status_code = PropertyMock(return_value=200)
mxliff_contents = ['test mxliff content', 'second']
mock_request().iter_content.return_value = [
bytes(content, 'utf-8') for content in mxliff_contents]
base_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)))
with open(os.path.join(base_dir, '..', 'lib', 'mxliff', 'test.mxliff')) as f:
mxliff = f.read().encode('utf-8')
mock_request().iter_content.return_value = [
mxliff[i: i + 100] for i in range(0, len(mxliff), 100)
]
project_id = 1234
job_uids = [1, 2]
bi = Bilingual(token="mock-token")
returned_value = bi.get_bilingual_as_mxliff_units(project_id, job_uids)
self.assertEqual(len(returned_value), 2)
self.assertIsInstance(returned_value[0], models.MxliffUnit)
self.assertEqual(returned_value[0], {
'id': 'fj4ewiofj3qowjfw:0',
'score': 0.0,
'gross_score': 0.0,
'source': 'Hello World.',
'target': None,
'machine_trans': None,
'memsource_tm': None,
'tunit_metadata': [],
})
self.assertIsInstance(returned_value[1], models.MxliffUnit)
self.assertEqual(returned_value[1], {
'id': 'fj4ewiofj3qowjfw:1',
'score': 1.01,
'gross_score': 1.01,
'source': 'This library wraps Memsoruce API for Python.',
'target': 'このライブラリはMemsourceのAPIをPython用にラップしています。',
'machine_trans': 'This is machine translation.',
'memsource_tm': 'This is memsource translation memory.',
'tunit_metadata': [],
})
mock_request.assert_called_with(
constants.HttpMethod.post.value,
"https://cloud.memsource.com/web/api2/v1/projects/1234/jobs/bilingualFile",
params={
"token": "mock-token",
},
json={"jobs": [{"uid": 1}, {"uid": 2}]},
timeout=60,
)
@patch.object(uuid, "uuid1")
@patch.object(requests.Session, "request")
def test_upload_bilingual_file_from_xml(
self,
mock_request: unittest.mock.Mock,
mock_uuid1: unittest.mock.Mock
):
type(mock_request()).status_code = PropertyMock(return_value=200)
xml = "<xml>this is test</xml>"
mock_uuid1().hex = "test_file"
Bilingual(token="mock-token").upload_bilingual_file_from_xml(xml)
mock_request.assert_called_with(
constants.HttpMethod.put.value,
"https://cloud.memsource.com/web/api2/v1/bilingualFiles",
params={"token": "mock-token"},
files={"file": ("test_file.mxliff", xml)},
timeout=constants.Base.timeout.value
)
``` |
{
"source": "JohnnyPolite/LODE",
"score": 3
} |
#### File: libraries/manual_scripts/NS.py
```python
import pandas as pd
df=pd.read_csv('/home/csis/codes/OpenTabulate/Libraries/NS_Libraries.csv')
def strip_point(x):
t=x.strip('(')
t=t.rstrip(')')
# t=t.strip(' (')
print(t)
return t.split()
LONGS=[]
LATS=[]
for i in df.Location:
a=str(i)
if a !='nan':
LONGS.append(strip_point(a)[1])
LATS.append(strip_point(a)[0])
else:
LONGS.append('')
LATS.append('')
df["LONGITUDE"]=LONGS
df["LATITUDE"]=LATS
f_out='/home/csis/codes/OpenTabulate/pddir/raw/NS_Libraries.csv'
df.to_csv(f_out)
```
#### File: libraries/manual_scripts/QC.py
```python
import geopandas as gpd
import pandas as pd
# read GeoJSON file with geopandas into a geodataframe
gdf=gpd.read_file('/home/csis/codes/OpenTabulate/Libraries/QC_Libraries.geoJSON')
gdf=pd.DataFrame(gdf)
def strip_point(x):
t=x.strip('POINT (')
t=t.rstrip(')')
print(t)
return t.split()
LONGS=[]
LATS=[]
for i in gdf.geometry:
a=str(i)
LONGS.append(strip_point(a)[0])
LATS.append(strip_point(a)[1])
gdf["LONGITUDE"]=LONGS
gdf["LATITUDE"]=LATS
gdf['Adresse']=gdf['Adresse'].str.replace(',','')
gdf.to_csv('/home/csis/codes/OpenTabulate/Libraries/QC_Libraries.csv')
```
#### File: HealthFacilities/manual_scripts/ON_Hospitals.py
```python
import geopandas as gpd
import pandas as pd
#read shapefile with geopandas into geodataframe
sc1=gpd.read_file('/home/csis/codes/shape_to_csv/MOH_SERVICE_LOCATION.shp')
sc1=pd.DataFrame(sc1)
print(sc1.SERV_TYPE.unique())
sc1=sc1.loc[(sc1["SERV_TYPE"]=="Hospital - Corporation") | (sc1["SERV_TYPE"]=="Hospital - Site")]
#sc1=sc1.loc[sc1["SERV_TYPE"]==("Hospital - Site")]
def strip_point(x):
x=str(x)
t=x.strip('POINT (')
t=t.rstrip(')')
print(t)
return t.split()
LONGS=[]
LATS=[]
for i in sc1.geometry:
LONGS.append(strip_point(i)[0])
LATS.append(strip_point(i)[1])
sc1["LONGITUDE"]=LONGS
sc1["LATITUDE"]=LATS
print(sc1)
sc1.to_csv('Ontario_hospitals.csv')
``` |
{
"source": "johnny-prnd/drf-yasg",
"score": 2
} |
#### File: drf-yasg/tests/test_get_basic_type_info_from_hint.py
```python
import uuid
import pytest
from drf_yasg import openapi
from drf_yasg.inspectors.field import get_basic_type_info_from_hint
try:
import typing
from typing import Dict, List, Union, Optional, Set
except ImportError:
typing = None
if typing:
@pytest.mark.parametrize('hint_class, expected_swagger_type_info', [
(int, {'type': openapi.TYPE_INTEGER, 'format': None}),
(str, {'type': openapi.TYPE_STRING, 'format': None}),
(bool, {'type': openapi.TYPE_BOOLEAN, 'format': None}),
(dict, {'type': openapi.TYPE_OBJECT, 'format': None}),
(Dict[int, int], {'type': openapi.TYPE_OBJECT, 'format': None}),
(uuid.UUID, {'type': openapi.TYPE_STRING, 'format': openapi.FORMAT_UUID}),
(List[int], {'type': openapi.TYPE_ARRAY, 'items': openapi.Items(openapi.TYPE_INTEGER)}),
(List[str], {'type': openapi.TYPE_ARRAY, 'items': openapi.Items(openapi.TYPE_STRING)}),
(List[bool], {'type': openapi.TYPE_ARRAY, 'items': openapi.Items(openapi.TYPE_BOOLEAN)}),
(Set[int], {'type': openapi.TYPE_ARRAY, 'items': openapi.Items(openapi.TYPE_INTEGER)}),
(Optional[bool], {'type': openapi.TYPE_BOOLEAN, 'format': None, 'x-nullable': True}),
(Optional[List[int]], {
'type': openapi.TYPE_ARRAY, 'items': openapi.Items(openapi.TYPE_INTEGER), 'x-nullable': True
}),
(Union[List[int], type(None)], {
'type': openapi.TYPE_ARRAY, 'items': openapi.Items(openapi.TYPE_INTEGER), 'x-nullable': True
}),
        # The following cases are not 100% correct, but they should still work and not crash.
(Union[int, float], None),
(List, {'type': openapi.TYPE_ARRAY, 'items': openapi.Items(openapi.TYPE_STRING)}),
('SomeType', None),
(type('SomeType', (object,), {}), None),
(None, None),
(6, None),
])
def test_get_basic_type_info_from_hint(hint_class, expected_swagger_type_info):
type_info = get_basic_type_info_from_hint(hint_class)
assert type_info == expected_swagger_type_info
``` |
{
"source": "Johnny-QA/Python_training",
"score": 4
} |
#### File: Python_training/Python tests/classes_objects.py
```python
lottery_player_dict = {
'name': 'Rolf',
'numbers': (5, 9, 12, 3, 1, 21)
}
class LotteryPlayer:
def __init__(self, name):
self.name = name
self.numbers = (5, 9, 12, 3, 1, 21)
def total(self):
return sum(self.numbers)
player_one = LotteryPlayer('Rolf')
player_one.numbers = (1, 2, 3, 6, 7, 8)
player_two = LotteryPlayer('John')
# print(player_two.total())
# print(player_one.numbers == player_two.numbers)
##
class Student:
def __init__(self, name, school):
self.name = name
self.school = school
self.marks = []
def average(self):
return sum(self.marks) / len(self.marks)
anna = Student('Anna', 'MIT')
anna.marks.append(18)
anna.marks.append(10)
anna.marks.append(33)
anna.marks.append(51)
print(anna.name, anna.school, anna.marks, anna.average())
```
#### File: Python_training/Python tests/if_statement.py
```python
def who_do_you_know():
people = input("Enter the names of people you know, separated by commas: ")
people_list = people.split(",")
normal_list = [person.strip().lower() for person in people_list]
return normal_list
# people_list_without_spaces = []
# for person in people_list:
# people_list_without_spaces.append(person.strip())
# return people_list_without_spaces
def ask_user():
person = input("Enter a name of someone you know: ")
if person.lower() in who_do_you_know():
return print("You know {}!".format(person))
else:
return print("You don`t know {}!".format(person))
ask_user()
``` |
{
"source": "johnny-richards/PyCandle",
"score": 3
} |
#### File: PyCandle/modules/layers.py
```python
import numpy as np
import pdb
from modules.base import ModuleBase
class Linear(ModuleBase):
def __init__(self, input_dim, output_dim, bias=True):
super(Linear, self).__init__()
self.bias = bias
self.W = np.random.randn(input_dim, output_dim) / np.sqrt(input_dim)
self.params['weight'] = self.W
if bias:
self.b = np.zeros(output_dim)
self.params['bias'] = self.b
def forward(self, x):
# check type
assert isinstance(x, np.ndarray)
# x: batch_size x input_dim
batch_size = x.shape[0]
y = np.dot(x, self.W)
if self.bias:
y += np.tile(self.b, (batch_size, 1))
self.results['x'] = x
return y
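    # Gradient sketch for backward(): with y = x @ W + b we have
    # dL/dx = grad @ W.T, dL/dW = x.T @ grad (accumulated over the batch via the
    # per-sample outer products below) and dL/db = grad summed over the batch.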
def backward(self, grad):
# check type
assert isinstance(grad, np.ndarray)
# grad: batch_size x output_dim
grad_input = np.dot(grad, self.W.T) # batch_size x input_dim
# gradient of such layer
x = self.results['x'] # batch_size x input_dim
batch_size = x.shape[0]
input_dim = x.shape[1]
output_dim = grad.shape[1]
x_unsqueeze = x.reshape((batch_size, input_dim, 1))
grad_unsqueeze = grad.reshape(batch_size, 1, output_dim)
Wgrad = np.sum(np.array([np.dot(cur_x, grad_unsqueeze[idx])
for idx, cur_x in enumerate(x_unsqueeze)]), 0) # input_dim x output_dim
if self.bias:
bgrad = np.sum(grad, 0) # output_dim
# save grad
self.grads['weight'] = Wgrad
if self.bias:
self.grads['bias'] = bgrad
# initialize result
self.results = {}
return grad_input
class Sigmoid(ModuleBase):
def __init__(self):
super(Sigmoid, self).__init__()
def forward(self, x):
# x: batch_size x dim
evalue = np.exp(-x)
sigmoid = 1 / (1 + evalue)
self.results['sigmoid'] = sigmoid
return sigmoid
def backward(self, grad):
sigmoid = self.results['sigmoid']
grad_input = grad * sigmoid * (1 - sigmoid)
self.results = {}
return grad_input
class Softmax(ModuleBase):
def __init__(self):
super(Softmax, self).__init__()
def forward(self, x):
# x: batch_size x dim
xmax = np.max(x, 1) # batch_size
xmax_expand = np.tile(xmax.reshape(x.shape[0], 1), (1, x.shape[1]))
evalue = np.exp(x - xmax_expand)
esum = np.sum(evalue, 1)
esum_expand = np.tile(esum.reshape(x.shape[0], 1), (1, x.shape[1]))
softmax = evalue / esum_expand # batch_size x dim
self.results['softmax'] = softmax
return softmax
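    # Gradient sketch for backward(): the per-sample softmax Jacobian is
    # J = diag(s) - s @ s.T (W1 - W2 below), and the input gradient is grad @ J.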
def backward(self, grad):
softmax = self.results['softmax']
self.results = {}
W1 = np.array([np.diag(q) for q in softmax]) # batch_size x dim x dim
q = softmax.reshape(softmax.shape[0], softmax.shape[1], 1) # batch_size x dim x 1
qt = softmax.reshape(softmax.shape[0], 1, softmax.shape[1]) # batch_size x 1 x dim
W2 = np.array([np.dot(q[k], qt[k]) for k in range(q.shape[0])]) # batch_size x dim x dim
W = W1 - W2
grad_expand = grad.reshape(grad.shape[0], 1, grad.shape[1]) # batch_size x 1 x dim
grad_input_expand = np.array([np.dot(grad_expand[k], W[k]) for k in range(grad.shape[0])]) # batch_size x 1 x dim
grad_input = grad_input_expand.reshape(grad.shape[0], grad.shape[1]) # batch_size x dim
return grad_input
class Tanh(ModuleBase):
def __init__(self):
super(Tanh, self).__init__()
def forward(self, x):
# x: batch_size x dim
evalue = np.exp(x)
value = (evalue - 1 / evalue) / (evalue + 1 / evalue)
self.results['tanh'] = value
return value
def backward(self, grad):
value = self.results['tanh']
self.results = {}
grad_input = grad * (1 - value ** 2)
return grad_input
class ReLU(ModuleBase):
def __init__(self):
super(ReLU, self).__init__()
def forward(self, x):
# x: batch size x dim
mask = (x > 0.0).astype(np.float)
y = mask * x
self.results['mask'] = mask
return y
def backward(self, grad):
grad_input = grad * self.results['mask']
self.results = {}
return grad_input
if __name__=='__main__':
# check gradient
input_dim = 8
output_dim = 5
batch_size = 2
# model
linear = Linear(input_dim, output_dim)
print('{}'.format(linear.__class__.__name__))
``` |
{
"source": "JohnnyRyan1/parks-and-golf",
"score": 3
} |
#### File: parks-and-golf/scripts/3_centroid_golf_park_pop.py
```python
import geopandas as gpd
import pandas as pd
import numpy as np
import glob
import os
import math
import scipy.spatial as spatial
from shapely.ops import nearest_points
# Define path
path = '/home/johnny/Documents/Teaching/490_Geospatial_Data_Science_Applications/Applications/OSM_Parks_and_Golf/data/'
# Import state codes
codes = pd.read_csv(path + 'state_codes.csv')
# Define parks and golf courses
parks_list = sorted(glob.glob(path + 'parks_and_golf/*_parks.shp'))
golf_list = sorted(glob.glob(path + 'golf_with_name/*_golf_courses.shp'))
# Define urban areas
bg_list = sorted(glob.glob(path + 'urban_block_groups/*.shp'))
# Define population data
pop_list = sorted(glob.glob(path + 'pop_by_block_group/*.csv'))
def convert_wgs_to_utm(lon, lat):
utm_band = str((math.floor((lon + 180) / 6 ) % 60) + 1)
if len(utm_band) == 1:
utm_band = '0'+utm_band
if lat >= 0:
epsg_code = '326' + utm_band
else:
epsg_code = '327' + utm_band
return epsg_code
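# Illustrative example: lon=-123.1, lat=49.2 -> floor(56.9 / 6) % 60 + 1 = 10,
# northern hemisphere, so the function returns '326' + '10' = '32610'.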
for j in range(codes.shape[0]):
# Get FIPS code
fips = str(codes['FIPS'].iloc[j]).zfill(2)
# Get state name
state_name = codes['Name'].iloc[j].replace(' ', '_')
if os.path.exists(path + 'state_stats/data_' + fips + '_' + state_name + '.shp'):
print('Skipping... %.0f out of %.0f' %(j+1, codes.shape[0]))
else:
print('Processing... %.0f out of %.0f' %(j+1, codes.shape[0]))
#######################################################################
# Get corresponding files
#######################################################################
# Get park and golf course shapefiles
matching_park = [s for s in parks_list if state_name + '_parks.shp' in s]
matching_golf = [s for s in golf_list if state_name + '_golf_courses.shp' in s]
# Get urban block groups shapefile
matching_bg = [s for s in bg_list if fips + '_bg_urban.shp' in s]
# Get block group population table
matching_pop = [s for s in pop_list if '_' + fips in s]
#######################################################################
# Read all files
#######################################################################
park_gdf = gpd.read_file(matching_park[0])
golf_gdf = gpd.read_file(matching_golf[0])
bg_gdf = gpd.read_file(matching_bg[0])
pop_df = pd.read_csv(matching_pop[0])
# Dissolve and explode to remove overlapping polygons
park_dissolve = park_gdf.dissolve()
park_dissolve = park_dissolve.explode()
# =============================================================================
# golf_dissolve = golf_gdf.dissolve()
# golf_dissolve = golf_dissolve.explode()
# =============================================================================
golf_dissolve = golf_gdf
#######################################################################
# Convert everything to UTM coordinates
#######################################################################
# Get UTM zone EPSG code of state
lon_poly, lat_poly = park_dissolve[(park_dissolve['geometry'].geom_type == 'Polygon')]['geometry'].iloc[0].exterior.coords.xy
utm_zone = convert_wgs_to_utm(lon_poly[0], lat_poly[0])
epsg = 'EPSG:' + utm_zone
# Convert
bg_gdf = bg_gdf.to_crs(epsg)
park_dissolve = park_dissolve.to_crs(epsg)
golf_dissolve = golf_dissolve.to_crs(epsg)
# Compute area
park_dissolve['area'] = park_dissolve['geometry'].area
golf_dissolve['area'] = golf_dissolve['geometry'].area
# Remove anything smaller than a football pitch
park_dissolve = park_dissolve[park_dissolve['area'] > 7000]
golf_dissolve = golf_dissolve[golf_dissolve['area'] > 7000]
park_dissolve.reset_index(inplace=True)
golf_dissolve.reset_index(inplace=True)
# Compute centroids
bg_gdf['centroid'] = bg_gdf['geometry'].centroid
park_dissolve['centroid'] = park_dissolve['geometry'].centroid
golf_dissolve['centroid'] = golf_dissolve['geometry'].centroid
# Construct kd tree
park_point_tree = spatial.cKDTree(np.vstack((park_dissolve['centroid'].x.values,
park_dissolve['centroid'].y.values)).T)
golf_point_tree = spatial.cKDTree(np.vstack((golf_dissolve['centroid'].x.values,
golf_dissolve['centroid'].y.values)).T)
# Calculate distance to parks and golf courses
distance_park = []
distance_golf = []
name_golf = []
for i in range(bg_gdf.shape[0]):
#print('%s... %.0f out of % .0f' %(state_name, i+1, bg_gdf.shape[0]))
# Find nearest park to block group centroid
dist1, idx1 = park_point_tree.query((bg_gdf['centroid'].x.iloc[i],
bg_gdf['centroid'].y.iloc[i]),
k=4)
# Compute distance from block group centroid to park edge
distances = []
for idx in range(len(idx1)):
p1, p2 = nearest_points(park_dissolve.iloc[idx1[idx]]['geometry'], bg_gdf['centroid'].iloc[i])
distances.append(p1.distance(p2))
# Append to list
distance_park.append(np.array(distances).min())
# Find nearest golf course to block group centroid
dist2, idx2 = golf_point_tree.query((bg_gdf['centroid'].x.iloc[i],
bg_gdf['centroid'].y.iloc[i]),
k=1)
# Compute distance from block group centroid to park edge
p3, p4 = nearest_points(golf_dissolve.iloc[idx2]['geometry'], bg_gdf['centroid'].iloc[i])
# Append to list
distance_golf.append(p3.distance(p4))
name_golf.append(golf_dissolve.iloc[idx2]['name'])
""" note that it is possible that some parks and golf courses overlap.
Right now they would both be counted. """
bg_gdf['park_dist'] = distance_park
bg_gdf['golf_dist'] = distance_golf
bg_gdf['golf_name'] = name_golf
# Drop some columns so can export
bg_gdf.drop(columns=['centroid'], inplace=True)
# Export to shapefile
bg_gdf.to_file(path + 'state_stats/data_' + fips + '_' + state_name + '.shp')
``` |
{
"source": "JohnnySn0w/BabbleBot",
"score": 3
} |
#### File: JohnnySn0w/BabbleBot/boomer.py
```python
import random
prefix = [
'Look at you! ',
'Bless ',
'Bless! ',
'I heard about that! ',
'Amen!',
'You and the kids doing alright?',
'Miss ya\'ll!'
]
suffix = [
'. Amen!',
'. God bless america',
'. God bless!',
' haha',
'. love ya!',
'. love ya\'ll!',
]
def add_pre_suf(sentence):
if random.randint(1,10) <= 6:
if random.randint(1,10) <= 5:
sentence = prefix[random.randint(0, len(prefix) - 1)] + sentence
else:
sentence += suffix[random.randint(0, len(suffix) - 1)]
return sentence
def add_elipses(sentence):
words = sentence.split()
for i in range(4, len(words), 5):
if random.randint(1,10) <= 7:
words[i] += "..."
return " ".join(words)
def boomer_caps(sentence):
seed = random.randint(1, 10)
sent_array = sentence.split()
if seed in (1, 2, 3):
return sentence
elif seed in (4, 5):
temp_sent = []
for x in sent_array:
if random.random() < 0.25:
x = x.upper()
temp_sent.append(x)
return " ".join(temp_sent)
elif seed in (6, 7):
temp_sent = []
for x in sent_array:
if random.random() < 0.5:
x = x.upper()
temp_sent.append(x)
return " ".join(temp_sent)
elif seed in (8, 9):
return sentence.title()
elif seed == 10:
return sentence.upper()
```
#### File: JohnnySn0w/BabbleBot/markov.py
```python
import json
import random
BEGIN_TAG = "<START>"
END_TAG = "<END>"
DEFAULT_STATE_LEN = 1
def get_zip(words, state_len: int =DEFAULT_STATE_LEN):
zip_list = []
for i in range(0, state_len + 1):
zip_list.append(words[i:])
return zip_list
def create_markov_model(corpus: str, delim: str = "\n", state_len: int =DEFAULT_STATE_LEN):
model = {}
for line in corpus.split(delim):
# Split the sentence into words. Pad with a beginning and end tag.
words = line.split()
words.insert(0, BEGIN_TAG)
words.append(END_TAG)
# For every consecutive state_len + 1 words in the sentence, create or update a key/pair value.
for tup in zip(*get_zip(words, state_len)):
key = " ".join(tup[:-1])
if key not in model:
model[key] = []
model[key] += [tup[-1]]
return model
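# Illustrative example: create_markov_model("the cat sat", state_len=1) returns
# {'<START>': ['the'], 'the': ['cat'], 'cat': ['sat'], 'sat': ['<END>']}.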
def gen_sentence(model, state_len: int =DEFAULT_STATE_LEN):
sentence = []
while not sentence:
if state_len > 1:
sentence = random.choice([key for key in model if key.split()[0] == BEGIN_TAG]).split()
else:
sentence = [random.choice(model[BEGIN_TAG])]
while True:
try:
if state_len > 1:
curr_word = random.choice(model[" ".join(sentence[-state_len:])])
else:
curr_word = random.choice(model[sentence[-1]])
except KeyError:
break
if curr_word != END_TAG:
sentence.append(curr_word)
else:
break
if sentence[-1] == END_TAG:
sentence = []
if state_len > 1:
sentence.pop(0)
return " ".join(sentence)
if __name__ == "__main__":
with open("197937757954375680.out", "r") as f:
content = f.read()
model = create_markov_model(content)
for _ in range(10):
print(gen_sentence(model))
with open('vidata.json', 'w', encoding='utf-8-sig') as f:
json.dump(model, f)
``` |
{
"source": "johnnysnq/rdl",
"score": 3
} |
#### File: johnnysnq/rdl/rdl.py
```python
import os
import sys
import redis
import base64
import argparse
__version__ = '1.2.0'
PY2 = sys.version_info.major == 2
BUF_LIMIT = 1024 * 2
WRITE_MODE = 'wb'
AWRITE_MODE = 'ab'
if PY2:
WRITE_MODE = 'w'
AWRITE_MODE = 'a'
def write_file(file_name, buf, initial=False):
"""
:param bytes buf: buffer to write, in python2 it's type would be str
"""
if initial:
mode = WRITE_MODE
if os.path.exists(file_name):
print('==> Warning: {} will be covered!'.format(file_name))
else:
print('==> Writing {}, chunk size {}'.format(file_name, len(buf)))
mode = AWRITE_MODE
with open(file_name, mode) as f:
f.write(buf)
def print_loop(loop, clear=True):
s = '==> processed keys: {}'.format(loop)
if clear:
sys.stdout.write(s)
sys.stdout.flush()
sys.stdout.write(len(s) * '\b')
else:
print(s)
def get_client(n, host=None, port=None, password=None):
if hasattr(redis, 'StrictRedis'):
client_class = redis.StrictRedis
else:
# Backward compatibility
client_class = redis.Redis
kwargs = {}
if host:
kwargs['host'] = host
if port:
kwargs['port'] = port
if password:
kwargs['password'] = password
db = client_class(db=n, **kwargs)
print('==> Use redis {}:{}/{}'.format(host or '<default host>', port or '<default port>', n))
# TODO show db info
return db
def dump(file_name, db, ignore_none_value=False):
buf = b''
loop = 0
initial_write = True
# NOTE KEYS may ruin performance when it is executed against large databases.
# SCAN can be used in production without the downside of commands like KEYS
for k in db.scan_iter():
# `k`is bytes for python3, str for python2
# dump() returns bytes for python3, str for python2
v = db.dump(k)
if v is None:
msg = 'got None when DUMP key `{}`'.format(k)
if ignore_none_value:
print('{}, ignore'.format(msg))
continue
else:
raise ValueError(msg)
# form the line
line = k + b'\t' + base64.b64encode(v) + b'\n'
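# Each dumped line therefore looks like b"<key>\t<base64 of DUMP payload>\n",
# e.g. (illustrative values only) b"user:1\tAAExAg==\n"; load() below reverses this with RESTORE.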
buf += line
loop += 1
if loop % BUF_LIMIT == 0:
write_file(file_name, buf, initial_write)
print_loop(loop)
# Clear buf
buf = b''
initial_write = False
# In case of not reach limit
if buf:
write_file(file_name, buf, initial_write)
print_loop(loop, False)
if not loop:
print('Empty db, nothing happened')
return
def load(file_name, db, flush):
# `flush` indicates whether to empty the target database before loading
if flush:
print('==> Flush database!')
db.flushdb()
with open(file_name, 'r') as f:
loop = 0
for line in f:
k, v = tuple(line.split('\t'))
v = base64.b64decode(v)
try:
db.restore(k, 0, v)
except redis.exceptions.ResponseError:
print(k, v)
continue
loop += 1
if loop % BUF_LIMIT == 0:
print_loop(loop)
print_loop(loop, False)
def main():
parser = argparse.ArgumentParser(description="Redis dump-load tool.", add_help=False)
parser.add_argument('action', metavar="ACTION", type=str, choices=['dump', 'load'], help="`dump` or `load`.")
parser.add_argument('file_name', metavar="FILE", type=str, help="if action is dump, this is the output file; if action is load, this is the source file.")
parser.add_argument('-n', type=int, default=0, help="Number of database to process.")
parser.add_argument('-h', '--host', type=str, help="Redis host")
parser.add_argument('-p', '--port', type=int, help="Redis port")
parser.add_argument('-a', '--auth', type=str, help="Redis password")
parser.add_argument('-f', '--flushdb', action='store_true', help="Flush the database before loading")
parser.add_argument('--ignore-none-value', action='store_true', help="Ignore None when dumping db, by default it will raise ValueError if DUMP result is None")
parser.add_argument('--help', action='help', help="show this help message and exit")
# --version
parser.add_argument('--version', action='version',
version='%(prog)s {version}'.format(version=__version__))
args = parser.parse_args()
db = get_client(args.n, args.host, args.port, args.auth)
if 'dump' == args.action:
dump(args.file_name, db, args.ignore_none_value)
else: # load
load(args.file_name, db, args.flushdb)
if __name__ == '__main__':
main()
``` |
{
"source": "JohnnySun8/TextWorld",
"score": 2
} |
#### File: TextWorld/tests/test_sample_quests.py
```python
import os
from os.path import join as pjoin
from subprocess import check_output
from textworld.utils import make_temp_directory
SCRIPTS_PATH = os.path.abspath(pjoin(__file__, "..", "..", "scripts"))
def test_sample_quests():
with make_temp_directory(prefix="test_sample_quests") as tmpdir:
game_file = pjoin(tmpdir, "game.ulx")
command = ["tw-make", "custom", "--seed", "20181004", "--output", game_file]
check_output(command).decode()
script = pjoin(SCRIPTS_PATH, "sample_quests.py")
command = ["python", script, "--nb-quests", "10", "--quest-length", "10",
"--quest-breadth", "5", "--output", tmpdir, game_file]
stdout = check_output(command).decode()
assert len(stdout) > 0
assert os.path.isfile(pjoin(tmpdir, "sample_world.png"))
assert os.path.isfile(pjoin(tmpdir, "sample_tree.svg"))
assert os.path.isfile(pjoin(tmpdir, "sample_graph.svg"))
```
#### File: TextWorld/tests/test_tw-play.py
```python
from subprocess import check_call
import textworld
from textworld.utils import make_temp_directory
def test_playing_a_game():
for ext in [".ulx", ".z8"]:
with make_temp_directory(prefix="test_tw-play") as tmpdir:
options = textworld.GameOptions()
options.file_ext = ext
options.path = tmpdir
options.nb_rooms = 5
options.nb_objects = 10
options.quest_length = 5
options.quest_breadth = 2
options.seeds = 1234
game_file, _ = textworld.make(options)
command = ["tw-play", "--max-steps", "100", "--mode", "random", game_file]
assert check_call(command) == 0
command = ["tw-play", "--max-steps", "100", "--mode", "random-cmd", game_file]
assert check_call(command) == 0
command = ["tw-play", "--max-steps", "100", "--mode", "walkthrough", game_file]
assert check_call(command) == 0
```
#### File: challenges/spaceship/agent_design_a2c.py
```python
from collections import defaultdict
from os.path import join as pjoin
from time import time
from glob import glob
from typing import Mapping, Any, Optional
import re
import numpy as np
import os
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from textworld import EnvInfos
import textworld.gym
PATH = pjoin(os.path.dirname(__file__), 'textworld_data')
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class ActorzCritic(nn.Module):
eps = 0.01
def __init__(self, input_size, hidden_size):
super(ActorzCritic, self).__init__()
torch.manual_seed(42) # For reproducibility
self.embedding = nn.Embedding(input_size, hidden_size)
self.encoder_gru = nn.GRU(hidden_size, hidden_size)
self.cmd_encoder_gru = nn.GRU(hidden_size, hidden_size)
self.state_gru = nn.GRU(hidden_size, hidden_size)
self.linear_1 = nn.Linear(2 * hidden_size, 2 * hidden_size)
self.critic = nn.Linear(hidden_size, 1)
self.actor = nn.Linear(hidden_size * 2, 1)
# Parameters
self.state_hidden = torch.zeros(1, 1, hidden_size, device=device)
self.hidden_size = hidden_size
def forward(self, obs, commands, mode, method):
input_length, batch_size = obs.size(0), obs.size(1)
nb_cmds = commands.size(1)
embedded = self.embedding(obs)
encoder_output, encoder_hidden = self.encoder_gru(embedded)
state_output, state_hidden = self.state_gru(encoder_hidden, self.state_hidden)
self.state_hidden = state_hidden
state_value = self.critic(state_output)
# Attention network over the commands.
cmds_embedding = self.embedding.forward(commands)
_, cmds_encoding_last_states = self.cmd_encoder_gru.forward(cmds_embedding) # 1*cmds*hidden
# Same observed state for all commands.
cmd_selector_input = torch.stack([state_hidden] * nb_cmds, 2) # 1*batch*cmds*hidden
# Same command choices for the whole batch.
cmds_encoding_last_states = torch.stack([cmds_encoding_last_states] * batch_size, 1) # 1*batch*cmds*hidden
# Concatenate the observed state and command encodings.
input_ = torch.cat([cmd_selector_input, cmds_encoding_last_states], dim=-1)
# One FC layer
x = F.relu(self.linear_1(input_))
# Compute state-action value (score) per command.
action_state = F.relu(self.actor(x)).squeeze(-1) # 1 x Batch x cmds
# action_state = F.relu(self.actor(input_)).squeeze(-1) # 1 x Batch x cmds
probs = F.softmax(action_state, dim=2) # 1 x Batch x cmds
if mode == "train":
action_index = probs[0].multinomial(num_samples=1).unsqueeze(0) # 1 x batch x indx
elif mode == "test":
if method == 'random':
action_index = probs[0].multinomial(num_samples=1).unsqueeze(0) # 1 x batch x indx
elif method == 'arg-max':
action_index = probs[0].max(1).indices.unsqueeze(-1).unsqueeze(-1) # 1 x batch x indx
elif method == 'eps-soft':
index = probs[0].max(1).indices.unsqueeze(-1).unsqueeze(-1)
p = np.random.random()
if p < (1 - self.eps + self.eps / nb_cmds):
action_index = index
else:
while True:
tp = np.random.choice(probs[0][0].detach().numpy())
if (probs[0][0] == tp).nonzero().unsqueeze(-1) != index:
action_index = (probs[0][0] == tp).nonzero().unsqueeze(-1)
break
return action_state, action_index, state_value
def reset_hidden(self, batch_size):
self.state_hidden = torch.zeros(1, batch_size, self.hidden_size, device=device)
class NeuralAgent:
""" Simple Neural Agent for playing TextWorld games. """
MAX_VOCAB_SIZE = 1000
UPDATE_FREQUENCY = 10
LOG_FREQUENCY = 1000
GAMMA = 0.9
def __init__(self) -> None:
self.id2word = ["<PAD>", "<UNK>"]
self.word2id = {w: i for i, w in enumerate(self.id2word)}
self.model = ActorzCritic(input_size=self.MAX_VOCAB_SIZE, hidden_size=128)
self.optimizer = optim.Adam(self.model.parameters(), 0.00003)
def train(self):
self.mode = "train"
self.method = "random"
self.transitions = []
self.last_score = 0
self.no_train_step = 0
self.stats = {"max": defaultdict(list), "mean": defaultdict(list)}
self.memo = {"max": defaultdict(list), "mean": defaultdict(list), "mem": defaultdict(list)}
self.model.reset_hidden(1)
def test(self, method):
self.mode = "test"
self.method = method
self.model.reset_hidden(1)
@property
def infos_to_request(self) -> EnvInfos:
return EnvInfos(description=True, inventory=True, admissible_commands=True, has_won=True, has_lost=True)
def act(self, obs: str, score: int, done: bool, infos: Mapping[str, Any]) -> Optional[str]:
# Build agent's observation: feedback + look + inventory.
input_ = "{}\n{}\n{}".format(obs, infos["description"], infos["inventory"])
# Tokenize and pad the input and the commands to chose from.
input_tensor = self._process([input_])
commands_tensor = self._process(infos["admissible_commands"])
# Get our next action and value prediction.
outputs, indexes, values = self.model(input_tensor, commands_tensor, mode=self.mode, method=self.method)
action = infos["admissible_commands"][indexes[0]]
if self.mode == "test":
if done:
self.model.reset_hidden(1)
return action
self.no_train_step += 1
if self.transitions:
reward = score - self.last_score # Reward is the gain/loss in score.
self.last_score = score
if infos["has_won"]:
reward += 100
if infos["has_lost"]:
reward -= 100
self.transitions[-1][0] = reward # Update reward information.
self.stats["max"]["score"].append(score)
self.memo["max"]["score"].append(score)
if self.no_train_step % self.UPDATE_FREQUENCY == 0:
# Update model
returns, advantages = self._discount_rewards(values)
loss = 0
for transition, ret, advantage in zip(self.transitions, returns, advantages):
reward, indexes_, outputs_, values_ = transition
advantage = advantage.detach() # Block gradients flow here.
probs = F.softmax(outputs_, dim=2)
log_probs = torch.log(probs)
log_action_probs = log_probs.gather(2, indexes_)
policy_loss = (log_action_probs * advantage).sum()
value_loss = ((values_ - ret) ** 2.).sum()
entropy = (-probs * log_probs).sum()
loss += 0.5 * value_loss - policy_loss - 0.001 * entropy
self.memo["mem"]["selected_action_index"].append(indexes_.item())
self.memo["mem"]["state_val_func"].append(values_.item())
self.memo["mem"]["advantage"].append(advantage.item())
self.memo["mem"]["return"].append(ret.item())
self.memo["mean"]["reward"].append(reward)
self.memo["mean"]["policy_loss"].append(policy_loss.item())
self.memo["mean"]["value_loss"].append(value_loss.item())
self.stats["mean"]["reward"].append(reward)
self.stats["mean"]["policy_loss"].append(policy_loss.item())
self.stats["mean"]["value_loss"].append(value_loss.item())
self.stats["mean"]["entropy"].append(entropy.item())
self.stats["mean"]["confidence"].append(torch.exp(log_action_probs).item())
if self.no_train_step % self.LOG_FREQUENCY == 0:
msg = "{}. ".format(self.no_train_step)
msg += " ".join("{}: {:.3f}".format(k, np.mean(v)) for k, v in self.stats["mean"].items())
msg += " " + " ".join("{}: {}".format(k, np.max(v)) for k, v in self.stats["max"].items())
msg += " vocab: {}".format(len(self.id2word))
print(msg)
self.stats = {"max": defaultdict(list), "mean": defaultdict(list)}
self.optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm(self.model.parameters(), 40)
self.optimizer.step()
self.optimizer.zero_grad()
self.transitions = []
self.model.reset_hidden(1)
else:
# Keep information about transitions for Truncated Backpropagation Through Time.
self.transitions.append([None, indexes, outputs, values]) # Reward will be set on the next call
if done:
self.last_score = 0 # Will be starting a new episode. Reset the last score.
return action
def _process(self, texts):
texts = list(map(self._tokenize, texts))
max_len = max(len(l) for l in texts)
padded = np.ones((len(texts), max_len)) * self.word2id["<PAD>"]
for i, text in enumerate(texts):
padded[i, :len(text)] = text
padded_tensor = torch.from_numpy(padded).type(torch.long).to(device)
padded_tensor = padded_tensor.permute(1, 0) # Batch x Seq => Seq x Batch
return padded_tensor
def _tokenize(self, text):
# Simple tokenizer: strip out all non-alphabetic characters.
text = re.sub(r"[^a-zA-Z0-9\- ]", " ", text)
word_ids = list(map(self._get_word_id, text.split()))
return word_ids
def _get_word_id(self, word):
if word not in self.word2id:
if len(self.word2id) >= self.MAX_VOCAB_SIZE:
return self.word2id["<UNK>"]
self.id2word.append(word)
self.word2id[word] = len(self.word2id)
return self.word2id[word]
def _discount_rewards(self, last_values):
returns, advantages = [], []
R = last_values.data
for t in reversed(range(len(self.transitions))):
rewards, _, _, values = self.transitions[t]
R = rewards + self.GAMMA * R
adv = R - values
returns.append(R)
advantages.append(adv)
return returns[::-1], advantages[::-1]
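# Illustrative (assumed) rollout: with GAMMA = 0.9, last_values = 0, rewards [0, 1] and
# state values [0, 0], _discount_rewards yields returns [0.9, 1.0] and advantages equal
# to the returns, i.e. earlier steps receive a discounted share of the later reward.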
def play(agent, path, max_step=50, nb_episodes=10, verbose=True):
"""
This code reuses the cooking agent design for the spaceship game.
:param agent: a NeuralAgent instance that will play the game
:param path: the path to the game file (or to a directory of games)
"""
infos_to_request = agent.infos_to_request
infos_to_request.max_score = True # Needed to normalize the scores.
gamefiles = [path]
if os.path.isdir(path):
gamefiles = glob(os.path.join(path, "*.ulx"))
env_id = textworld.gym.register_games(gamefiles,
request_infos=infos_to_request,
max_episode_steps=max_step)
env = gym.make(env_id) # Create a Gym environment to play the text game.
if verbose:
if os.path.isdir(path):
print(os.path.dirname(path), end="")
else:
print(os.path.basename(path), end="")
# Collect some statistics: nb_steps, final reward.
avg_moves, avg_scores, avg_norm_scores, seed_h = [], [], [], 4567
for no_episode in range(nb_episodes):
obs, infos = env.reset() # Start new episode.
env.env.textworld_env._wrapped_env.seed(seed=seed_h)
seed_h += 1
score = 0
done = False
nb_moves = 0
while not done:
command = agent.act(obs, score, done, infos)
obs, score, done, infos = env.step(command)
nb_moves += 1
agent.act(obs, score, done, infos) # Let the agent know the game is done.
if verbose:
print(".", end="")
avg_moves.append(nb_moves)
avg_scores.append(score)
avg_norm_scores.append(score / infos["max_score"])
env.close()
msg = " \tavg. steps: {:5.1f}; avg. score: {:4.1f} / {}."
if verbose:
if os.path.isdir(path):
print(msg.format(np.mean(avg_moves), np.mean(avg_norm_scores), 1))
else:
print(avg_scores)
print(msg.format(np.mean(avg_moves), np.mean(avg_scores), infos["max_score"]))
agent = NeuralAgent()
step_size = 750
print(" ===== Training ===================================================== ")
agent.train() # Tell the agent it should update its parameters.
start_time = time()
print(os.path.realpath("./games/levelMedium_v1.ulx"))
play(agent, "./games/levelMedium_v1.ulx", max_step=step_size, nb_episodes=2000, verbose=False)
print("Trained in {:.2f} secs".format(time() - start_time))
print(' ===== Test ========================================================= ')
agent.test(method='random')
play(agent, "./games/levelMedium_v1.ulx", max_step=step_size) # Medium level game.
save_path = "./model/levelMedium_v1_random.npy"
if not os.path.exists(os.path.dirname(save_path)):
os.mkdir(os.path.dirname(save_path))
np.save(save_path, agent)
```
#### File: batch/tests/test_batch_env.py
```python
import gym
import textworld
import textworld.gym
from textworld import EnvInfos
from textworld.utils import make_temp_directory
def test_batch_env():
batch_size = 4
max_episode_steps = 13
with make_temp_directory() as tmpdir:
options = textworld.GameOptions()
options.path = tmpdir
options.seeds = 1234
options.file_ext = ".ulx"
game_file1, game1 = textworld.make(options)
options.seeds = 4321
options.file_ext = ".z8"
game_file2, game2 = textworld.make(options)
env_options = EnvInfos(inventory=True, description=True,
admissible_commands=True)
env_id = textworld.gym.register_games([game_file1, game_file1, game_file2, game_file2],
request_infos=env_options,
batch_size=batch_size,
max_episode_steps=max_episode_steps,
name="test-auto-reset",
asynchronous=True,
auto_reset=True)
env = gym.make(env_id)
env.reset()
# env.close()
del env
print("OKAY")
```
#### File: textworld/generator/logger.py
```python
import pickle
import textwrap
from collections import defaultdict
__all__ = ["GameLogger"]
def zero():
return 0
def empty_list():
return []
def update_bincount(arr, count):
""" Update bincount in-place. """
if count >= len(arr):
extend_size = count - len(arr) + 1
arr += [0] * extend_size
arr[count] += 1
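# Illustrative example (assumed values): arr = [2, 0]; update_bincount(arr, 3) extends and
# increments it in place, leaving arr == [2, 0, 0, 1].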
def merge_bincout(arr1, arr2):
arr = [0] * max(len(arr1), len(arr2))
for i, v in enumerate(arr1):
arr[i] += v
for i, v in enumerate(arr2):
arr[i] += v
return arr
class GameLogger:
def __init__(self, group_actions=True):
self.group_actions = group_actions
# Stats
self.n_games = 0
self.dist_obj_type = defaultdict(zero)
self.dist_obj_type_count = defaultdict(empty_list)
self.dist_cmd_type = defaultdict(zero)
self.dist_final_cmd_type = defaultdict(zero)
self.dist_quest_count = []
self.dist_quest_length_count = []
self.dist_obj_count = []
self.dist_inventory_size = []
self.quests = set()
self.objects = set()
# TODO:
# Get statistic for:
# - Avg. description length
# - Avg. number of container/supporters
# - Avg. number of items in container/on supporters
# - Avg. number of free exits per world
# - Avg. number of doors per world
# - Avg. number of contiendoor per world
# - Distribution of the commands type
# - Distribution of the objects names
# - Number of already seen environments
# - Number of commands per game
def collect(self, game):
self.n_games += 1
# Collect distribution of nb. of commands.
update_bincount(self.dist_quest_count, len(game.quests))
# Collect distribution of commands leading to winning events.
for quest in game.quests:
self.quests.add(quest.desc)
for event in quest.win_events:
actions = event.actions
update_bincount(self.dist_quest_length_count, len(actions))
for action in actions:
action_name = action.name
if self.group_actions:
action_name = action_name.split("-")[0].split("/")[0]
self.dist_cmd_type[action_name] += 1
self.dist_final_cmd_type[action_name] += 1
# Collect distribution of object's types.
dist_obj_type = defaultdict(lambda: 0)
interactable_objects = game.world.objects
inventory = game.world.get_objects_in_inventory()
for obj in interactable_objects:
self.objects.add(game.infos[obj.id].name)
dist_obj_type[obj.type] += 1
nb_objects = 0
for type_ in game.kb.types:
if type_ in ["I", "P", "t", "r"]:
continue
count = dist_obj_type[type_]
nb_objects += count
self.dist_obj_type[type_] += count
update_bincount(self.dist_obj_type_count[type_], count)
update_bincount(self.dist_obj_count, nb_objects)
update_bincount(self.dist_inventory_size, len(inventory))
def display_stats(self):
print(self.stats())
def stats(self):
txt = textwrap.dedent("""\
Nb. games: {n_games}
Quests count: {dist_quest_count} ({unique_quest_count} unique)
Quest length count: {dist_quest_length_count}
Objects: {dist_obj_count} ({unique_obj_count} unique)
Inventory: {dist_inventory_size}
Objects types overall:
{dist_obj_type}
Objects types per game:
{dist_obj_type_count}
Commands types [{nb_cmd_type}]:
{dist_cmd_type}
Final command types [{nb_final_cmd_type}]:
{dist_final_cmd_type}
""")
def bincount_str(bincount):
text = []
for i, c in enumerate(bincount):
text.append(str(c))
if (i + 1) % 5 == 0 and (i + 1) < len(bincount):
text.append("|")
return " ".join(text)
def frequencies_str(freqs):
if len(freqs) == 0:
return ""
text = []
labels_max_len = max(map(len, freqs.keys()))
total = float(sum(freqs.values()))
for k in sorted(freqs.keys()):
text += ["{}: {:5.1%}".format(k.rjust(labels_max_len),
freqs[k] / total)]
return "\n ".join(text)
dist_quest_count = bincount_str(self.dist_quest_count)
dist_quest_length_count = bincount_str(self.dist_quest_length_count)
dist_inventory_size = bincount_str(self.dist_inventory_size)
dist_cmd_type = frequencies_str(self.dist_cmd_type)
dist_final_cmd_type = frequencies_str(self.dist_final_cmd_type)
dist_obj_count = bincount_str(self.dist_obj_count)
dist_obj_type = " ".join("{}:{}".format(k, self.dist_obj_type[k])
for k in sorted(self.dist_obj_type.keys()))
dist_obj_type_count = "\n ".join(type_ + ": " + bincount_str(self.dist_obj_type_count[type_])
for type_ in sorted(self.dist_obj_type_count.keys()))
txt = txt.format(n_games=self.n_games,
dist_quest_count=dist_quest_count,
unique_quest_count=len(self.quests),
dist_quest_length_count=dist_quest_length_count,
dist_cmd_type=dist_cmd_type,
dist_final_cmd_type=dist_final_cmd_type,
dist_obj_count=dist_obj_count,
unique_obj_count=len(self.objects),
dist_obj_type=dist_obj_type,
dist_obj_type_count=dist_obj_type_count,
dist_inventory_size=dist_inventory_size,
nb_cmd_type=len(self.dist_cmd_type),
nb_final_cmd_type=len(self.dist_final_cmd_type))
return txt
def aggregate(self, other):
assert self.group_actions == other.group_actions
self.n_games += other.n_games
for k, v in other.dist_obj_type.items():
self.dist_obj_type[k] += v
for k, v in other.dist_obj_type_count.items():
self.dist_obj_type_count[k] = merge_bincout(self.dist_obj_type_count[k], v)
for k, v in other.dist_cmd_type.items():
self.dist_cmd_type[k] += v
for k, v in other.dist_final_cmd_type.items():
self.dist_final_cmd_type[k] += v
self.dist_quest_count = merge_bincout(self.dist_quest_count, other.dist_quest_count)
self.dist_quest_length_count = merge_bincout(self.dist_quest_length_count, other.dist_quest_length_count)
self.dist_obj_count = merge_bincout(self.dist_obj_count, other.dist_obj_count)
self.dist_inventory_size = merge_bincout(self.dist_inventory_size, other.dist_inventory_size)
self.quests |= other.quests
self.objects |= other.objects
def save(self, filename):
with open(filename, 'wb') as f:
pickle.dump(self, f, protocol=2)
@staticmethod
def load(filename):
with open(filename, 'rb') as f:
return pickle.load(f)
```
#### File: generator/tests/test_init.py
```python
import numpy.testing as npt
from textworld.generator import make_world, make_small_map, make_world_with
from textworld.logic import Variable, Proposition
def test_make_world_no_rng():
world = make_world(1)
assert world is not None
def test_make_small_map_too_big():
# small maps have max size
npt.assert_raises(ValueError, make_small_map, n_rooms=6)
def test_make_small_map():
world = make_small_map(n_rooms=4)
assert world is not None
def test_make_world_with():
r1 = Variable("r_0", "r")
P = Variable('P')
world = make_world_with(rooms=[r1])
assert Proposition('at', [P, r1]) in world.facts
```
#### File: gym/envs/utils.py
```python
from typing import Iterable, Any
from numpy.random import RandomState
def shuffled_cycle(iterable: Iterable[Any], rng: RandomState, nb_loops: int = -1) -> Iterable[Any]:
"""
Yield each element of `iterable` one by one, then shuffle the elements
and start yielding from the start. Stop after `nb_loops` loops.
Arguments:
iterable: Iterable containing the elements to yield.
rng: Random generator used to shuffle the elements after each loop.
nb_loops: Number of times to go through all the elements. If set to -1,
loop an infinite number of times.
"""
elements = []
for e in iterable:
elements.append(e)
yield e
cpt = nb_loops
while cpt != 0:
cpt -= 1
rng.shuffle(elements)
for e in elements:
yield e
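# Illustrative usage (assumed values): with nb_loops=1 the elements are yielded once in
# their original order and then once more in a shuffled order, e.g.
#   list(shuffled_cycle([1, 2, 3], RandomState(0), nb_loops=1)) -> [1, 2, 3, <one shuffled pass of 1, 2, 3>]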
```
#### File: textworld/tests/test_core.py
```python
import unittest
from textworld.core import EnvInfos, GameState
class TestEnvInfos(unittest.TestCase):
@classmethod
def setUpClass(cls):
args = [(slot, True) for slot in EnvInfos.__slots__ if slot != "extras"]
cls.env_empty = EnvInfos()
cls.env_half = EnvInfos(**dict(args[::2]), extras=["extra1"])
cls.env_full = EnvInfos(**dict(args), extras=["extra1", "extra2"])
def test_eq(self):
assert self.env_empty != self.env_half
assert self.env_empty != self.env_full
assert self.env_half != self.env_full
def test_copy(self):
for env in [self.env_empty, self.env_half, self.env_full]:
copy = env.copy()
assert id(copy) != id(env)
assert id(copy.extras) != id(env.extras)
assert copy == env
class TestGameState(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.state = GameState()
cls.state["field_str"] = "value1"
cls.state["field_int"] = 42
cls.state["field_float"] = 4.2
cls.state["field_list"] = ["str", -1, True, 1.2]
def test_copy(self):
state = self.state.copy()
assert id(state) != id(self.state)
assert state == self.state
# Make sure it's a deepcopy.
assert id(state["field_list"]) != id(self.state["field_list"])
``` |
{
"source": "JohnnyTh/pytorch_multiproject",
"score": 2
} |
#### File: pytorch_multiproject/tests/mask_r_cnn_trainer_test.py
```python
import os
import sys
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(ROOT_DIR)
import torch
from mock import MagicMock
from mock import patch
from trainers.mask_r_cnn_trainer import MaskRCNNTrainer
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class ModelTestMock:
def __init__(self):
self.training = True
self.is_cuda = False
def __call__(self, images, *args):
if self.training:
losses_dict = {'loss_box_reg': torch.rand(1)*10,
'loss_classifier': torch.rand(1)*10,
'loss_mask': torch.rand(1)*10,
'loss_objectness': torch.rand(1)*10,
'loss_rpn_box_reg': torch.rand(1)*10}
return losses_dict
else:
dict_img_1 = {'boxes': torch.rand((100, 4)) * torch.randint(250, ((1), )),
'labels': torch.ones(100),
'masks': torch.randint(2, (100, 1, 250, 250)),
'scores': torch.FloatTensor(100).uniform_(0.6, 0.95)}
dict_img_2 = {'boxes': torch.rand((100, 4)) * torch.randint(250, ((1),)),
'labels': torch.ones(100),
'masks': torch.randint(2, (100, 1, 250, 250)),
'scores': torch.FloatTensor(100).uniform_(0.6, 0.95)}
return [dict_img_1, dict_img_2]
def __next__(self):
return self
def to(self, device):
return self
def parameters(self):
return self
def train(self):
self.training = True
def eval(self):
self.training = False
def state_dict(self):
pass
def load_state_dict(self, *args, **kwargs):
pass
class DatasetMock:
def __init__(self, n):
self.n = n
def __getitem__(self, item):
img = torch.rand(3, 250, 250)
target_dict = {'area': torch.rand(2) * 1000,
'boxes': torch.randint(250, (2, 4), dtype=torch.float32),
'image_id': torch.tensor([item], dtype=torch.int64),
'iscrowd': torch.zeros(2, dtype=torch.uint8),
'labels': torch.ones(2, dtype=torch.int64),
'masks': torch.randint(2, (2, 250, 250), dtype=torch.uint8)}
return img, target_dict
def __len__(self):
return self.n
class DataloaderTestMock:
def __init__(self, n):
self.n = n
self.num = 0
self.dataset = DatasetMock(n)
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
# yields a tuple of (images, targets); each element is itself a length-1 tuple
if self.num < self.n:
cur, self.num = self.num, self.num + 1
img = (torch.rand(3, 250, 250), )
target_dict = {'area': torch.rand(2)*1000,
'boxes': torch.randint(250, (2, 4), dtype=torch.float32),
'image_id': torch.tensor([cur], dtype=torch.int64),
'iscrowd': torch.zeros(2, dtype=torch.uint8),
'labels': torch.ones(2, dtype=torch.int64),
'masks': torch.randint(2, (2, 250, 250), dtype=torch.uint8)}
target = (target_dict, )
return tuple([img, target])
else:
self.num = 0
raise StopIteration()
def __len__(self):
return self.n
mock_optim = MagicMock()
# we need to return lr since it is required by metric logger
mock_optim.param_groups.__getitem__.return_value = {'lr': torch.rand(1).item()}
test_data = {
'dataloaders': {'train': DataloaderTestMock(5),
'val': DataloaderTestMock(5)},
'root': '/home',
'model': ModelTestMock(),
'criterion': None,
'optimizer': mock_optim,
'scheduler': MagicMock(),
'metrics': {},
'epochs': 10,
'checkpoint': {'start_epoch': 6}
}
deserialize_data = {
'epoch': 5,
'model_state': 'chkpt_model',
'best_metrics': {},
'optimizer': {
'name': mock_optim.__class__.__name__,
'state': 'chkpt_optim'},
'scheduler': {
'name': MagicMock().__class__.__name__,
'state': 'chkpt_sched'
}
}
@patch('os.mkdir')
@patch('torch.Tensor.backward', return_value=None)
@patch('utils.detection_evaluator.Image.Image.save')
@patch('trainers.mask_r_cnn_trainer.warmup_lr_scheduler')
@patch('trainers.mask_r_cnn_trainer.MaskRCNNTrainer._serialize', return_value=None)
@patch('trainers.mask_r_cnn_trainer.MaskRCNNTrainer.write_log')
@patch('trainers.mask_r_cnn_trainer.MaskRCNNTrainer.write_log')
def test_train_run(self, _, __, ___, ____, _____, ______):
trainer = MaskRCNNTrainer(dataloaders=test_data['dataloaders'], root=test_data['root'],
model=test_data['model'], criterion=test_data['criterion'],
optimizer=test_data['optimizer'], scheduler=test_data['scheduler'],
metrics=test_data['metrics'], epochs=test_data['epochs'])
trainer.train()
@patch('os.mkdir')
@patch('torch.Tensor.backward', return_value=None)
@patch('torch.load', return_value=deserialize_data)
@patch('utils.detection_evaluator.Image.Image.save')
@patch('trainers.mask_r_cnn_trainer.MaskRCNNTrainer._serialize', return_value=None)
@patch('trainers.mask_r_cnn_trainer.MaskRCNNTrainer.write_log')
def test_train_deserialize_and_run(self, _, __, ___, ____, _____):
# Assuming we trained the model from epoch 1 to 5, then saved it and now want to restart
trainer = MaskRCNNTrainer(dataloaders=test_data['dataloaders'], root=test_data['root'],
model=test_data['model'], criterion=test_data['criterion'],
optimizer=test_data['optimizer'], scheduler=test_data['scheduler'],
metrics=test_data['metrics'], epochs=test_data['epochs'])
trainer._deserialize('/does_not_matter')
assert trainer.start_epoch == 6
assert trainer.epochs == test_data['epochs'] + 6
trainer.train()
@patch('os.mkdir')
@patch('trainers.mask_r_cnn_trainer.save_image')
@patch('torch.load', return_value=deserialize_data)
@patch('trainers.mask_r_cnn_trainer.MaskRCNNTrainer.write_log')
def test_test_run(self, _, __, ___):
trainer = MaskRCNNTrainer(dataloaders=test_data['dataloaders']['val'], root=test_data['root'],
model=test_data['model'], criterion=test_data['criterion'],
optimizer=test_data['optimizer'], scheduler=test_data['scheduler'],
metrics=test_data['metrics'], epochs=test_data['epochs'])
trainer.test()
```
#### File: pytorch_multiproject/trainers/base_trainer.py
```python
from abc import ABC, abstractmethod
class BaseTrainer(ABC):
@abstractmethod
def _train_step(self, epoch):
raise NotImplementedError
@abstractmethod
def train(self):
raise NotImplementedError
@abstractmethod
def _serialize(self, epoch):
raise NotImplementedError
@abstractmethod
def _deserialize(self, chckpt_path):
raise NotImplementedError
```
#### File: pytorch_multiproject/trainers/generic_trainer.py
```python
import os
import logging
import torch
from trainers.base_trainer import BaseTrainer
from abc import abstractmethod
class GenericTrainer(BaseTrainer):
def __init__(self, root, model, criterion, optimizer, scheduler, metrics, epochs, hyperparams=None,
save_dir=None, checkpoint=None, change_lr=False):
""" Generic trainer; implements train(), _serialize(), and _deserialize methods.
root (str): project root directory.
model (callable): an instance of custom neural network class inheriting from nn.Module class.
criterion (callable): a loss function.
optimizer (optimizer object): object implementing optimization algorithm.
scheduler (lr_scheduler): learning rate scheduler object, changes lr of the optimizer every time step()
method is called.
metrics (dict): dict containing metrics, specific for every custom trainer implementation
epochs (int): number of training epochs
hyperparams (dict): various hyperparameters we might need inside GenericTrainer
or its custom implementations
save_dir (str): dir to save the trained models and generated images
checkpoint (str, optional): checkpoint path to resume the training from
change_lr (bool): if True, learning rate of the optimizer will be changed to provided value even
if the model is restored from checkpoint. Uses self.hyperparams['lr'] value.
"""
self.logger = logging.getLogger('trainer')
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.root = root
self.model = model.to(self.device)
self.name = model.__class__.__name__
self.criterion = criterion
self.optimizer = optimizer
self.scheduler = scheduler
self.best_metrics = metrics
self.epochs = epochs
if hyperparams is None:
hyperparams = dict({})
self.hyperparams = hyperparams
self.start_epoch = 1
self.generic_logger = logging.getLogger(os.path.basename(__file__))
self.change_lr = change_lr
if save_dir is not None:
self.save_dir = save_dir
else:
# if custom save dir not provided, save in project folder instead
self.save_dir = os.path.join(self.root, 'saved')
# create a directory for saving the output
if not os.path.exists(self.save_dir):
os.mkdir(self.save_dir)
if checkpoint is not None:
self._deserialize(checkpoint)
@abstractmethod
def _train_step(self, epoch):
# this method is implemented in custom Trainers
raise NotImplementedError
def train(self):
# training loop through epochs, saves the model if some criteria are satisfied during the training
for epoch in range(self.start_epoch, self.epochs+1):
res = self._train_step(epoch)
if res['best_performance']:
self._serialize(epoch)
def _serialize(self, epoch):
# save the model and some other parameters
if self.scheduler is not None:
sched_state = {'name': self.scheduler.__class__.__name__,
'state': self.scheduler.state_dict()}
else:
sched_state = None
state = {
'epoch': epoch,
'model_name': self.name,
'model_state': self.model.state_dict(),
'optimizer': {'name': self.optimizer.__class__.__name__,
'state': self.optimizer.state_dict()},
'scheduler': sched_state,
'best_metrics': self.best_metrics
}
chkpt = '{}_epoch_{}.pth'.format(self.name, epoch)
file_path = os.path.join(self.save_dir, chkpt)
torch.save(state, file_path)
self.generic_logger.info('Saving the model at {}'.format(file_path))
def _deserialize(self, load_path):
# restore the model and other parameters from the checkpoint file ('xxx.pth')
checkpoint = torch.load(load_path)
self.start_epoch = checkpoint['epoch'] + 1
self.epochs = self.epochs + self.start_epoch
self.model.load_state_dict(checkpoint['model_state'])
self.best_metrics = checkpoint['best_metrics']
# restore optimizer from checkpoint
if checkpoint['optimizer']['name'] != self.optimizer.__class__.__name__:
self.logger.warning("Warning: Given optimizer type is different from that of checkpoint. "
"Optimizer parameters not being resumed.")
else:
self.optimizer.load_state_dict(checkpoint['optimizer']['state'])
# manually adjust the lr of the optimizer
if self.change_lr is True:
# standard pytorch optimizers expose their parameter groups via `param_groups`
if hasattr(self.optimizer, 'param_groups'):
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.hyperparams.get('lr', 0.0002)
self.logger.info('Learning rate has been changed!')
# a custom optimizer must instead provide a change_lr method
elif hasattr(self.optimizer, 'change_lr'):
self.optimizer.change_lr(self.hyperparams.get('lr', 0.0002))
self.logger.info('Learning rate has been changed!')
else:
raise NotImplementedError('required method change_lr not implemented in provided optimizer object')
# restore scheduler parameters from the checkpoint
if checkpoint['scheduler'] is not None:
if checkpoint['scheduler']['name'] != self.scheduler.__class__.__name__:
self.logger.warning("Warning: Given scheduler type is different from that of checkpoint. "
"Scheduler parameters not being resumed.")
else:
self.scheduler.load_state_dict(checkpoint['scheduler']['state'])
self.logger.info('Resuming from checkpoint...')
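# Minimal usage sketch (names assumed; GenericTrainer itself is abstract and needs a
# concrete subclass implementing _train_step):
#   trainer = MyTrainer(root=ROOT, model=net, criterion=loss_fn, optimizer=opt,
#                       scheduler=None, metrics={}, epochs=10)
#   trainer.train()  # runs _train_step() each epoch and _serialize() on best results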
```
#### File: pytorch_multiproject/utils/detection_evaluator.py
```python
import os
import logging
import numpy as np
from PIL import Image
from PIL import ImageDraw
class DetectionEvaluator:
def __init__(self, save_dir):
"""
To calculate the object detection scores, we accumulate the images, ground truth labels (targets) and
predictions of our model during the val phase, then compute the necessary metrics (e.g. bounding box mAP).
Parameters
----------
save_dir (str): a path where the outputs of the class method self.save_bboxes_masks(..) will be saved.
"""
self.logger = logging.getLogger(os.path.basename(__file__))
self.data = []
self.save_dir = save_dir
def accumulate(self, save_img, targets, predictions):
"""
Collects the data during the model operation in val phase for further processing.
Parameters
----------
save_img (numpy.ndarray): an array of shape (H, W, C) representing the input image.
targets (dict): contains ground truth coordinates of bounding boxes, segmentation mask values, etc.
predictions (dict): contains predicted values of bounding boxes, segmentation masks, etc.
"""
self.data.append([save_img, targets, predictions])
def bbox_score(self, iou_threshold=0.5, non_max_iou_thresh=0.5, score_threshold=0.6):
"""
Calculates the bounding box score using the accumulated ground truth targets and respective predictions.
Parameters
----------
iou_threshold (float, 0. to 1., optional): determines whether predicted bounding box is considered
false positive (bbox IoU < iou_threshold) or true positive (bbox IoU > iou_threshold).
non_max_iou_thresh (float, 0. to 1., optional): non-max suppression threshold, used to discard predicted
bounding boxes that have IoU > threshold with any other predicted bbox.
score_threshold (float, 0. to 1., optional): used to discard all bounding boxes with confidence < threshold.
Returns
-------
avg_precision (float): mAP score.
precision (float): accumulated precision based on predictions and targets on self.data.
recall (float): accumulated recall based on predictions and targets on self.data.
remaining_idx (list): indices of remaining bounding boxes for each element in self.data.
"""
remaining_idx = []
true_positive = np.array([])
false_positive = np.array([])
num_ground_truths = 0
for _, targets, predictions in self.data:
bboxes_targets = targets['boxes']
bboxes_pred = predictions['boxes']
bboxes_pred_score = predictions['scores']
if not isinstance(bboxes_targets, np.ndarray):
bboxes_targets = bboxes_targets.numpy()
if not isinstance(bboxes_pred, np.ndarray):
bboxes_pred = bboxes_pred.numpy()
if not isinstance(bboxes_pred_score, np.ndarray):
bboxes_pred_score = bboxes_pred_score.numpy()
# apply non-max suppression to predictions
bboxes_pred_suppr, _, idx = self.non_max_suppr_binary(bboxes_pred_score, bboxes_pred,
score_threshold, non_max_iou_thresh)
remaining_idx.append(idx)
# since number of predicted boxes is usually different from the number of true boxes, we need
# to create all the possible combinations of true and predicted bbox coordinates for iou calculations
targets_predictions_comb = np.hstack([np.repeat(bboxes_pred_suppr, bboxes_targets.shape[0], axis=0),
np.tile(bboxes_targets, (bboxes_pred_suppr.shape[0], 1))])
self.logger.debug(targets_predictions_comb)
# compute ious for all the possible combinations of predictions and targets
iou = self.batch_iou(targets_predictions_comb[:, :4], targets_predictions_comb[:, 4:])
self.logger.debug(iou)
# rearrange iou into separate groups - one group for each prediction
# corresponding to ious of each prediction with all the ground truth (or target) bboxes
iou = np.hsplit(iou, bboxes_pred_suppr.shape[0])
self.logger.debug(iou)
# intermediate containers to accumulate true and false positives during one iteration
# note that length of one container corresponds to the number of predictions
true_pos_iter = np.zeros(len(iou))
false_pos_iter = np.zeros(len(iou))
# collect the number of ground truths in each target - prediction pair for recall calculation
num_ground_truths += bboxes_targets.shape[0]
# iterate through groups in calculated ious. One group corresponds to ious of one prediction with all
# the targets.
guessed_bboxes = []
for group_idx, iou_group in enumerate(iou):
guessed = False
for target_idx, iou in enumerate(iou_group):
if iou > iou_threshold and target_idx not in guessed_bboxes:
guessed_bboxes.append(target_idx)
true_pos_iter[group_idx] += 1
guessed = True
# if the prediction guessed no bboxes and we are at the end of the list
# count it as fp
if guessed is False and target_idx == (len(iou_group) - 1):
false_pos_iter[group_idx] += 1
self.logger.debug('guessed bboxes: ' + str(guessed_bboxes))
true_positive = np.append(true_positive, true_pos_iter)
false_positive = np.append(false_positive, false_pos_iter)
self.logger.debug('collected_tps:'+str(true_positive))
self.logger.debug('collected_fps:' + str(false_positive))
accum_tp = np.cumsum(true_positive)
accum_fp = np.cumsum(false_positive)
precision = np.divide(accum_tp, (accum_tp + accum_fp))
recall = accum_tp / num_ground_truths
self.logger.debug('Precision :'+str(precision))
self.logger.debug('Recall :'+str(recall))
avg_precision, _, _ = self.get_average_precision(precision, recall)
self.logger.debug('\n\n')
return avg_precision, precision[-1], recall[-1], remaining_idx
def mask_score(self):
"""
Calculates the mAP score for the generated masks.
"""
raise NotImplementedError
def non_max_suppr_binary(self, bboxes_pred_score, bboxes_pred, score_threshold, iou_threshold):
"""
Binary classification version of non-max suppression
Parameters
----------
bboxes_pred_score (numpy.ndarray[N]): confidence scores for each prediction.
bboxes_pred (numpy.ndarray[N, 4]): predicted boxes in format [x1, y1, x2, y2], with values between
0 and H, 0 and W
score_threshold (float, 0. to 1.): used to discard all bounding boxes with confidence < threshold.
iou_threshold (float, 0. to 1.): non-max suppression threshold, used to discard predicted bounding boxes
that have IoU > threshold with any other predicted bbox.
Returns
-------
out_bboxes (numpy.ndarray): bounding boxes after non-max suppression.
out_scores (numpy.ndarray): confidence scores of bounding boxes after non-max suppression.
out_idx (numpy.ndarray): indices of bounding boxes remaining after non-max suppression.
"""
remaining_idx = np.arange(bboxes_pred_score.shape[0])
# firstly we discard all bbox predictions where class prob < score_threshold
selected_idx = np.argwhere(bboxes_pred_score > score_threshold).flatten()
selected_bboxes = bboxes_pred[selected_idx]
selected_scores = bboxes_pred_score[selected_idx]
remaining_idx = remaining_idx[selected_idx]
out_bboxes = np.empty((0, 4))
out_scores = np.array([])
out_idx = np.array([])
# continue iterations until the list of scores is depleted
while len(selected_scores) > 0:
highest_score_idx = np.argmax(selected_scores)
top_score = selected_scores[highest_score_idx]
top_bbox = selected_bboxes[highest_score_idx]
top_idx = remaining_idx[highest_score_idx]
selected_scores = np.delete(selected_scores, highest_score_idx)
selected_bboxes = np.delete(selected_bboxes, highest_score_idx, axis=0)
remaining_idx = np.delete(remaining_idx, highest_score_idx)
# to prevent selected_bboxes matrix from collapsing into a vector
if len(selected_bboxes.shape) == 1:
selected_bboxes = np.expand_dims(selected_bboxes, 0)
# if we pick the last item from selected_scores and boxes, add it directly to the results
# since there are no items left to compare it against
if len(selected_scores) > 0:
duplicate_boxes_idx = []
for idx, remain_box in enumerate(selected_bboxes):
iou = self.intersection_over_union(top_bbox, remain_box)
if iou > iou_threshold:
duplicate_boxes_idx.append(idx)
# drop duplicate boxes with high intersection if any are found
selected_scores = np.delete(selected_scores, duplicate_boxes_idx)
selected_bboxes = np.delete(selected_bboxes, duplicate_boxes_idx, axis=0)
remaining_idx = np.delete(remaining_idx, duplicate_boxes_idx)
out_scores = np.append(out_scores, top_score)
out_bboxes = np.append(out_bboxes, top_bbox.reshape(1, -1), axis=0)
out_idx = np.append(out_idx, top_idx)
return out_bboxes, out_scores, out_idx
def save_bboxes_masks(self, epoch, bbox_width=2, selected_boxes_ind=None, mask_draw_precision=0.4, opacity=0.4):
"""
Draws bounding boxes and masks on top of the original image and saves the result.
Parameters
----------
epoch (int): epoch number
bbox_width (int, optional): the bbox line width, in pixels.
selected_boxes_ind (list): a list of lists containing indexes of selected bounding boxes
(after non-max suppression) for each image.
mask_draw_precision (float, 0. to 1.): confidence score, above which the mask will be drawn
(each pixel in predicted mask is assigned a confidence score ranging from 0 to 1)
opacity (float, 0. to 1.): mask opacity, 0 - completely transparent, 1 - completely opaque
"""
image_prep_list = list(self.draw_bbox(bbox_width, selected_boxes_ind))
image_prep_list = list(self.generate_masked_img(image_prep_list,
selected_boxes_ind,
mask_draw_precision,
opacity))
for idx, image in enumerate(image_prep_list):
save_addr = os.path.join(self.save_dir, 'Test_img_{}_{}'.format(epoch, idx))
image.save(save_addr, 'PNG')
def draw_bbox(self, bbox_width=2, selected_boxes_ind=None):
"""
Generator method.
Draws bounding boxes on top of original image (green - ground truth, red - predicted bounding box).
Yields resulting images
Parameters
----------
bbox_width (int, optional): the bbox line width, in pixels.
selected_boxes_ind (list, optional): a list of lists containing indices of bboxes remaining after non-max
suppression.
"""
for idx, data in enumerate(self.data):
image, targets, predictions = data
pred_bboxes = predictions['boxes']
# select only bboxes remaining after suppression
if selected_boxes_ind is not None:
idx_group = selected_boxes_ind[idx]
pred_bboxes = pred_bboxes[idx_group]
targets_bboxes = targets['boxes']
if not isinstance(targets_bboxes, np.ndarray):
targets_bboxes = targets_bboxes.numpy()
image_prep = Image.fromarray(image)
draw = ImageDraw.Draw(image_prep)
for target_bbox in targets_bboxes:
draw.rectangle((tuple(target_bbox[:2]), tuple(target_bbox[2:])),
outline='green', width=bbox_width)
for single_pred in pred_bboxes:
draw.rectangle((tuple(single_pred[:2]), tuple(single_pred[2:])),
outline='red', width=bbox_width)
yield image_prep
def generate_masked_img(self, image_prep_list, selected_boxes_ind=None, mask_draw_precision=0.4, opacity=0.4):
"""
Generator method.
Overlays all the generated masks on top of the original image. Yields resulting images.
Parameters
----------
selected_boxes_ind (list), mask_draw_precision (float, 0. to 1.), opacity (float, 0. to 1.).
"""
for idx, data in enumerate(self.data):
image = image_prep_list[idx]
masks = data[2]['masks'].mul(255).byte().numpy()
# expand (H, W) masks
if len(masks.shape) == 2:
masks = masks[np.newaxis, np.newaxis, :, :]
elif len(masks.shape) == 3:
masks = masks[:, np.newaxis, :, :]
if isinstance(image, np.ndarray):
image_prep = Image.fromarray(image)
elif isinstance(image, Image.Image):
image_prep = image
else:
raise TypeError('The provided image type must be PIL image of numpy.ndarray')
# add alpha channel to the original image
image_prep.putalpha(255)
if selected_boxes_ind is not None:
idx_group = selected_boxes_ind[idx]
if idx_group.dtype != int:
idx_group = idx_group.astype(int)
# pick only those masks that correspond to the bounding boxes after non-max suppression
masks = masks[idx_group]
for mask in masks:
colors = self.generate_color_scheme()
# firstly generate 3 color channels and alpha channel
mask = np.repeat(mask, 4, axis=0)
# replace ones at each color channel with respective color if mask probability > mask_draw_precision
# and zero out the values below mask_draw_precision
for channel in range(len(colors)):
bool_mask_keep = mask[channel] >= int(255*mask_draw_precision)
bool_mask_erase = mask[channel] < int(255*mask_draw_precision)
mask[channel][bool_mask_keep] = colors[channel]
mask[channel][bool_mask_erase] = 0
# fill alpha channel values using R channel as a reference
mask[3, :, :][mask[0, :, :] > 0] = int(255*opacity)
mask[3, :, :][mask[0, :, :] == 0] = 0
# convert the mask into H, W, C format
mask = np.transpose(mask, (1, 2, 0))
# convert the prepared mask into PIL Image object
mask_prep = Image.fromarray(mask)
# combine the mask and the image
image_prep = Image.alpha_composite(image_prep, mask_prep)
yield image_prep
@staticmethod
def generate_color_scheme():
# generates a random color scheme to color the masks
return np.random.choice(range(255), size=3)
@staticmethod
def get_average_precision(precision, recall):
"""
Computes mAP as approximated AUC of Precision x Recall curve.
# More details here: https://github.com/rafaelpadilla/Object-Detection-Metrics
Parameters
----------
precision (numpy.ndarray): precision values for all val dataset.
recall (numpy.ndarray): recall values for all val dataset.
Returns
-------
avg_precision (float): mAP score.
m_precision (numpy.ndarray): interpolated precision values.
m_recall (numpy.ndarray): modified recall values used to calculate the interpolated precision.
"""
m_precision = list()
m_precision.append(0)
[m_precision.append(value) for value in precision]
m_precision.append(0)
m_recall = list()
m_recall.append(0)
[m_recall.append(value) for value in recall]
m_recall.append(1)
# interpolate precision by going backwards and overwriting all encountered precision values
# with the largest found value
for i in range(len(m_precision) - 1, 0, -1):
m_precision[i - 1] = max(m_precision[i - 1], m_precision[i])
# locate indices of steps in recall value list (places where recall values change)
recall_deltas_idx = []
for i in range(len(m_recall) - 1):
if m_recall[1:][i] != m_recall[0:-1][i]:
recall_deltas_idx.append(i + 1)
# compute avg precision as an area of interpolated precision - recall squares
avg_precision = 0
for i in recall_deltas_idx:
avg_precision = avg_precision + (m_recall[i] - m_recall[i - 1]) * m_precision[i]
return avg_precision, m_precision, m_recall
@staticmethod
def intersection_over_union(bbox_1, bbox_2):
"""
Calculates IoU for two bounding boxes.
Parameters
----------
bbox_1 (array-like): coordinates of first bounding box in the format [x1, y1, x2, y2].
bbox_2 (array-like): coordinates of second bounding box in the format [x1, y1, x2, y2].
Returns
-------
iou (float): IoU value.
"""
bbox_1_x0 = bbox_1[0]
bbox_1_y0 = bbox_1[1]
bbox_1_x1 = bbox_1[2]
bbox_1_y1 = bbox_1[3]
bbox_2_x0 = bbox_2[0]
bbox_2_y0 = bbox_2[1]
bbox_2_x1 = bbox_2[2]
bbox_2_y1 = bbox_2[3]
# determine the coordinates of the intersection rectangle
x_left = max(bbox_1_x0, bbox_2_x0)
y_top = max(bbox_1_y0, bbox_2_y0)
x_right = min(bbox_1_x1, bbox_2_x1)
y_bottom = min(bbox_1_y1, bbox_2_y1)
if x_right < x_left or y_bottom < y_top:
return 0.0
intersect_area = (x_right - x_left + 1) * (y_bottom - y_top + 1)
bbox_1_area = (bbox_1_x1 - bbox_1_x0 + 1) * (bbox_1_y1 - bbox_1_y0 + 1)
bbox_2_area = (bbox_2_x1 - bbox_2_x0 + 1) * (bbox_2_y1 - bbox_2_y0 + 1)
iou = intersect_area / (bbox_1_area + bbox_2_area - intersect_area)
return iou
@staticmethod
def batch_iou(bbox_array_1, bbox_array_2):
"""
Calculates IoU for batched pairs of bounding boxes.
Parameters
----------
bbox_array_1 (numpy.ndarray[N, 4]): array of coordinates of first bounding box group.
bbox_array_2 (numpy.ndarray[N, 4]): array of coordinates of the second bounding box group.
Returns
-------
iou (numpy.ndarray[N]): IoU scores.
"""
bbox_1_x0 = bbox_array_1[:, 0]
bbox_1_y0 = bbox_array_1[:, 1]
bbox_1_x1 = bbox_array_1[:, 2]
bbox_1_y1 = bbox_array_1[:, 3]
bbox_2_x0 = bbox_array_2[:, 0]
bbox_2_y0 = bbox_array_2[:, 1]
bbox_2_x1 = bbox_array_2[:, 2]
bbox_2_y1 = bbox_array_2[:, 3]
# determine the coordinates of the intersection rectangle
x_left = np.maximum(bbox_1_x0, bbox_2_x0)
y_top = np.maximum(bbox_1_y0, bbox_2_y0)
x_right = np.minimum(bbox_1_x1, bbox_2_x1)
y_bottom = np.minimum(bbox_1_y1, bbox_2_y1)
width = x_right - x_left + 1
height = y_bottom - y_top + 1
width[width < 0] = 0
height[height < 0] = 0
intersect_area = height * width
bbox_1_area = (bbox_1_x1 - bbox_1_x0 + 1) * (bbox_1_y1 - bbox_1_y0 + 1)
bbox_2_area = (bbox_2_x1 - bbox_2_x0 + 1) * (bbox_2_y1 - bbox_2_y0 + 1)
iou = intersect_area / (bbox_1_area + bbox_2_area - intersect_area)
return iou
```
#### File: pytorch_multiproject/utils/gan_horses_dataset.py
```python
import sys
import os
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(ROOT_DIR)
import requests
import zipfile
data_root = os.path.join(ROOT_DIR, 'resources')
def get_data():
url = 'https://people.eecs.berkeley.edu/~taesung_park/CycleGAN/datasets/horse2zebra.zip'
r = requests.get(url)
local_file_path = os.path.join(data_root, 'horse2zebra_.zip')
os.makedirs(os.path.dirname(local_file_path), exist_ok=True)
with open(local_file_path, 'wb') as f:
f.write(r.content)
get_data()
with zipfile.ZipFile(os.path.join(data_root, 'horse2zebra_.zip'), 'r') as zip_ref:
zip_ref.extractall(path=data_root)
``` |
{
"source": "Johnnytjn/RNN_classify",
"score": 2
} |
#### File: Johnnytjn/RNN_classify/rnn.py
```python
import tensorflow as tf
import os
import opennmt as onmt
# NOTE: get_total_param_num is called in setup_training(); it is assumed here to live in
# utils.model_helper alongside the other helpers.
from utils.model_helper import (get_device_str, get_total_param_num,
residual_rnn_cell, single_rnn_cell)
from utils.rnn_cells import WeightTyingWrapper, WeightTyingLayer
class RNN(object):
"""RNN model for text classify
"""
def __init__(self, hparams):
self.hparams = hparams
def build(self):
self.setup_input_placeholders()
self.setup_embedding()
self.setup_rnn()
self.setup_loss()
if self.is_training():
self.setup_training()
self.setup_summary()
self.saver = tf.train.Saver(tf.global_variables())
def init_model(self, sess, initializer=None):
if initializer:
sess.run(initializer)
else:
sess.run(tf.global_variables_initializer())
def save_model(self, sess):
return self.saver.save(sess, os.path.join(self.hparams.checkpoint_dir,
"model.ckpt"), global_step=self.global_step)
def restore_model(self, sess, epoch=None):
if epoch is None:
self.saver.restore(sess, tf.train.latest_checkpoint(
self.hparams.checkpoint_dir))
else:
self.saver.restore(
sess, os.path.join(self.hparams.checkpoint_dir, "model.ckpt" + ("-%d" % epoch)))
print("restored model")
def setup_input_placeholders(self):
self.source_tokens = tf.placeholder(
tf.int32, shape=[None, self.hparams.seq_len], name='source_tokens')
self.targets = tf.placeholder(
tf.int32, shape=[None], name='class')
self.batch_size = self.hparams.batch_size
# for training and evaluation
if self.hparams.mode in ['train', 'eval']:
self.dropout_keep_prob = tf.placeholder(
dtype=tf.float32, name='keep_prob')
global_step = tf.Variable(
initial_value=0,
name="global_step",
trainable=False,
collections=[tf.GraphKeys.GLOBAL_STEP, tf.GraphKeys.GLOBAL_VARIABLES])
self.global_step = global_step
def is_training(self):
return self.hparams.mode == 'train'
def setup_summary(self):
self.summary_writer = tf.summary.FileWriter(
self.hparams.checkpoint_dir, tf.get_default_graph())
tf.summary.scalar("train_loss", self.losses)
tf.summary.scalar("accuracy_summary",self.accuracy)
tf.summary.scalar("learning_rate", self.learning_rate)
tf.summary.scalar('gN', self.gradient_norm)
tf.summary.scalar('pN', self.param_norm)
self.summary_op = tf.summary.merge_all()
def setup_embedding(self):
with tf.variable_scope("embedding") as scope:
self.embedding = tf.get_variable(name='embedding', shape=[
self.hparams.vocab_size, self.hparams.embedding_size])
self.source_embedding = tf.nn.embedding_lookup(
self.embedding, self.source_tokens)
if self.is_training():
self.source_embedding = tf.nn.dropout(
self.source_embedding, keep_prob=self.dropout_keep_prob)
def setup_rnn(self):
with tf.variable_scope("rnn") as scope:
if not self.hparams.weight_tying:
self.output_layer = tf.layers.Dense(self.hparams.vocab_size)
else:
self.output_layer = WeightTyingLayer(self.embedding,self.hparams.vocab_size)
cell_list = []
residual_layers = self.hparams.residual_layers
for i in range(self.hparams.num_layers):
# Note: if we use weight_tying, then the num_units of the last layer of RNN should be equal to embedding size
# (i.e. with weight tying the embedding size and the hidden size of the last layer can no longer be chosen independently)
if self.hparams.weight_tying and i == self.hparams.num_layers - 1:
rnn_cell = single_rnn_cell(self.hparams.rnn_cell_name, self.hparams.embedding_size,
self.is_training(), self.hparams.dropout_keep_prob, self.hparams.weight_keep_drop, self.hparams.variational_dropout)
else:
rnn_cell = single_rnn_cell(self.hparams.rnn_cell_name, self.hparams.num_units,
self.is_training(), self.hparams.dropout_keep_prob, self.hparams.weight_keep_drop, self.hparams.variational_dropout)
if i >= self.hparams.num_layers - residual_layers:
# Note: in weight_tying, the num_units of the last layer is different from others
# we cannot add residual layer on it.
if self.hparams.weight_tying and i == self.hparams.num_layers - 1:
pass
else:
wrapper = residual_rnn_cell(self.hparams.residual_type)
rnn_cell = wrapper(rnn_cell)
if self.hparams.num_gpus > 1:
device_str = get_device_str(i, self.hparams.num_gpus)
rnn_cell = tf.contrib.rnn.DeviceWrapper(
rnn_cell, device_str)
cell_list.append(rnn_cell)
if len(cell_list) > 1:
self.final_cell = tf.contrib.rnn.MultiRNNCell(cells=cell_list)
else:
self.final_cell = cell_list[0]
self.initial_state = self.final_cell.zero_state(self.batch_size,dtype=tf.float32)
if self.hparams.self_attention:
self.final_cell = tf.contrib.rnn.AttentionCellWrapper(
self.final_cell, self.hparams.attn_len)
outputs, _ = tf.nn.dynamic_rnn(cell=self.final_cell, inputs=self.source_embedding, dtype=tf.float32)
outputs = tf.reduce_mean(outputs,axis=1)
fc = tf.layers.dense(outputs, self.hparams.embedding_size, name = 'fc1')
self.logits = tf.layers.dense(fc, self.hparams.vocab_size, name='fc2')
def setup_loss(self):
self.loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits+1e-10,labels=self.targets)
self.losses = tf.reduce_mean(self.loss)
self.prediction = tf.argmax(self.logits,1,output_type=tf.int32)
correct_prediction = tf.equal(self.prediction,self.targets)
self.correct_num=tf.reduce_sum(tf.cast(correct_prediction,tf.float32))
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32),name="accuracy")
def setup_training(self):
# learning rate decay
if self.hparams.decay_schema == 'exp':
self.learning_rate = tf.train.exponential_decay(self.hparams.learning_rate, self.global_step,
self.hparams.decay_steps, 0.96, staircase=True)
else:
self.learning_rate = tf.constant(
self.hparams.learning_rate, dtype=tf.float32)
opt = onmt.utils.optim.get_optimizer_class(
self.hparams.optimizer)(self.learning_rate)
params = tf.trainable_variables()
get_total_param_num(params)
# we need to enable the colocate_gradients_with_ops option in tf.gradients to parallelize the gradients computation.
gradients = tf.gradients(self.losses, params, colocate_gradients_with_ops=True if self.hparams.num_gpus>1 else False)
clipped_gradients, _ = tf.clip_by_global_norm(
gradients, self.hparams.max_gradient_norm)
self.gradient_norm = tf.global_norm(gradients)
self.param_norm = tf.global_norm(params)
self.train_op = opt.apply_gradients(
zip(clipped_gradients, params), global_step=self.global_step)
def feed_state(self, feed_dict, state):
if self.hparams.self_attention:
state, attns, attn_states = state
feed_dict[self.initial_state[1]] = attns
feed_dict[self.initial_state[2]] = attn_states
if self.hparams.num_layers == 1:
initial_state = tuple([self.initial_state[0]])
state = tuple([state])
else:
initial_state = self.initial_state
for i, (c, h) in enumerate(self.initial_state[0]):
feed_dict[c] = state[i].c
feed_dict[h] = state[i].h
else:
if self.hparams.num_layers == 1:
initial_state = tuple([self.initial_state])
state = tuple([state])
else:
initial_state = self.initial_state
for i, (c, h) in enumerate(initial_state):
feed_dict[c] = state[i].c
feed_dict[h] = state[i].h
return feed_dict
def train_one_batch(self, sess, source, targets, state, run_info=False, add_summary=False):
feed_dict = {self.dropout_keep_prob: self.hparams.dropout_keep_prob}
feed_dict = self.feed_state(feed_dict, state)
feed_dict[self.source_tokens]= source
feed_dict[self.targets] = targets
batch_size = self.batch_size
if run_info:
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
_, batch_loss,accuracy, summary, global_step= sess.run(
[self.train_op, self.losses,self.accuracy, self.summary_op,
self.global_step],
feed_dict=feed_dict,
options=run_options,
run_metadata=run_metadata)
else:
_, batch_loss,accuracy, summary, global_step= sess.run(
[self.train_op, self.losses, self.accuracy, self.summary_op, self.global_step],
feed_dict=feed_dict)
if run_info:
self.summary_writer.add_run_metadata(
run_metadata, 'step%03d' % global_step)
print("adding run meta for", global_step)
if add_summary:
self.summary_writer.add_summary(summary, global_step=global_step)
return batch_loss, accuracy, global_step, batch_size
def eval_one_batch(self, sess, source, targets, state):
feed_dict = {self.dropout_keep_prob: 1.0}
feed_dict = self.feed_state(feed_dict, state)
feed_dict[self.source_tokens] = source
feed_dict[self.targets] = targets
batch_loss, accuracy= sess.run(
[self.losses,self.accuracy ], feed_dict=feed_dict)
return batch_loss,accuracy
```
#### File: RNN_classify/utils/weight_drop_lstm.py
```python
import tensorflow as tf
from tensorflow.python.util import nest
from tensorflow.python.ops.rnn_cell_impl import _Linear
class WeightDropLSTMCell(tf.contrib.rnn.BasicLSTMCell):
def __init__(self, num_units, weight_keep_drop=0.7, mode=tf.estimator.ModeKeys.TRAIN,
forget_bias=1.0, state_is_tuple=True, activation=None, reuse=None):
"""Initialize the parameters for an LSTM cell.
"""
self.weight_keep_drop = weight_keep_drop
self.mode = mode
super(WeightDropLSTMCell,self).__init__( num_units, forget_bias, state_is_tuple, activation, reuse)
def call(self, inputs, state):
"""Long short-term memory cell (LSTM).
Args:
inputs: `2-D` tensor with shape `[batch_size x input_size]`.
state: An `LSTMStateTuple` of state tensors, each shaped
`[batch_size x self.state_size]`, if `state_is_tuple` has been set to
`True`. Otherwise, a `Tensor` shaped
`[batch_size x 2 * self.state_size]`.
Returns:
A pair containing the new hidden state, and the new state (either a
`LSTMStateTuple` or a concatenated state, depending on
`state_is_tuple`).
"""
sigmoid = tf.sigmoid
# Parameters of gates are concatenated into one multiply for efficiency.
if self._state_is_tuple:
c, h = state
else:
c, h = tf.split(value=state, num_or_size_splits=2, axis=1)
if self._linear is None:
self._linear = _Linear([inputs, h], 4 * self._num_units, True)
if self.mode == tf.estimator.ModeKeys.TRAIN:
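# DropConnect-style weight dropping: build a ones mask over the concatenated gate weight
# matrix, drop entries only in its second half (mask_2) via tf.nn.dropout, and multiply by
# weight_keep_drop to cancel the inverted-dropout rescaling so the kept weights stay unchanged.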
mask = tf.ones_like(self._linear._weights)
mask_1, mask_2 = tf.split(mask,num_or_size_splits=2,axis=1)
mask_2 = tf.nn.dropout(mask_2,keep_prob=self.weight_keep_drop) * self.weight_keep_drop
mask = tf.concat([mask_1,mask_2],axis=1)
self._linear._weights = self._linear._weights * mask
# i = input_gate, j = new_input, f = forget_gate, o = output_gate
i, j, f, o = tf.split(
value=self._linear([inputs, h]), num_or_size_splits=4, axis=1)
new_c = (
c * sigmoid(f + self._forget_bias) + sigmoid(i) * self._activation(j))
new_h = self._activation(new_c) * sigmoid(o)
if self._state_is_tuple:
new_state = tf.contrib.rnn.LSTMStateTuple(new_c, new_h)
else:
new_state = tf.concat([new_c, new_h], 1)
return new_h, new_state
``` |
{
"source": "johnnytorres/crisis_conv_crosslingual",
"score": 3
} |
#### File: crisis_conv_crosslingual/model/embeddings.py
```python
import os
import logging
import argparse
import numpy as np
import tensorflow as tf
from keras_preprocessing.text import Tokenizer
from tqdm import tqdm
from data import DataLoader
class EmbeddingsBuilder:
def __init__(self, args):
logging.info('initializing...')
self.args = args
self.dataset = DataLoader(self.args)
self.embeddings_path = args.embeddings_path
self.small_embeddings_path = os.path.splitext(self.embeddings_path)[0] + '_small.vec'
logging.info('initializing...[ok]')
def build_embedding(self, vocab_dict):
"""
Load embedding vectors from a .txt file.
Optionally limit the vocabulary to save memory. `vocab` should be a set.
"""
num_words = len(vocab_dict)
num_found = 0
with open(self.small_embeddings_path, 'w') as out_file:
with tf.gfile.GFile(self.embeddings_path) as f:
header = next(f)
num_embeddings, embeddings_dim = header.split(' ')
num_embeddings = int(num_embeddings)
out_file.write(header)
for _, line in tqdm(enumerate(f), 'loading embeddings', total=num_embeddings):
tokens = line.rstrip().split(" ")
word = tokens[0]
if word in vocab_dict:
num_found += 1
out_file.write(line)
tf.logging.info("Found embeddings for {} out of {} words in vocabulary".format(num_found, num_words))
def run(self):
self.dataset.load()
X = self.dataset.X_train_labeled['moment'].values
X = np.append(X, self.dataset.X_train_unlabeled['moment'].values, axis=0)
X = np.append(X, self.dataset.X_test['moment'].values, axis=0)
tokenizer = Tokenizer()
tokenizer.fit_on_texts(X)
self.build_embedding(tokenizer.word_index)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
logging.info('initializing task...')
parser = argparse.ArgumentParser()
parser.add_argument('--data-dir', default='data/claff-happydb')
parser.add_argument('--embeddings-path', type=str, default=None)
parser.add_argument('--num-unlabeled', type=int, default=1000)
parser.add_argument('--use-allfeats', action='store_true', default=False)
parser.add_argument('--predict', action='store_true', default=True)
builder = EmbeddingsBuilder(args=parser.parse_args())
builder.run()
logging.info('task finished...[ok]')
```
#### File: model/models/base.py
```python
import numpy as np
from gensim.models import Word2Vec
class BaseModel:
def __init__(self, task):
self.args = task.args
self.dataset = task.dataset
def train(self, X, y):
pass
def predict(self, X):
pass
def build_fit_w2v(self, X_text):
w2v_model = Word2Vec()
w2v_model.build_vocab(X_text)
w2v_model.train(
X_text,
total_examples=w2v_model.corpus_count,
epochs=w2v_model.iter
)
return w2v_model
def transform_text_to_w2v(self, wv2_model, X_text):
X_tmp = []
for sentence in X_text:
embeddings = []
for word in sentence.split():
if word in wv2_model:
embeddings.append(wv2_model[word])
if len(embeddings) == 0:
emb_avg = np.zeros(wv2_model.vector_size)
else:
emb_avg = np.average(embeddings, axis=0)
X_tmp.append(emb_avg)
return np.array(X_tmp)
```
#### File: model/models/base_supervised.py
```python
import numpy as np
from sklearn.semi_supervised import LabelSpreading
from tqdm import tqdm
from models.base import BaseModel
class SupervisedBaseModel(BaseModel):
def fit_text(self, X_text, y=None):
pass
def transform_text(self, X_text):
pass
def augment_features(self, X_text, X_allfeats):
return X_text
def augment_instances(self, X_train, y_train):
if self.args.num_unlabeled == 0:
return X_train, y_train
X_unlabeled = self.dataset.X_train_unlabeled
y_unlabeled = self.dataset.y_train_unlabeled
X_unlabeled = X_unlabeled.values
y_unlabeled = y_unlabeled.values
X_train_text = X_train[:, self.args.TEXT_COL]
self.fit_text(X_train_text, y_train)
X_train_rep = self.transform_text(X_train_text)
X_train_rep = self.augment_features(X_train_rep, X_train)
chunk_size = 1000
num_instances = X_unlabeled.shape[0]
num_cols = y_train.shape[1]
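# Semi-supervised augmentation: propagate labels from the labeled training set to the
# unlabeled pool in chunks of `chunk_size` rows, fitting one sklearn LabelSpreading model
# per label column; the pseudo-labeled rows are appended to the training data below.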
for row in tqdm(range(0, self.args.num_unlabeled, chunk_size), desc='spreading labels in rows',
total=int(self.args.num_unlabeled / chunk_size)):
end_row = row + chunk_size
end_row = np.minimum(end_row, num_instances)
for col in tqdm(range(num_cols), desc='spreading labels in cols', leave=False):
X_unlabeled_rep = self.transform_text(X_unlabeled[row:end_row, self.args.TEXT_COL])
X_unlabeled_rep = self.augment_features(X_unlabeled_rep, X_unlabeled[row:end_row, :])
X_spread = np.append(X_train_rep, X_unlabeled_rep, axis=0)
y_spread = np.append(y_train[:, col], y_unlabeled[row:end_row, col], axis=0)
labeling = LabelSpreading()
labeling.fit(X_spread, y_spread)
y_unlabeled[row:end_row, col] = labeling.predict(X_unlabeled_rep)
X_train = np.append(X_train, X_unlabeled[:row + chunk_size], axis=0)
y_train = np.append(y_train, y_unlabeled[:row + chunk_size], axis=0)
return X_train, y_train
```
#### File: model/models/supervised_cnn.py
```python
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers import Embedding
from keras.layers import Conv1D, GlobalMaxPooling1D
from keras.datasets import imdb
from models.supervised_fasttext import FastTextModel
class CnnModel(FastTextModel):
def __init__(self, task):
super(CnnModel, self).__init__(task)
# set parameters:
#self.max_features = 4000
#self.max_len = 400
#self.batch_size = 32
self.filters = 500
self.kernel_size = 5
self.hidden_dims = 500
self.epochs = 10
def build_model(self):
print('Build model...')
model = Sequential()
# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
weights = None if self.embeddings_matrix is None else [self.embeddings_matrix]
model.add(
Embedding(
self.max_features,
self.embeddings_dim,
input_length=self.max_len,
#mask_zero=True,
weights=weights
)
)
model.add(Dropout(0.5))
# we add a Convolution1D, which will learn filters
# word group filters of size filter_length:
model.add(Conv1D(
self.filters,
self.kernel_size,
padding='valid',
activation='relu',
strides=1))
# we use max pooling:
model.add(GlobalMaxPooling1D())
# We add a vanilla hidden layer:
model.add(Dense(self.hidden_dims))
model.add(Dropout(0.5))
model.add(Activation('relu'))
# We project onto a single unit output layer, and squash it with a sigmoid:
model.add(Dense(self.num_labels))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
return model
if __name__ == '__main__':
# set parameters:
max_features = 5000
maxlen = 400
batch_size = 32
embedding_dims = 50
filters = 250
kernel_size = 3
hidden_dims = 250
epochs = 2
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print('Build model...')
model = Sequential()
# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
model.add(Embedding(max_features,
embedding_dims,
input_length=maxlen))
model.add(Dropout(0.2))
# we add a Convolution1D, which will learn filters
# word group filters of size filter_length:
model.add(Conv1D(filters,
kernel_size,
padding='valid',
activation='relu',
strides=1))
# we use max pooling:
model.add(GlobalMaxPooling1D())
# We add a vanilla hidden layer:
model.add(Dense(hidden_dims))
model.add(Dropout(0.2))
model.add(Activation('relu'))
# We project onto a single unit output layer, and squash it with a sigmoid:
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_test, y_test))
```
#### File: model/models/unsupervised_kmeans_avg.py
```python
import numpy as np
from keras_preprocessing.text import Tokenizer
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from models.base_unsupervised import UnsupervisedBaseModel
class UnsupervisedKmeansAvgBaseModel(UnsupervisedBaseModel):
def __init__(self, task):
super(UnsupervisedKmeansAvgBaseModel, self).__init__(task)
self.num_clusters = 4 # combinations of social and agency
self.clf_model = KMeans(
init='k-means++',
n_clusters=self.num_clusters,
n_init=10,
random_state=self.args.random_state
)
def augment_features(self, X_text, X_all_feats):
if not self.args.use_allfeats:
return X_text
X_all = np.concatenate(
[X_text, X_all_feats[:, 2:]],
axis=1)
return X_all
def train(self, X, y=None):
X, y = self.augment_instances(X, y)
#X_text = self.text_repr_model.fit_transform(X[:, self.args.TEXT_COL])
X_text = X[:, self.args.TEXT_COL]
self.max_features = 4000
self.tokenizer = Tokenizer(num_words=self.max_features)
self.tokenizer.fit_on_texts(X_text)
X_text = self.tokenizer.texts_to_sequences(X_text)
X_text = self.tokenizer.sequences_to_texts(X_text)
self.text_rep_model = self.build_fit_w2v(X_text)
X_text = self.transform_text_to_w2v(self.text_rep_model, X_text)
X_all_feats = self.augment_features(X_text, X)
pca = PCA(
n_components=self.num_clusters,
random_state=self.args.random_state
)
pca.fit(X_all_feats)
model = KMeans(
init=pca.components_,
n_clusters=self.num_clusters,
n_init=1,
random_state=self.args.random_state
)
model.fit(X_all_feats)
self.clf_model = model
def predict(self, X):
X_text = X[:, self.args.TEXT_COL]
#X_text = self.text_rep_model.transform(X[:, self.args.TEXT_COL])
X_text = self.transform_text_to_w2v(self.text_rep_model, X_text)
X_all_feats = self.augment_features(X_text, X)
y_pred = self.clf_model.predict(X_all_feats)
y = y_pred.astype(np.uint8)
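# Decode each cluster index (0-3) into a pair of binary labels: unpack the uint8 into its
# 8 bits, keep the two lowest-order bits and reverse them, so e.g. cluster 1 -> [1, 0] and
# cluster 2 -> [0, 1] (the assumed social/agency label pair).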
y = np.unpackbits(y)
y = y.reshape(y_pred.shape[0],8)
y = y[:, -2:]
y = y[:, ::-1]
return y
```
#### File: model/preprocessing/data_tokenizer.py
```python
import os
import csv
import argparse
import logging
import unicodedata
import nltk
import pandas as pd
import unidecode
from tqdm import tqdm
from nltk.stem import SnowballStemmer, WordNetLemmatizer
from nltk.tokenize import wordpunct_tokenize, word_tokenize
from nltk.tokenize import TweetTokenizer
from preprocessor import tokenize, preprocess, clean
import preprocessor
# preprocessor.set_options(
# preprocessor.OPT.URL,
# preprocessor.OPT.SMILEY,
# preprocessor.OPT.MENTION,
# preprocessor.OPT.EMOJI,
# preprocessor.OPT.NUMBER
# )
class DataTokenizer:
def __init__(self, args):
self.args = args
self.input_file = os.path.expanduser(args.input_file)
self.output_file = os.path.expanduser(args.output_file)
self.text_field = args.text_field
self.lowercase = True
self.tokenizer = args.tokenizer # 0: not tokenize, 1: word_tokenize, 2: punk tokenize, 3: twitter tokenizer
self.stem = False
self.lemmatize = False
self.stemmer = SnowballStemmer("english") if self.stem else None
self.lemmatizer = WordNetLemmatizer() if self.lemmatize else None
self.tweetokenizer = TweetTokenizer(strip_handles=True, reduce_len=True)
stopword_list = nltk.corpus.stopwords.words(args.language)
if 'no' in stopword_list:
stopword_list.remove('no')
if 'not' in stopword_list:
stopword_list.remove('not')
self.stopword_list = stopword_list
if self.tokenizer==3:
self.tweet_tokenizer = TweetTokenizer()
def tokenize_short_text(self, raw_short_text):
short_text = raw_short_text
short_text = short_text.strip()
short_text = unidecode.unidecode(short_text)
if self.lowercase:
short_text = short_text.lower()
if self.tokenizer > 0:
if self.tokenizer == 1:
uttterance_tokens = word_tokenize(short_text)
if self.tokenizer == 2:
uttterance_tokens = wordpunct_tokenize(short_text)
if self.tokenizer == 3:
uttterance_tokens = self.tweet_tokenizer.tokenize(short_text)
if self.tokenizer == 4:
short_text = clean(short_text)
short_text = self.remove_accented_chars(short_text)
uttterance_tokens = self.tweetokenizer.tokenize(short_text)
uttterance_tokens = self.remove_duplicated_sequential_words(uttterance_tokens)
uttterance_tokens = self.remove_stopwords(uttterance_tokens)
if self.stem:
uttterance_tokens = [list(map(self.stemmer.stem, sub)) for sub in uttterance_tokens]
if self.lemmatize:
uttterance_tokens = [[self.lemmatizer.lemmatize(tok, pos='v') for tok in sub] for sub in uttterance_tokens]
short_text = " ".join(uttterance_tokens)
return short_text
def remove_stopwords(self, tokens):
filtered_tokens = [token for token in tokens if token not in self.stopword_list]
return filtered_tokens
def remove_duplicated_sequential_words(self, uttterance_tokens):
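# Collapse immediate repetitions of a token, e.g. ['so', 'so', 'happy', 'so'] -> ['so', 'happy', 'so'];
# non-adjacent repeats are kept.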
i = 0
while i < len(uttterance_tokens):
j = i + 1
while j < len(uttterance_tokens):
if uttterance_tokens[i] == uttterance_tokens[j]:
del uttterance_tokens[j]
else:
break
i += 1
return uttterance_tokens
def remove_accented_chars(self, text):
text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf-8', 'ignore')
return text
def run(self):
ds=pd.read_csv(
self.input_file,
parse_dates=['timestamp'],
dtype={'id': object, 'conversation_id': object, 'in_reply_to_status_id': object}
)
desc = os.path.split(self.input_file)[1]
comments_tokenized = []
for i, row in tqdm(ds.iterrows(), 'tokenize {}'.format(desc), total=ds.shape[0]):
comments_tokenized.append(self.tokenize_short_text(row[self.text_field]))
ds[self.text_field] = comments_tokenized
output_file = os.path.expanduser(self.output_file)
ds.to_csv(output_file, index=False, quoting=csv.QUOTE_ALL)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input-file')
parser.add_argument('--output-file')
parser.add_argument('--language', default='spanish')
parser.add_argument('--text-field', type=str, default='text')
parser.add_argument('--tokenizer', type=int, default=4)
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
builder = DataTokenizer(parser.parse_args())
builder.run()
``` |
{
"source": "johnnytorres/cwcontribution",
"score": 3
} |
#### File: cwcontribution/source/nlp_google.py
```python
import sys
import pandas as pd
import numpy as np
from google.cloud import language
def print_result(annotations):
score = annotations.sentiment.score
magnitude = annotations.sentiment.magnitude
for index, sentence in enumerate(annotations.sentences):
sentence_sentiment = sentence.sentiment.score
print('Sentence {} has a sentiment score of {}'.format(
index, sentence_sentiment))
print('Overall Sentiment: score of {} with magnitude of {}'.format(
score, magnitude))
return 0
def analyze():
"""Run a sentiment analysis request on text within a passed filename."""
language_client = language.Client()
#filename = '../dataset/wiki/aawd_annotated_sent.csv'
filename = '../dataset/wiki/opinions_annotated_sent.csv'
ds = pd.read_csv(filename)
#ds['sentence'] = ds.text.apply(lambda t: t.replace('.', ' ').replace('\n', ' ').replace('\t', ' '))
#dat = ds[0:100].text.str.cat(sep='. ')
if 'sent_score' not in ds.columns:
ds['sent_score'] = np.nan
ds['sent_magnitude'] = np.nan
counter = 0
for i, row in ds.iterrows():
try:
if not np.isnan(row['sent_score']):
continue
if row['lang'] == 'es':
continue
# Instantiates a plain text document.
dat = row['text']
document = language_client.document_from_text(dat)
# Detects sentiment in the document.
annotations = document.annotate_text(include_sentiment=True,
include_syntax=False,
include_entities=False)
ds.loc[i, 'sent_score'] = annotations.sentiment.score
ds.loc[i, 'sent_magnitude'] = annotations.sentiment.magnitude
counter += 1
print(f"sentence {i} score:{annotations.sentiment.score}")
if counter > 1000:
break
except:
print("Unexpected error:", sys.exc_info()[0])
ds.to_csv(filename, index=False)
# Print the results
# print_result(annotations)
if __name__ == '__main__':
analyze()
``` |
{
"source": "johnnytorres/recsys_twconv_s2s",
"score": 2
} |
#### File: twconvrecsys/data/datasets.py
```python
import os
import subprocess
import keras
def prepare_dataset(args):
if args.data_dir.startswith('gs://') or os.path.exists(args.data_dir):
expand_dirs(args)
return
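# Otherwise download "<data_dir>.zip" from the public GCS bucket and extract it via
# keras.utils.get_file (cached by default under ~/.keras/datasets/<data_dir>), then point
# args.data_dir at the extracted folder.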
fname = "{}.zip".format(args.data_dir)
origin = "https://storage.googleapis.com/ml-research-datasets/twconv/{}".format(fname)
cache_subdir = "datasets/{}".format(args.data_dir)
fpath = keras.utils.get_file(fname, origin, cache_subdir=cache_subdir, extract=True)
args.data_dir = os.path.split(fpath)[0]
expand_dirs(args)
def expand_dirs(args):
data_dir = os.path.join(args.data_dir, args.data_subdir)
if args.train_files:
args.train_files = os.path.join(data_dir, args.train_files)
args.eval_files = os.path.join(data_dir, args.eval_files)
args.test_files = os.path.join(data_dir, args.test_files)
args.predict_files = os.path.join(data_dir, args.predict_files) if args.predict_files else None
args.vocab_path = os.path.join(data_dir, args.vocab_path)
args.vocab_processor_path = os.path.join(data_dir, 'vocab_processor.bin')
if args.embedding_path and args.embedding_enabled:
args.embedding_path = os.path.join(args.data_dir, args.embedding_path)
else:
args.embedding_path = None
if not args.job_dir:
args.job_dir = os.path.join(data_dir, 'results', args.estimator)
# get train size
train_csvrecords = os.path.join(data_dir, 'train.csvrecords')
args.train_size = wccount(train_csvrecords)
def wccount(filename):
print('counting lines in file {}'.format(filename))
out = subprocess.Popen(
['wc', '-l', filename],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT
).communicate()[0]
print('TRAIN SIZE', out)
num_instances=int(out.split()[0])-1 # minus header
return num_instances
```
#### File: twconvrecsys/metrics/recall.py
```python
import numpy as np
class RecallEvaluator:
@staticmethod
def _calculate(y_true, y_pred, k=1):
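# recall@k: fraction of examples whose true target index appears among the first k entries
# of the (already rank-sorted) prediction row.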
num_examples = len(y_true)
num_correct = 0
for label, predictions in zip(y_true, y_pred):
if label in predictions[:k]:
num_correct+=1
return num_correct/num_examples
@staticmethod
def calculate(y_true, y_pred):
y_pred = np.argsort(y_pred, axis=1)
y_pred = np.fliplr(y_pred)
num_elements = y_pred.shape[1]
klist = np.array( [1, 2, 5, 10])
klist = klist[klist <= num_elements]
metrics = []
for k in klist:
r = RecallEvaluator._calculate(y_true, y_pred, k)
print('recall@({}, {}): {}'.format(k, num_elements, r))
metrics.append(['recall', k, num_elements, r])
return metrics
if __name__ == '__main__':
labels = [1, 2, 2, 0]
predictions = [
[1, 2, 3, 0],
[1, 2, 3, 0],
[2, 3, 1, 0],
[0, 1, 2, 3]
]
score = RecallEvaluator.calculate(labels, predictions)
print ('done')
```
#### File: twconvrecsys/metrics/report.py
```python
import os
import argparse
import pandas as pd
from twconvrecsys.data.csvreader import DataHandler
from twconvrecsys.metrics.ndgc import NdgcEvaluator
from twconvrecsys.metrics.recall import RecallEvaluator
from twconvrecsys.metrics.precision import PrecisionEvaluator
def generate_benchmark(args):
base_results_dir = args.job_dir
base_results_dir = os.path.expanduser(base_results_dir)
results_path = os.path.join(base_results_dir, 'benchmarking.csv')
data_handler = DataHandler()
test = data_handler.load_test_data(args)
if os.path.exists(results_path):
os.remove(results_path)
print('processing...')
benchmark_ds = None
for d in os.walk(base_results_dir):
results_dir = d[0]
if not os.path.isdir(results_dir):
continue
path = os.path.join(results_dir, 'predictions.csv')
if not os.path.exists(path):
continue
dataset, model = os.path.split(results_dir)
_, dataset = os.path.split(dataset)
print(f'{dataset.upper()} - {model.upper()}')
ds = pd.read_csv(path, header=None)
col_names = ['target_{}'.format(i) for i in ds.columns]
ds.columns = col_names
y_pred = ds.values
y_true = test.label.values
metrics = RecallEvaluator.calculate(y_true, y_pred)
metrics_ds = pd.DataFrame(metrics, columns=['metric', 'k', 'N', 'value'])
metrics = PrecisionEvaluator.calculate(y_true, y_pred)
metrics_ds = metrics_ds.append(pd.DataFrame(metrics, columns=['metric', 'k', 'N', 'value']), ignore_index=True)
metrics = NdgcEvaluator.calculate(y_true, y_pred)
metrics_ds = metrics_ds.append(pd.DataFrame(metrics, columns=['metric', 'k', 'N', 'value']), ignore_index=True)
metrics_ds['dataset'] = dataset
metrics_ds['model'] = model
benchmark_ds = metrics_ds if benchmark_ds is None else benchmark_ds.append(metrics_ds, ignore_index=True)
cols = ['dataset', 'model', 'metric', 'k', 'N', 'value']
benchmark_ds[cols].to_csv(results_path, index=False)
print('[OK]')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data-dir', type=lambda path: os.path.expanduser(path))
parser.add_argument('--data-subdir', type=lambda path: os.path.expanduser(path))
parser.add_argument('--job-dir', type=lambda path: os.path.expanduser(path))
generate_benchmark(parser.parse_args())
```
#### File: twconvrecsys/models/random.py
```python
import numpy as np
from twconvrecsys.models.base import BaseConversationRecommender
class RandomConversationRecommender(BaseConversationRecommender):
def __init__(self):
super().__init__()
def _predict(self, source, targets):
n_utt = len(targets)
return np.random.choice(n_utt, n_utt, replace=False)
``` |
{
"source": "johnnyUCSF/hw3",
"score": 2
} |
#### File: hw3/hw3/optimize.py
```python
from hw3 import io
from hw3 import smith_waterman
import random
import copy
#########
def byFirst(elem):
return elem[0]
def calc_total_scores(scoring_matrix,true_pos,true_neg,pen_gap_open,pen_gap_extend):
######note all pairs contains files; each file contains pairs
all_pairs = [true_pos,true_neg]
#####
total_scores = []
for file in all_pairs:
for pair in file:
###this contains just the scores
output = smith_waterman.smith_waterman(pair[0],pair[1],scoring_matrix,pen_gap_open,pen_gap_extend)
total_scores.append([output[2],pair])
return(total_scores)
def calc_fitness(FPRs,scoring_matrix,true_pos,true_neg,pen_gap_open,pen_gap_extend):
######takes sum of TPRs for each; max = 4.0
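######in other words, an ROC-style objective: alignment scores of all true-positive and
######true-negative pairs are pooled and sorted, the true-positive rate is read off at each
######false-positive-rate cutoff in FPRs, and those TPRs are summed.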
######note all pairs contains files; each file contains pairs
all_pairs = [true_pos,true_neg]
#####calculate alignments based on new scoring matrix
total_scores = []
for file in all_pairs:
for pair in file:
####calculate scores
output = smith_waterman.smith_waterman(pair[0],pair[1],scoring_matrix,pen_gap_open,pen_gap_extend)
total_scores.append([output[2],pair])
#####
total_scores.sort(key=byFirst,reverse=True)
FP_limits = []
#####adjust values
for FPR in FPRs:
FP_limits.append(FPR * len(true_neg))
####init
TPR_sum = 0.0
#####
for FP_limit in FP_limits:
TP = 0.0
FP = 0.0
P = float(len(true_pos))
N = float(len(true_neg))
####sort biggest to smallest
total_scores.sort(key=byFirst,reverse=True)
####scroll through and count TPs and FPs
if FP_limit == 0:
for alignment in total_scores:
if alignment[1] in true_pos:
TP += 1
else:
break
else:
for alignment in total_scores:
if FP >= FP_limit:
break
if alignment[1] in true_pos:
TP += 1
else:
FP += 1
####save the TPR
TPR_sum += (TP/P)
return TPR_sum
########
#########
class Particle:
############
self_C = 0.1
swarm_C = 1.0
jitter = 0.5 ###this is a percentage jitter, jitters at percent of speed_init
speed_init = 1.0
speed_limit = 10.0
fitness = 0.0
fitness_prevbest = 0.0
identities = []
x_curr = [[]]
v_curr = [[]]
x_prevbest = [[]]
x_neighborbest = [[]]
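####self_C / swarm_C weight the pull toward a particle's own best and the swarm's best;
####jitter, speed_init and speed_limit control random perturbation and velocity clipping.
####Note these are class-level defaults shared by all particles until overwritten per instance.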
##########initialize x positions by introducing some jitter
def initialize_x(self):
##
for row in range(len(self.x_curr)):
for col in range(len(self.x_curr)):
####this maintains the symetrical property of matrix
if col <= row:
####essentially move them by some amount between 0 and speed init
sign = random.choice([True, False])
if sign == True:
self.x_curr[row][col] = copy.deepcopy(self.x_curr[row][col]) + self.speed_init*random.uniform(0, 1)
else:
self.x_curr[row][col] = copy.deepcopy(self.x_curr[row][col]) - self.speed_init*random.uniform(0, 1)
####make symetrical
tmp = copy.deepcopy(self.x_curr[row][col])
self.x_curr[col][row] = copy.deepcopy(tmp)
return
##########give x_coordinates some jitter
def jitter_x(self):
for row in range(len(self.x_curr)):
for col in range(len(self.x_curr)):
####this maintains the symetrical property of matrix
if col <= row:
####essentially move them by some amount between 0 and speed init
sign = random.choice([True, False])
if sign == True:
self.x_curr[row][col] = copy.deepcopy(self.x_curr[row][col]) + self.jitter*(self.speed_init*random.uniform(0, 1))
else:
self.x_curr[row][col] = copy.deepcopy(self.x_curr[row][col]) - self.jitter*(self.speed_init*random.uniform(0, 1))
####make symetrical
tmp = copy.deepcopy(self.x_curr[row][col])
self.x_curr[col][row] = copy.deepcopy(tmp)
return
##########initialize velocity vector
def initialize_speed(self):
##
self.v_curr = copy.deepcopy(self.x_curr)
##
for row in range(len(self.x_curr)):
for col in range(len(self.x_curr)):
####this maintains the symetrical property of matrix
if col <= row:
sign = random.choice([True, False])
if sign == True:
self.v_curr[row][col] = self.speed_init*random.uniform(0, 1)
else:
self.v_curr[row][col] = -(self.speed_init*random.uniform(0, 1))
####make symetrical
tmp = self.v_curr[row][col]
self.v_curr[col][row] = tmp
return
############split off first row from scoring matrix
def format_identities(self):
self.identities = copy.deepcopy(self.x_curr[0])
self.x_curr.pop(0)
return
############rebuild scoring matrix
def blosum(self):
tmp = copy.deepcopy(self.x_curr)
tmp.insert(0,copy.deepcopy(self.identities))
return(tmp)
#############update position automatically
def update_position(self):
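###classic PSO update: v <- v + c1*r1*(x_prevbest - x) + c2*r2*(x_neighborbest - x), then
###x <- x + v; here r1/r2 are random on/off switches rather than uniform(0,1) weights.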
###initialize random numbers
random2 = random.choice([True, False])
random3 = random.choice([True, False])
##
term2 = 0
term3 = 0
##
if random2:
#####calculate self movement vector
term2 = subtract_symetrically(self.x_prevbest,self.x_curr,self.self_C)
##
if random3:
#####calculate best neighbor movement vector
term3 = subtract_symetrically(self.x_neighborbest,self.x_curr,self.swarm_C)
##
tmp = add_symetrically(term2,term3,1)
v_new = add_symetrically(self.v_curr,tmp,1)
###assign velocity
self.v_curr = copy.deepcopy(v_new)
###limit speed
self.limit_velocity()
###
x_new = add_symetrically(self.x_curr,v_new,1)
####assign
self.x_curr = copy.deepcopy(x_new)
####
return
##############limit velocity vector
def limit_velocity(self):
for row in range(len(self.v_curr)):
for col in range(len(self.v_curr)):
#####
if abs(self.v_curr[row][col]) > self.speed_limit:
self.v_curr[row][col] = copy.deepcopy(self.speed_limit)
return
###############
def add_symetrically(x_curr,v_curr,multiplier):
if x_curr == 0:
return(v_curr)
if v_curr == 0:
return(x_curr)
####add symetrically
for row in range(len(x_curr)):
for col in range(len(x_curr)):
####this maintains the symetrical property of matrix
if col <= row:
####add
x_curr[row][col] = (x_curr[row][col]+v_curr[row][col])*multiplier
####make symetrical
tmp = x_curr[row][col]
x_curr[col][row] = tmp
########
return(x_curr)
def subtract_symetrically(x_curr,v_curr,multiplier):
if x_curr == 0:
return(v_curr)
if v_curr == 0:
return(x_curr)
####subtract symetrically
for row in range(len(x_curr)):
for col in range(len(x_curr)):
####this maintains the symetrical property of matrix
if col <= row:
####subtract
x_curr[row][col] = (x_curr[row][col]-v_curr[row][col])*multiplier
####make symetrical
tmp = x_curr[row][col]
x_curr[col][row] = tmp
########
return(x_curr)
#########
def particle_swarm(scoring_matrix,FPRs,pen_gap_extend,pen_gap_open,true_pos,true_neg,num_particles,k_max):
######initialize
All_Particles = []
initial_fitness = calc_fitness(FPRs,scoring_matrix,true_pos,true_neg,pen_gap_open,pen_gap_extend)
for i in range(num_particles):
new_particle = Particle()
####start from current blosum matrix
new_particle.x_curr = copy.deepcopy(scoring_matrix)
####split off first row of identities to allow for easy matrix operations
new_particle.format_identities()
####save previous best position
new_particle.x_prevbest = copy.deepcopy(new_particle.x_curr)
new_particle.fitness_prevbest = copy.deepcopy(initial_fitness)
####introduce some jitter
new_particle.initialize_x()
####randomize velocity matrix
new_particle.initialize_speed()
####calc current fitness
new_particle.fitness = calc_fitness(FPRs,new_particle.blosum(),true_pos,true_neg,pen_gap_open,pen_gap_extend)
####save
All_Particles.append(new_particle)
######
k = 0
while k < k_max:
####new velocity matrix
new_particle.initialize_speed()
####update previous best locations for each particle
for particle in All_Particles:
if particle.fitness > particle.fitness_prevbest:
particle.fitness_prevbest = copy.deepcopy(particle.fitness)
particle.x_prevbest = copy.deepcopy(particle.x_curr)
####find best particle in group
max_fitness = -1.0
max_x_best = []
for particle in All_Particles:
if particle.fitness > max_fitness:
max_fitness = copy.deepcopy(particle.fitness)
max_x_best = copy.deepcopy(particle.x_curr)
print(k,max_fitness)
####memorize neighborbest to each particle
for particle in All_Particles:
particle.x_neighborbest = copy.deepcopy(max_x_best)
####for every three , change particle speed
if k % 3 == 0:
particle.initialize_speed()
####update particle positions
for particle in All_Particles:
###particle.jitter_x()
particle.update_position()
particle.fitness = calc_fitness(FPRs,particle.blosum(),true_pos,true_neg,pen_gap_open,pen_gap_extend)
print(particle.fitness)
###
k += 1
########return particle with best fitness
max_fitness = -1.0
max_x_neighborbest = []
for particle in All_Particles:
if particle.fitness > max_fitness:
print('okay! ',particle.fitness,max_fitness)
max_fitness = copy.deepcopy(particle.fitness)
max_x_neighborbest = copy.deepcopy(particle.blosum())
return(max_fitness,max_x_neighborbest,All_Particles)
########
``` |
{
"source": "JohnnyUtah-9/calculatorclassproject",
"score": 2
} |
#### File: calculatorclassproject/Calculator/Sqrt.py
```python
def sqrt(a):
return a ** .5
```
#### File: calculatorclassproject/Decorator/decorator.py
```python
def do_twice(func):
def wrapper_do_twice():
func()
func()
return wrapper_do_twice
```
#### File: calculatorclassproject/Stats/MeanPopu.py
```python
from Calculator.Division import division
def populationmean(num):
try:
num_values = len(num)
total = sum(num)
return division(total, num_values)
except ZeroDivisionError:
print("Error: Cannot Divide by 0")
except ValueError:
print("Check your data inputs")
```
#### File: calculatorclassproject/Stats/sdS.py
```python
from Calculator.Sqrt import sqrt
from Stats.VarP import variance
def samplestddev(num):
try:
variance_float = variance(num)
return round(sqrt(variance_float), 5)
except ZeroDivisionError:
print("Can't Divide by 0 Error")
except ValueError:
print("Please Check your data inputs")
``` |
{
"source": "JohnnyUtah-9/SidneyJohnsonCalculator",
"score": 4
} |
#### File: SidneyJohnsonCalculator/src/Calculator.py
```python
from Addition import addition
from Multiplication import multiplication
from Square_Root import sqrt
from Square import square
from Division import division
from Subtraction import subtraction
class Calculator:
result = 0
def __init__(self):
self.result = 1
pass
def add(self, a, b):
self.result = addition(a, b)
return self.result
def subtract(self, a, b):
self.result = subtraction(a, b)
return self.result
def multiply(self, a, b):
self.result = multiplication(a, b)
return self.result
def divide(self, a, b):
self.result = division(a, b)
return self.result
def square(self, a):
self.result = square(a)
return self.result
def square_root(self, a):
self.result = sqrt(a)
return self.result
```
#### File: SidneyJohnsonCalculator/src/Square_Root.py
```python
def sqrt(a):
return int(a) ** .5
``` |
{
"source": "johnnyvargast/django-model-logs",
"score": 2
} |
#### File: django-model-logs/model_log/apps.py
```python
from django.apps import AppConfig
class ModelLogConfig(AppConfig):
name = 'model_log'
def ready(self):
from .setup import register_signals
register_signals()
```
#### File: django-model-logs/model_log/middleware.py
```python
try:
from django.utils.deprecation import MiddlewareMixin
except ImportError:
MiddlewareMixin = object
from . import _local
class ModelLogMiddleware(MiddlewareMixin):
"""Expose request to the local variable.
This middleware sets request as a local thread variable, making it
available to the methods to allow tracking of the authenticated user
making a change.
"""
def process_request(self, request):
_local.request = request
def process_response(self, request, response):
if hasattr(_local, "request"):
del _local.request
return response
```
#### File: django-model-logs/model_log/models.py
```python
from django.db import models
class Log(models.Model):
ACTIONS = (
('C', "Created"),
('U', "Updated"),
('D', "Deleted")
)
date_created = models.DateTimeField("Date created", db_index=True, auto_now_add=True,
help_text="The date and time this changes was.")
object_id = models.IntegerField(help_text="Primary key of the object under version control.")
model = models.CharField(help_text="Model of the object under version control.", max_length=128)
data = models.TextField(blank=True, null=True, help_text="The data being changed.")
action = models.CharField("Action", choices=ACTIONS, help_text='created|updated|deleted', max_length=1)
user = models.TextField(blank=True, null=True, help_text='ID of the user who makes the action')
def __str__(self):
text = "Changes on {} of {} at {}".format(
self.object_id,
self.model,
self.date_created.strftime('%Y-%m-%d %H:%M'))
return text
@property
def action_label(self):
return dict(self.ACTIONS)[self.action]
class Meta:
ordering = ("-pk",)
```
#### File: django-model-logs/model_log/utils.py
```python
from django.contrib.contenttypes.models import ContentType
from .models import Log
from . import _local
def model_to_dict(instance):
opts = instance._meta
ignore_fields = getattr(instance, 'LOGGING_IGNORE_FIELDS', [])
only_fields = getattr(instance, 'LOGGING_ONLY_FIELDS', [])
if only_fields:
field_names = [f.attname for f in opts.fields if f.name in only_fields]
elif ignore_fields:
field_names = [f.attname for f in opts.fields if f.name not in ignore_fields]
else:
field_names = [f.attname for f in opts.fields]
data = {f: getattr(instance, f, None) for f in field_names}
return data
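# Example (hypothetical model): declaring LOGGING_ONLY_FIELDS = ['status'] logs just that
# field, while LOGGING_IGNORE_FIELDS = ['updated_at'] logs everything else; when both are
# set, LOGGING_ONLY_FIELDS takes precedence.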
def diff(obj):
d1 = obj.__attrs
d2 = model_to_dict(obj)
diffs = [(k, d2[k]) for k, v in d1.items() if v != d2[k]]
return dict(diffs)
def create_log(obj, action, db, data=None):
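# A Log row is written only when there are changed fields (data) or on object creation;
# the user falls back to "System User" when no request is available (e.g. shell or cron jobs).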
user = _local.request.user.id if _local.request else "System User"
if data or action == 'C':
log_data = {
'object_id': obj.pk,
'model': ContentType.objects.get_for_model(obj._meta.model).model,
'action': action,
'data': data,
'user': user
}
Log.objects.using(db).create(**log_data)
``` |
{
"source": "johnnyvf24/kalah-alpha-zero",
"score": 2
} |
#### File: kalah_zero/worker/evaluate.py
```python
import os
from logging import getLogger
from random import random
from time import sleep
from kalah_zero.agent.model_kalah import KalahModel
from kalah_zero.agent.player_kalah import KalahPlayer
from kalah_zero.config import Config
from kalah_zero.env.kalah_env import KalahEnv, Winner, Player
from kalah_zero.lib import tf_util
from kalah_zero.lib.data_helper import get_next_generation_model_dirs
from kalah_zero.lib.model_helpler import save_as_best_model, load_best_model_weight
logger = getLogger(__name__)
def start(config: Config):
tf_util.set_session_config(per_process_gpu_memory_fraction=0.2)
return EvaluateWorker(config).start()
class EvaluateWorker:
def __init__(self, config: Config):
"""
:param config:
"""
self.config = config
self.best_model = None
def start(self):
self.best_model = self.load_best_model()
while True:
ng_model, model_dir = self.load_next_generation_model()
logger.debug(f"start evaluate model {model_dir}")
ng_is_great = self.evaluate_model(ng_model)
if ng_is_great:
logger.debug(f"New Model become best model: {model_dir}")
save_as_best_model(ng_model)
self.best_model = ng_model
self.remove_model(model_dir)
def evaluate_model(self, ng_model):
results = []
winning_rate = 0
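# The challenger plays up to eval.game_num games against the current best model and is
# promoted if its win rate reaches eval.replace_rate; the loop breaks early once either
# outcome is already decided.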
for game_idx in range(self.config.eval.game_num):
# ng_win := if ng_model win -> 1, lose -> 0, draw -> None
ng_win, white_is_best = self.play_game(self.best_model, ng_model)
if ng_win is not None:
results.append(ng_win)
winning_rate = sum(results) / len(results)
logger.debug(f"game {game_idx}: ng_win={ng_win} white_is_best_model={white_is_best} "
f"winning rate {winning_rate*100:.1f}%")
if results.count(0) >= self.config.eval.game_num * (1-self.config.eval.replace_rate):
logger.debug(f"lose count reach {results.count(0)} so give up challenge")
break
if results.count(1) >= self.config.eval.game_num * self.config.eval.replace_rate:
logger.debug(f"win count reach {results.count(1)} so change best model")
break
winning_rate = sum(results) / len(results)
logger.debug(f"winning rate {winning_rate*100:.1f}%")
return winning_rate >= self.config.eval.replace_rate
def play_game(self, best_model, ng_model):
env = KalahEnv().reset()
best_player = KalahPlayer(self.config, best_model, play_config=self.config.eval.play_config)
ng_player = KalahPlayer(self.config, ng_model, play_config=self.config.eval.play_config)
best_is_white = random() < 0.5
if not best_is_white:
black, white = best_player, ng_player
else:
black, white = ng_player, best_player
env.reset()
while not env.done:
if env.player_turn == Player.black:
action = black.action(env.board, env.player_turn, env.moves_made)
else:
action = white.action(env.board, env.player_turn, env.moves_made)
env.step(action)
ng_win = None
if env.winner == Winner.white:
if best_is_white:
ng_win = 0
else:
ng_win = 1
elif env.winner == Winner.black:
if best_is_white:
ng_win = 1
else:
ng_win = 0
return ng_win, best_is_white
def load_best_model(self):
model = KalahModel(self.config)
load_best_model_weight(model)
return model
def load_next_generation_model(self):
rc = self.config.resource
while True:
dirs = get_next_generation_model_dirs(self.config.resource)
if dirs:
break
logger.info(f"There is no next generation model to evaluate")
sleep(60)
model_dir = dirs[-1] if self.config.eval.evaluate_latest_first else dirs[0]
config_path = os.path.join(model_dir, rc.next_generation_model_config_filename)
weight_path = os.path.join(model_dir, rc.next_generation_model_weight_filename)
model = KalahModel(self.config)
model.load(config_path, weight_path)
return model, model_dir
def remove_model(self, model_dir):
rc = self.config.resource
config_path = os.path.join(model_dir, rc.next_generation_model_config_filename)
weight_path = os.path.join(model_dir, rc.next_generation_model_weight_filename)
os.remove(config_path)
os.remove(weight_path)
os.rmdir(model_dir)
``` |
{
"source": "johnnyvnr/ZapBot",
"score": 3
} |
#### File: ZapBot/zapbot/zapbot.py
```python
from selenium import webdriver
import time
# Selenium must be installed on the PC and the ChromeWebDriver must be in the ZapBot folder.
# Replace "Grupo de Exemplo1" and "Grupo de Exemplo2" with the desired group names.
class zapBot:
def __init__(self):
self.message = "Bom dia família, to testando um bot aqui."
self.groups = ["Grupo de Exemplo1", "Grupo de exemplo2"]
options = webdriver.ChromeOptions()
options.add_argument('lang=pt-br')
self.driver = webdriver.Chrome(executable_path=r'./chromedriver.exe')
def sendMsg(self):
#<span dir="auto" title="Grupo de Exemplo1" class="xxxxx">Grupo de Exemplo1</span>
#<div tabindex="-1" class="xxx">
#<span data-testid="send" data-icon="send" class="">
# The HTML class names above must be replaced with the ones present in your own WhatsApp Web session.
self.driver.get('https://web.whatsapp.com')
time.sleep(30)
for group in self.groups:
group = self.driver.find_element_by_xpath(f"//span[@title='{group}']")
time.sleep(3)
group.click()
chatBox = self.driver.find_element_by_class_name('xxx')
time.sleep(3)
chatBox.click()
chatBox.send_keys(self.message)
sendButton = self.driver.find_element_by_xpath("//span[@data-icon='send']")
time.sleep(3)
sendButton.click()
time.sleep(3)
bot = zapBot()
bot.sendMsg()
``` |
{
"source": "johnny-walker/FGVC",
"score": 2
} |
#### File: FGVC/onnx/grid_sampler.py
```python
import torch
import torch.nn.functional as F
from mmcv.ops.point_sample import bilinear_grid_sample
# sample input and grid
x = torch.randn(1, 4, 10, 10)
grid = 2*torch.rand(1, 8, 8, 2) - 1 # scale as (-1, 1)
# reference output
ref = F.grid_sample(x, grid, align_corners=False)
# substitute output
out = bilinear_grid_sample(x, grid, align_corners=False)
# almost the same
print(ref - out)
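# Why the substitute: exporting F.grid_sample to ONNX is not supported at opset 11 (native
# GridSample only appears in later opsets), so mmcv's pure-tensor bilinear_grid_sample is
# used as an export-friendly drop-in that produces (almost) the same values.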
# Toy model including blinear_grid_sampler
class Sampler(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, grid):
return bilinear_grid_sample(x, grid, align_corners=False)
torch.onnx.export(
Sampler(),
(x, grid),
'bilinear_sampler.onnx',
verbose=True,
input_names=['input', 'grid'],
output_names=['output']
)
# validate the converted onnx operation
import onnxruntime as ort
sess = ort.InferenceSession('bilinear_sampler.onnx')
outputs = sess.run(None, {'input': x.numpy(), 'grid': grid.numpy()})
out_onnx = outputs[0]
# almost the same
print(ref - out_onnx)
```
#### File: FGVC/onnx/onnx_RAFT.py
```python
import os
import sys
import time
sys.path.append(os.path.abspath(os.path.join(__file__, '..', '..')))
from RAFT import utils
from RAFT import RAFT
import torch
import argparse
print(torch.__version__)
isONNX = True
if isONNX:
import onnx
import onnxruntime as onnxrun
else:
from openvino.inference_engine import IECore
from PIL import Image
import numpy as np
import glob
DEVICE = 'cpu'
def initialize_RAFT(args):
"""Initializes the RAFT model.
"""
model = torch.nn.DataParallel(RAFT(args))
model.load_state_dict(torch.load(args.model, map_location=torch.device(DEVICE)) )
model = model.module
model.to(DEVICE)
model.eval()
return model
def convert_to_ONNX(args):
RAFT_model = initialize_RAFT(args)
# set the model to inference mode
RAFT_model.eval()
dummy_input = (torch.randn(1, 3, 720, 1280, device=DEVICE), torch.randn(1, 3, 720, 1280, device=DEVICE))
input_names = ('image1', 'image2')
output_names = ('flow',)  # trailing comma: a tuple of names, not a bare string
torch.onnx.export(RAFT_model,
dummy_input ,
args.onnx_model,
input_names = input_names,
output_names = output_names,
opset_version = 11)
def check_model(args):
model = onnx.load(args.onnx_model)
onnx.checker.check_model(model)
print(onnx.helper.printable_graph(model.graph))
def create_dir(dir):
"""Creates a directory if not exist.
"""
if not os.path.exists(dir):
os.makedirs(dir)
def read_frames():
# Loads frames.
filename_list = glob.glob(os.path.join(args.path, '*.png')) + \
glob.glob(os.path.join(args.path, '*.jpg'))
# debug, Obtains imgH, imgW and nFrame.
#imgH, imgW = np.array(Image.open(filename_list[0])).shape[:2]
#nFrame = len(filename_list)
# Loads video.
video = []
for filename in sorted(filename_list):
frame = Image.open(filename)
frame = np.array(frame).astype(np.float32)
frame = np.transpose(frame, (2, 0, 1))
video.append(frame)
video = np.stack(video, axis=0)
return video, filename_list
def infer_flow_onnx(args):
video, filename_list = read_frames()
create_dir(os.path.join(args.outroot+'_flow', '_flo'))
create_dir(os.path.join(args.outroot+'_flow', '_png'))
# inference
sess = onnxrun.InferenceSession(args.onnx_model)
#inputs = sess.get_inputs()
input_image1 = sess.get_inputs()[0].name
input_image2 = sess.get_inputs()[1].name
if sess is not None:
for idx in range(len(video)-1):
start = time.time()
filename = os.path.split(filename_list[idx])[-1]
filename = os.path.splitext(filename)[0]
#filename = + '%05d'%idx
image1 = video[idx, None]
image2 = video[idx+1, None]
result = sess.run( None, { input_image1: image1,
input_image2: image2
} )
print(time.time()-start)
flow = result[0]
flow = flow.reshape((-1, flow.shape[2], flow.shape[3]))
flow = np.transpose(flow, (1, 2, 0))
flo_path = os.path.join(args.outroot+'_flow', '_flo', filename + '.flo')
Image.fromarray(utils.flow_viz.flow_to_image(flow)).save(os.path.join(args.outroot+'_flow','_png', filename + '.png'))
utils.frame_utils.writeFlow(flo_path, flow)
def infer_flow_openvino(args):
video, filename_list = read_frames()
create_dir(os.path.join(args.outroot+'_flow', '_flo'))
create_dir(os.path.join(args.outroot+'_flow', '_png'))
# inference
ie = IECore()
net = ie.read_network(model=args.onnx_model)
# Loading the network to the inference engine
exec_net = ie.load_network(network=net, device_name="CPU")
input_blobs = []
for item in net.input_info:
input_blobs.append(item)
net_outputs = list(net.outputs.keys())
if exec_net is not None:
for idx in range(len(video)-1):
filename = os.path.split(filename_list[idx])[-1]
filename = os.path.splitext(filename)[0]
#filename = + '%05d'%idx
image1 = video[idx, None]
image2 = video[idx+1, None]
inputs = { input_blobs[0]: image1, input_blobs[1]: image2 }
asyncInference = True
start = time.time()
if asyncInference:
exec_net.requests[0].async_infer(inputs)
request_status = exec_net.requests[0].wait()
print(request_status)
flow = exec_net.requests[0]
else:
# system hangs, not working
outputs = exec_net.requests[0].infer(inputs)
flow = outputs[net_outputs[0]]
print(time.time()-start)
flow = flow.reshape((-1, flow.shape[2], flow.shape[3]))
flow = np.transpose(flow, (1, 2, 0))
flo_path = os.path.join(args.outroot+'_flow', '_flo', filename + '.flo')
Image.fromarray(utils.flow_viz.flow_to_image(flow)).save(os.path.join(args.outroot+'_flow','_png', filename + '.png'))
utils.frame_utils.writeFlow(flo_path, flow)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# RAFT
parser.add_argument('--model', default='weight/raft-things.pth', help="restore checkpoint")
parser.add_argument('--small', action='store_true', help='use small model')
parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
parser.add_argument('--alternate_corr', action='store_true', help='use efficent correlation implementation')
parser.add_argument('--onnx_model', default='weight_onnx/raft.onnx', help="saving onnx model name and path")
parser.add_argument('--path', default='data/beach', help="soruce path")
parser.add_argument('--outroot', default='data/beach', help="flo out path")
args = parser.parse_args()
if isONNX:
if not os.path.exists(args.onnx_model) :
# create folder
folder = ''
splits = os.path.split(args.onnx_model)
for i in range(len(splits)-1):
folder = os.path.join(folder, splits[i])
create_dir(folder)
# convert to onnx
convert_to_ONNX(args)
check_model(args)
infer_flow_onnx(args) # slow
else:
# already convreted .onnx file successfully, load directly
infer_flow_openvino(args)
```
#### File: FGVC/onnx/pwc_openvino.py
```python
import argparse
import cv2
import os
import time
import glob
import numpy as np
from openvino.inference_engine import IECore
import pwc_utils
def create_dir(dir):
if not os.path.exists(dir):
os.makedirs(dir)
def reshape_input(net, pair):
# Call reshape, but PWCNet seems not working
x = np.array([pair])
# padding for 64 alignment
print('frame.shape:', x.shape)
x_adapt, _ = pwc_utils.adapt_x(x)
x_adapt = x_adapt.transpose((0, 4, 1, 2, 3)) # B2HWC --> BC2HW
print('adapt.shape:', x_adapt.shape)
print(f"Input shape: {net.input_info['x_tnsr'].tensor_desc.dims}")
net.reshape({'x_tnsr': x_adapt.shape})
print(f"Input shape (new): {net.input_info['x_tnsr'].tensor_desc.dims}")
def load_to_IE(model_xml, pair):
model_bin = os.path.splitext(model_xml)[0] + ".bin"
ie = IECore()
net = ie.read_network(model=model_xml, weights=model_bin)
#reshape_input(net, pair)
exec_net = ie.load_network(network=net, device_name="CPU")
print("IR successfully loaded into Inference Engine.")
del net
'''
for item in exec_net.input_info:
print('input:', item)
for key in list(exec_net.outputs.keys()):
print('output:', key)
'''
return exec_net
def read_frames(args):
filename_list = glob.glob(os.path.join(args.input, '*.png')) + \
glob.glob(os.path.join(args.input, '*.jpg'))
video = []
for filename in sorted(filename_list):
frame = cv2.imread(filename)
frame = np.array(frame).astype(np.float32) / 255. # normalize to range (0.0, 1.0)
#frame = np.transpose(frame, (2, 0, 1)) # HWC -> CHW
video.append(frame)
#video = np.stack(video, axis=0) # 1CHW
return video
def inference(args):
'''
Performs inference on an input image, given an ExecutableNetwork
'''
video = read_frames(args)
exec_net = load_to_IE(args.model, video[0])
input_size = ( args.height, args.width )
video_size = video[0].shape[:2]
# resize to input, padding for 64 alignment
print('original frame shape:', video[0].shape)
image1, x_unpad_info = pwc_utils.resize_to_fit(video[0], input_size) # return HWC
print('adapt.shape:', image1.shape)
image2 = None
for idx in range(len(video)-1):
image2, _ = pwc_utils.resize_to_fit( video[idx+1], input_size) # return HWC
# Repackage input image pairs as np.ndarray
x_adapt = np.array([[image1, image2]]) # --> B2HWC
x_adapt = x_adapt.transpose((0, 4, 1, 2, 3)) # B2HWC --> BC2HW
# inference
start = time.time()
y_hat = exec_net.infer({'x_tnsr':x_adapt})
print(time.time()-start)
image1 = image2
# restore to orignal resolution, cut off the padding
y_adapt = y_hat['pwcnet/flow_pred']
y_adapt = y_adapt.transpose((0, 2, 3, 1)) # BCHW --> BHWC
flow = np.squeeze(y_adapt, axis=0) #BHWC --> HWC
flow = pwc_utils.unpad_and_upscale(flow, x_unpad_info, video_size)
print (flow.shape)
dir = f'output_{args.height}x{args.width}'
create_dir(dir)
save_name = f'{dir}/{idx:05d}.png'
cv2.imwrite(save_name, pwc_utils.flow_to_img(flow))
def get_args():
'''
Gets the arguments from the command line.
'''
parser = argparse.ArgumentParser("Load an IR into the Inference Engine")
model_desc = "location of the model XML file"
input_desc = "location of the image input"
parser.add_argument("--input", default='../data/tennis', help=input_desc)
parser.add_argument("--model", default='../models/model_ir_384x640/pwc_frozen.xml', help=model_desc)
parser.add_argument("--height", default=384, type=int, help='model input height')
parser.add_argument("--width", default=640, type=int, help='model input width')
args = parser.parse_args()
conf = [{'model': '../models/model_ir_384x640/pwc_frozen.xml',
'height': 384,
'width': 640},
{'model': '../models/model_ir_448x768/pwc_frozen.xml',
'height': 448,
'width': 768 },
{'model': '../models/model_ir_640x832/pwc_frozen.xml',
'height': 640,
'width': 832 },
{'model': '../models/model_ir_768x1024/pwc_frozen.xml',
'height': 768,
'width': 1024 },
{'model': '../models/model_ir_768x1280/pwc_frozen.xml',
'height': 768,
'width': 1280 },
]
opt = 0
args.model = conf[opt]['model']
args.height = conf[opt]['height']
args.width = conf[opt]['width']
return args
def main():
args = get_args()
inference(args)
if __name__ == "__main__":
main()
```
#### File: SAT/pgm01/ObjRemover.py
```python
import os
import sys
sys.path.append(os.path.abspath(os.path.join(__file__, '..', '..', '..')))
import argparse
import os
import cv2
import glob
import numpy as np
import torch
import imageio
from PIL import Image
import scipy.ndimage
import torchvision.transforms.functional as F
import time
from tool.get_flowNN import get_flowNN
from tool.spatial_inpaint import spatial_inpaint
from tool.frame_inpaint import DeepFillv1
import utils.region_fill as rf
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
from tool.cvflow import CVFlowPredictor
if DEVICE == 'cpu':
    print("cuda device not found, using cpu...")
class VObjRemover():
args = None
imgHeight = 720
imgWidth = 1280
nFrame = 0
video = []
mask = []
flow_mask = []
def __init__(self, args):
self.args = args
def create_dir(self, dir):
"""Creates a directory if not exist.
"""
if not os.path.exists(dir):
os.makedirs(dir)
def initialize_CVFlow(self):
model = CVFlowPredictor()
return model
def infer_flow(self, mode, filename, image1, image2, imgH, imgW, model):
if DEVICE == 'cpu':
frame1 = image1.reshape((-1, imgH, imgW)).cpu().numpy()
frame1 = np.transpose(frame1, (1, 2, 0)).copy()
frame2 = image2.reshape((-1, imgH, imgW)).cpu().numpy()
frame2 = np.transpose(frame2, (1, 2, 0)).copy()
flow = model.predict(frame1, frame2)
#model.write_viz(os.path.join(self.args.outroot, 'flow', mode + '_png', filename + '.png'), flow)
else:
            # original iters = 12
_, flow = model(image1, image2, iters=int(self.args.iteration), test_mode=True)
flow = flow[0].permute(1, 2, 0).cpu().numpy()
return flow
def calculate_flow(self, model, video):
"""Calculates optical flow.
"""
start = time.time()
nFrame, _, imgH, imgW = video.shape
FlowF = np.empty(((imgH, imgW, 2, 0)), dtype=np.float32)
FlowB = np.empty(((imgH, imgW, 2, 0)), dtype=np.float32)
FlowNLF = np.empty(((imgH, imgW, 2, 3, 0)), dtype=np.float32)
FlowNLB = np.empty(((imgH, imgW, 2, 3, 0)), dtype=np.float32)
mode_list = ['forward', 'backward']
for mode in mode_list:
with torch.no_grad():
for i in range(nFrame):
if mode == 'forward':
if i == nFrame - 1:
continue
# Flow i -> i + 1
print("Calculating {0} flow {1:2d} <---> {2:2d}".format(mode, i, i + 1), '\r', end='')
image1 = video[i, None]
image2 = video[i + 1, None]
flow = self.infer_flow(mode, '%05d'%i, image1, image2, imgH, imgW, model)
FlowF = np.concatenate((FlowF, flow[..., None]), axis=-1)
elif mode == 'backward':
if i == nFrame - 1:
continue
# Flow i + 1 -> i
print("Calculating {0} flow {1:2d} <---> {2:2d}".format(mode, i, i + 1), '\r', end='')
image1 = video[i + 1, None]
image2 = video[i, None]
flow = self.infer_flow(mode, '%05d'%i, image1, image2, imgH, imgW, model)
FlowB = np.concatenate((FlowB, flow[..., None]), axis=-1)
print('Finish flow calculation. Consuming time:', time.time() - start)
return FlowF, FlowB, FlowNLF, FlowNLB
def complete_flow(self, corrFlow, flow_mask, mode):
"""Completes flow.
"""
if mode not in ['forward', 'backward']:
raise NotImplementedError
sh = corrFlow.shape
nFrame = sh[-1]
compFlow = np.zeros(((sh)), dtype=np.float32)
for i in range(nFrame):
print("Completing {0} flow {1:2d} <---> {2:2d}".format(mode, i, i + 1), '\r', end='')
flow = corrFlow[..., i]
if mode == 'forward':
flow_mask_img = flow_mask[:, :, i]
elif mode == 'backward':
flow_mask_img = flow_mask[:, :, i + 1]
if mode == 'forward' or mode == 'backward':
flow[:, :, 0] = rf.regionfill(flow[:, :, 0], flow_mask_img)
flow[:, :, 1] = rf.regionfill(flow[:, :, 1], flow_mask_img)
compFlow[:, :, :, i] = flow
return compFlow
def convertData(self, video, masks):
# Obtains imgH, imgW and nFrame.
self.imgHeight, self.imgWidth = video[0].shape[:2]
self.nFrame = len(video)
# convert video frames
self.video = []
for frame in video:
# convert to CHW
frm = torch.from_numpy(frame)[..., :3].permute(2, 0, 1).float()
self.video.append(frm)
self.video = torch.stack(self.video, dim=0)
self.video = self.video.to(DEVICE)
# convert masks.
self.mask = []
self.flow_mask = []
for mask in masks:
mask_img = cv2.cvtColor(mask, cv2.COLOR_BGR2GRAY)
self.mask.append(mask_img)
            # Dilate 15 pixels so that all known pixels are trustworthy
flow_mask_img = scipy.ndimage.binary_dilation(mask_img, iterations=15)
# Close the small holes inside the foreground objects
flow_mask_img = cv2.morphologyEx(flow_mask_img.astype(np.uint8), cv2.MORPH_CLOSE, np.ones((21, 21),np.uint8)).astype(bool)
flow_mask_img = scipy.ndimage.binary_fill_holes(flow_mask_img).astype(bool)
self.flow_mask.append(flow_mask_img)
def inference(self, callback):
begin = time.time()
# Flow model.
RAFT_model = self.initialize_CVFlow()
        # Calculates the corrupted flow.
corrFlowF, corrFlowB, _, _ = self.calculate_flow(RAFT_model, self.video)
#print('\nFinish flow prediction.')
start = time.time()
# Makes sure video is in BGR (opencv) format.
video = self.video.permute(2, 3, 1, 0).cpu().numpy()[:, :, ::-1, :] / 255.
# mask indicating the missing region in the video.
mask = np.stack(self.mask, -1).astype(bool)
flow_mask = np.stack(self.flow_mask, -1).astype(bool)
print('\nFinish filling mask holes. Consuming time:', time.time() - start)
# Completes the flow.
videoFlowF = corrFlowF
videoFlowB = corrFlowB
start = time.time()
videoFlowF = self.complete_flow(corrFlowF, flow_mask, 'forward')
videoFlowB = self.complete_flow(corrFlowB, flow_mask, 'backward')
print('\nFinish flow completion. Consuming time:', time.time() - start)
iter = 0
mask_tofill = mask
video_comp = video
nFrame = self.nFrame
imgH = self.imgHeight
imgW = self.imgWidth
# Image inpainting model.
deepfill = DeepFillv1(pretrained_model=self.args.deepfill_model, image_shape=[imgH, imgW])
# We iteratively complete the video.
while(np.sum(mask_tofill) > 0):
start = time.time()
print('iteration:', iter)
#self.create_dir(os.path.join(self.args.outroot, 'frame_comp_' + str(iter)))
# Color propagation.
video_comp, mask_tofill, _ = get_flowNN(self.args, video_comp, mask_tofill,
videoFlowF, videoFlowB, None, None)
print('\nFinish color propagation. Consuming time:', time.time() - start)
for i in range(nFrame):
mask_tofill[:, :, i] = scipy.ndimage.binary_dilation(mask_tofill[:, :, i], iterations=2)
img = video_comp[:, :, :, i] * 255
# Green indicates the regions that are not filled yet.
img[mask_tofill[:, :, i]] = [0, 255, 0]
callback(img)
#cv2.imwrite(os.path.join(self.args.outroot, 'frame_comp_' + str(iter), '%05d.png'%i), img)
start = time.time()
# do color propagation at most n+1 times
if self.args.inpainting or iter >= self.args.nProgagating:
mask_tofill, video_comp = spatial_inpaint(deepfill, mask_tofill, video_comp, nFrame)
break
else:
mask_tofill, video_comp = spatial_inpaint(deepfill, mask_tofill, video_comp)
iter += 1
print('Total consuming time:', time.time() - begin)
finalname = os.path.split(self.args.path)[-1]
self.create_dir(os.path.join(self.args.outroot, 'frame_comp_' + 'final'))
video_comp_ = (video_comp * 255).astype(np.uint8).transpose(3, 0, 1, 2)[:, :, :, ::-1]
# save mp4
filename = os.path.join(self.args.outroot, 'frame_comp_' + 'final', finalname+'.mp4')
imageio.mimwrite(filename, video_comp_, fps=15, quality=8, macro_block_size=1)
print('saved file:', filename)
def loadData(args):
# load data frames
videoFrames = []
filename_list = glob.glob(os.path.join(args.path, '*.png')) + \
glob.glob(os.path.join(args.path, '*.jpg'))
for filename in sorted(filename_list):
frame = cv2.imread(filename)
videoFrames.append(frame)
# load mask
maskFrames = []
filename_list = glob.glob(os.path.join(args.path_mask, '*.png')) + \
glob.glob(os.path.join(args.path_mask, '*.jpg'))
for filename in sorted(filename_list):
frame_mask = cv2.imread(filename)
maskFrames.append(frame_mask)
return videoFrames, maskFrames
def callback(frame):
print(frame.shape)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# following args are required
# video completion
parser.add_argument('--mode', default='object_removal', help="modes: object_removal / video_extrapolation")
parser.add_argument('--path', default='data/beach', help="dataset for evaluation")
parser.add_argument('--path_mask', default='data/beach_mask', help="mask for object removal")
parser.add_argument('--outroot', default='data/vc', help="output directory")
parser.add_argument('--consistencyThres', dest='consistencyThres', default=np.inf, type=float, help='flow consistency error threshold')
parser.add_argument('--alpha', dest='alpha', default=0.1, type=float)
    parser.add_argument('--Nonlocal', action='store_true', help='Whether to use edge as guidance to complete flow')
parser.add_argument('--deepfill_model', default='weight/imagenet_deepfill.pth', help="restore checkpoint")
# extra optional args
parser.add_argument('--inpainting', action='store_true', help='all the remaining unknown pixels apply inpainting')
    parser.add_argument('--nProgagating', default=2, type=int, help="do color propagation at most n+1 times")
args = parser.parse_args()
video, masks = loadData(args)
print (video[0].shape, masks[0].shape)
vObjRemover = VObjRemover(args)
vObjRemover.convertData(video, masks)
vObjRemover.inference(callback)
```
#### File: SAT/pgm01/ObjTracker.py
```python
import argparse
import os
import time
import glob
from tkinter.constants import N
import cv2
import numpy as np
import torch
from videoanalyst.config.config import cfg, specify_task
#from videoanalyst.engine.monitor.monitor_impl.utils import (labelcolormap, mask_colorize)
from videoanalyst.model import builder as model_builder
from videoanalyst.pipeline import builder as pipeline_builder
#from videoanalyst.utils.image import ImageFileVideoStream, ImageFileVideoWriter
#from videoanalyst.utils.visualization import VideoWriter
class VideoTracker:
pipeline = None
polygon_points = []
videoFrames = []
threshold = 0.5
def __init__(self, args):
self.initModel(args)
def initModel(self, args):
root_cfg = cfg
root_cfg.merge_from_file(args.config)
# resolve config
root_cfg = root_cfg.test
task, task_cfg = specify_task(root_cfg)
task_cfg.freeze()
# build model
tracker_model = model_builder.build("track", task_cfg.tracker_model)
tracker = pipeline_builder.build("track",
task_cfg.tracker_pipeline,
model=tracker_model)
segmenter = model_builder.build('vos', task_cfg.segmenter)
# build pipeline
self.pipeline = pipeline_builder.build('vos',
task_cfg.pipeline,
segmenter=segmenter,
tracker=tracker)
dev = torch.device('cpu')
self.pipeline.set_device(dev)
def initData(self, videoFrames, points, threshold=0.01):
if self.pipeline is not None:
self.videoFrames = videoFrames
self.polygon_points = points
self.threshold = threshold
return True
return False
def segmentFrames(self, callback):
# init box and mask
init_mask = None
init_box = None
first_frame = self.videoFrames[0]
np_pts = np.array(self.polygon_points)
init_box = cv2.boundingRect(np_pts)
zero_mask = np.zeros((first_frame.shape[0], first_frame.shape[1]), dtype=np.uint8)
init_mask = cv2.fillPoly(zero_mask, [np_pts], (1, ))
self.pipeline.init(first_frame, init_box, init_mask)
frame_idx = 0
for frame in self.videoFrames:
time_a = time.time()
score_map = self.pipeline.update(frame)
mask = (score_map > self.threshold).astype(np.uint8) * 255
time_cost = time.time() - time_a
print("frame process, consuming time:", time_cost)
three_channel = cv2.cvtColor(mask, cv2.COLOR_GRAY2BGR)
callback(three_channel, frame_idx)
frame_idx += 1
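# Illustrative usage sketch for VideoTracker (added for clarity; the config path,
# frame folder, and polygon coordinates below are assumptions, not part of the
# original project):
#
#   args = argparse.Namespace(config='experiments/sat/config.yaml')
#   tracker = VideoTracker(args)
#   frames = [cv2.imread(p) for p in sorted(glob.glob('data/beach/*.jpg'))]
#   polygon = [(100, 120), (220, 120), (220, 260), (100, 260)]
#   if tracker.initData(frames, polygon, threshold=0.5):
#       tracker.segmentFrames(lambda mask, idx: cv2.imwrite(f'{idx:05d}.png', mask))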
```
#### File: FGVC/tool/preprocess_mask_rcnn.py
```python
from detectron2.utils.logger import setup_logger
setup_logger()
import argparse
from pathlib import Path
import numpy as np
import cv2
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
import matplotlib.image as mpimg
def preprocess(args):
images = sorted(args.vid_path.glob('*.jpg'))
vid_name = args.vid_path.name
vid_root = args.vid_path.parent
out_mask_dir = vid_root / f'{vid_name}_maskrcnn'
out_mask_dir.mkdir(exist_ok=True)
cfg = get_cfg()
# add project-specific config (e.g., TensorMask) here if you're not running a model in detectron2's core library
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model
# Find a model from detectron2's model zoo. You can use the https://dl.fbaipublicfiles... url as well
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
cfg.MODEL.DEVICE = 'cpu'
predictor = DefaultPredictor(cfg)
number_of_frames = len(images)
for i in range(0,number_of_frames):
# try:
im = np.array(mpimg.imread(images[i]))
outputs = predictor(im)
if args.class_name == 'anything':
try:
mask = outputs["instances"].pred_masks[0].cpu().numpy()
cv2.imwrite(f"{out_mask_dir}/%05d.png" % (i), mask * 255.0)
except:
cv2.imwrite(f"{out_mask_dir}/%05d.png" % (i), np.zeros((im.shape[0], im.shape[1])))
else:
found_anything = False
for j in range(len(outputs['instances'])):
if predictor.metadata.thing_classes[(outputs['instances'][j].pred_classes.cpu()).long()]==args.class_name:
# found the required class, save the mask
mask = outputs["instances"].pred_masks[j].cpu().numpy()
cv2.imwrite(f"{out_mask_dir}/%05d.png"%(i), mask * 255.0)
found_anything = True
break
else:
# found unneeded class
print("Frame %d: Did not find %s, found %s"%(i,args.class_name,predictor.metadata.thing_classes[(outputs['instances'][j].pred_classes.cpu()).long()]))
if not found_anything:
cv2.imwrite(f"{out_mask_dir}/%05d.png" % (i), np.zeros((im.shape[0],im.shape[1])))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Preprocess image sequence')
parser.add_argument('--vid_path', type=Path, default=Path('./data/'), help='folder to process')
parser.add_argument('--class_name', type=str, default='anything',
help='The foreground object class')
args = parser.parse_args()
preprocess(args=args)
``` |
{
"source": "johnny-walker/SAT",
"score": 3
} |
#### File: SAT/pgm01/ProgramBase.py
```python
import os
import tkinter as tk
from tkinter import messagebox
from tkinter import filedialog
from PIL import Image, ImageTk
import cv2
class PgmBase(tk.Frame):
canvas = None
cvImage = None
lblMsg = None
btnOpen = None
btnReset = None
btnPlay = None
btnPause = None
btnSnap = None
mouseLeftDown = False
imgPosX = 0
imgPosY = 0
def __init__(self, root, width=640, height=480):
super().__init__(root)
self.root = root
self.frame = self
self.tkimage = None
# configure window
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
center = True
x = (screen_width - width)//2 if center else 0
y = 50 if center else 0
self.border = 2
self.padding = 2
self.title = 20
self.msgHeight = self.btnHeight = 20
self.root.width = width + self.padding*2 + self.border*2
self.root.height = height + self.msgHeight + self.btnHeight + self.border*2 + self.padding*2+ self.title
geometry = '{0:d}x{1:d}+{2:d}+{3:d}'.format(root.width, root.height, x, y)
root.geometry(geometry) # ex. root.geometry('600x400+250+50')
root.resizable(False, False)
self.root.title('Image Viewer')
self.imageStartPos = (0, 0)
self.imageClickPos = (0, 0)
self.imgResize = (width, height)
self.loadLayout()
self.bindBtnEvents()
def changeBtnStyle(self, widget, active):
btn = None
if widget == 'brush':
btn = self.btnBrush
elif widget == 'brush_add':
btn = self.btnBrushAdd
elif widget == 'brush_erase':
btn = self.btnBrushErase
elif widget == 'blend':
btn = self.btnBlend
if btn is not None:
if active:
btn.configure(foreground = 'purple')
else:
btn.configure(foreground = 'black')
def changeCursor(self, style):
self.canvas.config(cursor = style)
def bindBtnEvents(self):
self.root.protocol("WM_DELETE_WINDOW", self.onExit)
self.root.bind("<Configure>", self.onResize)
self.root.bind_all('<Key>', self.onKey) # pure virtual
self.btnPrev['command'] = lambda : self.onPrev() # pure virtual
self.btnNext['command'] = lambda : self.onNext() # pure virtual
self.btnBrush['command'] = lambda : self.onBrush() # pure virtual
self.btnBrushAdd['command'] = lambda : self.onBrushAdd() # pure virtual
self.btnBrushErase['command'] = lambda : self.onBrushErase() # pure virtual
self.btnBlend['command'] = lambda : self.onBlend() # pure virtual
self.btnReset['command'] = lambda : self.onReset() # pure virtual
self.btnSave['command'] = lambda : self.onSave() # pure virtual
# mouse events
self.root.bind('<Motion>', self.mouseMove)
self.root.bind("<Button-1>", self.mouseLDown)
self.root.bind("<ButtonRelease-1>", self.mouseLRelease)
self.root.bind("<MouseWheel>", self.mouseWheel)
def mouseMove(self, event):
if event.widget == self.canvas:
x, y = event.x, event.y
#print('{}, {}'.format(x, y))
self.imgPosX = x-self.imageStartPos[0]
self.imgPosY = y-self.imageStartPos[1]
# show image pixel location
msg = '({:d}, {:d})'.format(self.imgPosX, self.imgPosY)
self.lblMsg['text'] = msg
def mouseLDown(self, event):
if event.widget == self.canvas:
self.mouseLeftDown = True
x, y = event.x, event.y
self.imageClickPos = (x-self.imageStartPos[0], y-self.imageStartPos[1])
self.mouseLClick(event)
def mouseLRelease(self, event):
self.mouseLeftDown = False
# virtual func
def mouseLClick(self, event):
print('mouseLClick')
# virtual func
def mouseWheel(self, event):
print (event.delta)
def hitTestImageRect(self, event,pt):
if event.widget == self.canvas:
x1, y1 = 0, 0
x2, y2 = x1+self.imgResize[0], y1+self.imgResize[1]
x, y = pt
if (x1 < x and x < x2):
if (y1 < y and y < y2):
return True
return False
def onResize(self, event):
if event.widget == self.canvas:
self.canvas.update()
self.imgWidth = self.canvas.winfo_width()
self.imgHeight = self.canvas.winfo_height()
def onKey(self, event):
if event.char == event.keysym or len(event.char) == 1:
if event.keysym == 'space':
print("Space")
elif event.keysym == 'Escape':
self.root.destroy()
def onExit(self):
if messagebox.askyesno("Exit", "Do you want to quit the application?"):
self.root.destroy()
def run(self):
self.root.mainloop()
def defineLayout(self, widget, cols=1, rows=1):
for c in range(cols):
widget.columnconfigure(c, weight=1)
for r in range(rows):
widget.rowconfigure(r, weight=1)
def loadLayout(self):
align_mode = 'nswe'
self.imgWidth = self.root.width - self.padding*2 - self.border*2
self.imgHeight = self.root.height - self.btnHeight - self.msgHeight - self.padding*2 - self.border*2
self.canvas = tk.Canvas(self.root, width=self.imgWidth , height=self.imgHeight , bg='gray')
divBtnArea = tk.Frame(self.root, width=self.imgWidth , height=self.btnHeight , bg='white')
divMsg = tk.Frame(self.root, width=self.imgWidth , height=self.msgHeight , bg='black')
self.canvas.grid(row=0, column=0, padx=self.padding, pady=self.padding, sticky=align_mode)
divBtnArea.grid(row=1, column=0, padx=self.padding, pady=self.padding, sticky=align_mode)
divMsg.grid(row=2, column=0, padx=self.padding, pady=self.padding, sticky=align_mode)
self.defineLayout(self.root)
self.defineLayout(self.canvas)
self.defineLayout(divMsg)
self.root.rowconfigure(0, weight=1)
self.root.columnconfigure(0, weight=1)
self.btnPrev = tk.Button(divBtnArea, text='prev')
self.btnPrev.pack(side='left')
self.btnNext = tk.Button(divBtnArea, text='next')
self.btnNext.pack(side='left')
self.btnBrush = tk.Button(divBtnArea, text='brush')
self.btnBrush.pack(side='left')
self.btnBrushAdd = tk.Button(divBtnArea, text=' + ')
self.btnBrushAdd.pack(side='left')
self.btnBrushErase = tk.Button(divBtnArea, text=' - ')
self.btnBrushErase.pack(side='left')
self.btnBlend = tk.Button(divBtnArea, text='blend')
self.btnBlend.pack(side='left')
self.btnReset = tk.Button(divBtnArea, text='reset')
self.btnReset.pack(side='left')
self.btnSave = tk.Button(divBtnArea, text='save')
self.btnSave.pack(side='left')
# label as message
self.lblMsg = tk.Label(divMsg, text='show message here', bg='black', fg='white')
self.lblMsg.grid(row=0, column=0, sticky='w')
self.canvas.update()
self.imgWidth = self.canvas.winfo_width() - self.padding * 2
self.imgHeight = self.canvas.winfo_height() - self.padding * 5
print("image size =", self.imgWidth, self.imgHeight)
def showMessage(self, msg):
self.lblMsg['text'] = msg
# virtual func
def onPrev(self):
print('onPrev')
# virtual func
def onNext(self):
print('onNext')
# virtual func
def onBrush(self):
print('onBrush')
# virtual func
def onBrushAdd(self):
print('onBrushAdd')
# virtual func
def onBrushErase(self):
print('onBrushErase')
# virtual func
def onBlend(self):
print('onBlend')
def loadImage(self, path):
img = cv2.imread(path)
im = self.resize(img)
self.showMessage("file {0:s} loaded".format(path))
return im
# img : cv image
def updateImage(self, img, forceCreate=False):
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
im = Image.fromarray(img)
self.tkimage = ImageTk.PhotoImage(im)
if forceCreate or not self.cvImage:
if self.cvImage:
self.canvas.delete(self.cvImage)
self.cvImage = self.canvas.create_image(self.imageStartPos, image=self.tkimage, anchor = 'nw')
else:
self.canvas.itemconfig(self.cvImage, image = self.tkimage)
def resize(self, img):
self.imgResize = self.dimResize(img)
return cv2.resize(img, self.imgResize)
def dimResize(self, im):
tar_ratio = self.imgHeight / self.imgWidth
im_ratio = im.shape[0] / im.shape[1]
if tar_ratio > im_ratio:
# scale by width
width = self.imgWidth
height = round(width * im_ratio)
else:
# scale by height
height = self.imgHeight
width = round(height / im_ratio)
X = (self.imgWidth - width )//2 + self.padding*2
Y = (self.imgHeight - height)//2 + self.padding*2
self.imageStartPos = (X, Y)
#print(self.imageStartPos)
return (width, height)
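    # Worked example of the fit logic above: placing a 1920x1080 frame on an
    # 800x600 drawing area gives tar_ratio = 600/800 = 0.75 and
    # im_ratio = 1080/1920 = 0.5625; since tar_ratio > im_ratio the frame is
    # scaled by width to (800, 450) and centered vertically.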
if __name__ == '__main__':
program = PgmBase(tk.Tk(), width=800, height=600)
program.loadLayout()
program.bindBtnEvents()
# load image data
cwd = os.getcwd()
tiger = os.path.join(cwd, "data/tiger.jpeg")
program.loadImage(tiger)
program.run()
``` |
{
"source": "johnny-walker/tfoptflow",
"score": 2
} |
#### File: tfoptflow/tfoptflow/model_base.py
```python
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from ckpt_mgr import BestCheckpointSaver
from logger import OptFlowTBLogger
from dataset_base import _DBG_TRAIN_VAL_TEST_SETS
from lr import lr_multisteps_long, lr_multisteps_fine, lr_cyclic_long, lr_cyclic_fine
from mixed_precision import float32_variable_storage_getter
_DEBUG_USE_REF_IMPL = False
class ModelBase:
def __init__(self, name='base', mode='train_with_val', session=None, options=None):
"""Initialize the ModelBase object
Args:
mode: Must be in ['train_noval', 'val', 'train_with_val', 'test']
session: optional TF session
options: see _DEFAULT_PWCNET_TRAIN_OPTIONS comments
        Note:
As explained [here](https://stackoverflow.com/a/36282423), you don't need to use with blocks if you only
have one default graph and one default session. However, we sometimes create notebooks where we pit the
performance of models against each other. Because of that, we need the with block.
# tf.reset_default_graph()
# self.graph = tf.Graph()
# with self.graph.as_default():
"""
assert(mode in ['train_noval', 'train_with_val', 'val', 'val_notrain', 'test'])
self.mode, self.sess, self.opts = mode, session, options
self.y_hat_train_tnsr = self.y_hat_val_tnsr = self.y_hat_test_tnsr = None
self.name = name
self.num_gpus = len(self.opts['gpu_devices'])
self.dbg = False # Set this to True for a detailed log of operation
if _DBG_TRAIN_VAL_TEST_SETS != -1: # Debug mode only
if self.mode in ['train_noval', 'train_with_val']:
self.opts['display_step'] = 10 # show progress every 10 training batches
self.opts['snapshot_step'] = 100 # save trained model every 100 training batches
                self.opts['val_step'] = 100  # Test trained model on validation split every 100 training batches
                if self.opts['lr_policy'] == 'multisteps':
self.opts['lr_boundaries'] = [int(boundary / 1000) for boundary in self.opts['lr_boundaries']]
self.opts['max_steps'] = self.opts['lr_boundaries'][-1]
else:
self.opts['cyclic_lr_stepsize'] = 50
self.opts['max_steps'] = 500 # max number of training iterations (i.e., batches to run)
tf.reset_default_graph()
self.graph = tf.Graph()
with self.graph.as_default():
# Configure a TF session, if one doesn't already exist
self.config_session(session)
# Build the TF graph
self.build_graph()
###
# Session mgmt
###
def config_session(self, sess):
"""Configure a TF session, if one doesn't already exist.
Args:
sess: optional TF session
"""
if sess is None:
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
#if self.dbg:
# config.log_device_placement = True
config.allow_soft_placement = True
self.sess = tf.Session(config=config)
else:
self.sess = sess
tf.logging.set_verbosity(tf.logging.INFO)
###
# Training-specific helpers
###
def config_train_ops(self):
"""Configure training ops. Override this to train your model.
Called by the base class when building the TF graph to setup all the training ops, including:
- setting up loss computations,
- setting up metrics computations,
- selecting an optimizer,
- creating a training schedule.
"""
raise NotImplementedError
def config_loggers(self):
"""Configure train logger and, optionally, val logger.
"""
if self.mode == 'train_with_val':
self.tb_train = OptFlowTBLogger(self.opts['ckpt_dir'], 'train')
self.tb_val = OptFlowTBLogger(self.opts['ckpt_dir'], 'val')
elif self.mode == 'train_noval':
self.tb_train = OptFlowTBLogger(self.opts['ckpt_dir'], 'train')
###
# Checkpoint mgmt
###
def init_saver(self):
"""Creates a default saver to load/save model checkpoints. Override, if necessary.
"""
if self.mode in ['train_noval', 'train_with_val']:
self.saver = BestCheckpointSaver(self.opts['ckpt_dir'], self.name, self.opts['max_to_keep'], maximize=False)
else:
self.saver = tf.train.Saver()
def save_ckpt(self, ranking_value=0):
"""Save a model checkpoint
Args:
ranking_value: The ranking value by which to rank the checkpoint.
"""
assert(self.mode in ['train_noval', 'train_with_val'])
if self.opts['verbose']:
print("Saving model...")
# save_path = self.saver.save(self.sess, self.opts['ckpt_dir'] + self.name, self.g_step_op)
save_path = self.saver.save(ranking_value, self.sess, self.g_step_op)
if self.opts['verbose']:
if save_path is None:
msg = f"... model wasn't saved -- its score ({ranking_value:.2f}) doesn't outperform other checkpoints"
else:
msg = f"... model saved in {save_path}"
print(msg)
def load_ckpt(self):
"""Load a model checkpoint
In train mode, load the latest checkpoint from the checkpoint folder if it exists; otherwise, run initializer.
In other modes, load from the specified checkpoint file.
"""
if self.mode in ['train_noval', 'train_with_val']:
self.last_ckpt = None
if self.opts['train_mode'] == 'fine-tune':
# In fine-tuning mode, we just want to load the trained params from the file and that's it...
assert(tf.train.checkpoint_exists(self.opts['ckpt_path']))
if self.opts['verbose']:
print(f"Initializing from pre-trained model at {self.opts['ckpt_path']} for finetuning...\n")
# ...however, the AdamOptimizer also stores variables in the graph, so reinitialize them as well
self.sess.run(tf.variables_initializer(self.optim.variables()))
# Now initialize the trained params with actual values from the checkpoint
_saver = tf.train.Saver(var_list=tf.trainable_variables())
_saver.restore(self.sess, self.opts['ckpt_path'])
if self.opts['verbose']:
print("... model initialized")
self.last_ckpt = self.opts['ckpt_path']
else:
# In training mode, we either want to start a new training session or resume from a previous checkpoint
self.last_ckpt = self.saver.best_checkpoint(self.opts['ckpt_dir'], maximize=False)
if self.last_ckpt is None:
self.last_ckpt = tf.train.latest_checkpoint(self.opts['ckpt_dir'])
if self.last_ckpt:
# We're resuming a session -> initialize the graph with the content of the checkpoint
if self.opts['verbose']:
print(f"Initializing model from previous checkpoint {self.last_ckpt} to resume training...\n")
self.saver.restore(self.sess, self.last_ckpt)
if self.opts['verbose']:
print("... model initialized")
else:
# Initialize all the variables of the graph
if self.opts['verbose']:
print(f"Initializing model with random values for initial training...\n")
assert (self.mode in ['train_noval', 'train_with_val'])
self.sess.run(tf.global_variables_initializer())
if self.opts['verbose']:
print("... model initialized")
else:
# Initialize the graph with the content of the checkpoint
self.last_ckpt = self.opts['ckpt_path']
assert(self.last_ckpt is not None)
if self.opts['verbose']:
print(f"Loading model checkpoint {self.last_ckpt} for eval or testing...\n")
self.saver.restore(self.sess, self.last_ckpt)
if self.opts['verbose']:
print("... model loaded")
# frozen and summary
#tf.io.write_graph(self.sess.graph_def, "./models_frozen/", "unfrozen.pb", as_text=False)
#tf.summary.FileWriter('./logs', graph=self.sess.graph_def)
frozen = tf.graph_util.convert_variables_to_constants(self.sess, self.sess.graph_def, ["pwcnet/flow_pred"])
tf.summary.FileWriter('./logs', graph=frozen)
tf.io.write_graph(frozen, "./models_frozen/", "frozen.pb", as_text=False)
###
# Model mgmt
###
def build_model(self):
"""Build model. Override this.
"""
raise NotImplementedError
def set_output_tnsrs(self):
"""Initialize output tensors. Override this.
"""
raise NotImplementedError
###
# Graph mgmt
###
def config_placeholders(self):
"""Configure input and output tensors
Args:
x_dtype, x_shape: type and shape of elements in the input tensor
            y_dtype, y_shape: type and shape of elements in the output (label) tensor
"""
# Increase the batch size with the number of GPUs dedicated to computing TF ops
batch_size = self.num_gpus * self.opts['batch_size']
self.x_tnsr = tf.placeholder(self.opts['x_dtype'], [batch_size] + self.opts['x_shape'], 'x_tnsr')
self.y_tnsr = tf.placeholder(self.opts['y_dtype'], [batch_size] + self.opts['y_shape'], 'y_tnsr')
def build_graph(self):
""" Build the complete graph in TensorFlow
"""
# with tf.device(self.main_device):
# Configure input and output tensors
self.config_placeholders()
# Build the backbone network, then:
# In training mode, configure training ops (loss, metrics, optimizer, and lr schedule)
# Also, config train logger and, optionally, val logger
# In validation mode, configure validation ops (loss, metrics)
if self.mode in ['train_noval', 'train_with_val']:
if self.opts['use_mixed_precision'] is True:
with tf.variable_scope('fp32_vars', custom_getter=float32_variable_storage_getter):
if self.num_gpus == 1:
self.build_model()
self.config_train_ops()
else:
self.build_model_towers()
else:
if self.num_gpus == 1:
self.build_model()
self.config_train_ops()
else:
self.build_model_towers()
self.config_loggers()
elif self.mode in ['val', 'val_notrain']:
if self.opts['use_mixed_precision'] is True:
with tf.variable_scope('fp32_vars', custom_getter=float32_variable_storage_getter):
self.build_model()
self.setup_metrics_ops()
else:
self.build_model()
self.setup_metrics_ops()
else: # inference mode
if self.opts['use_mixed_precision'] is True:
with tf.variable_scope('fp32_vars', custom_getter=float32_variable_storage_getter):
self.build_model()
else:
self.build_model()
# Set output tensors
self.set_output_tnsrs()
# Init saver (override if you wish) and load checkpoint if it exists
self.init_saver()
self.load_ckpt()
###
# Sample mgmt (preprocessing and postprocessing)
###
def adapt_x(self, x):
"""Preprocess the input samples to adapt them to the network's requirements
Here, x, is the actual data, not the x TF tensor. Override as necessary.
Args:
x: input samples
Returns:
Samples ready to be given to the network (w. same shape as x) and companion adaptation info
"""
return x, None
def adapt_y(self, y):
"""Preprocess the labels to adapt them to the loss computation requirements of the network
Here, y, is the actual data, not the y TF tensor. Override as necessary.
Args:
y: training labels
Returns:
            Labels ready to be used by the network's loss function (w. same shape as y) and companion adaptation info
"""
return y, None
def postproc_y_hat(self, y_hat):
"""Postprocess the predictions coming from the network. Override as necessary.
Here, y_hat, is the actual data, not the y_hat TF tensor.
Args:
y_hat: predictions
Returns:
Postprocessed labels
"""
return y_hat
###
# Learning rate helpers
###
def setup_lr_sched(self):
"""Setup a learning rate training schedule and setup the global step. Override as necessary.
"""
assert (self.opts['lr_policy'] in [None, 'multisteps', 'cyclic'])
self.g_step_op = tf.train.get_or_create_global_step()
# Use a set learning rate, if requested
if self.opts['lr_policy'] is None:
self.lr = tf.constant(self.opts['init_lr'])
return
# Use a learning rate schedule, if requested
assert (self.opts['train_mode'] in ['train', 'fine-tune'])
if self.opts['lr_policy'] == 'multisteps':
boundaries = self.opts['lr_boundaries']
values = self.opts['lr_values']
if self.opts['train_mode'] == 'train':
self.lr = lr_multisteps_long(self.g_step_op, boundaries, values)
else:
self.lr = lr_multisteps_fine(self.g_step_op, boundaries, values)
else:
lr_base = self.opts['cyclic_lr_base']
lr_max = self.opts['cyclic_lr_max']
lr_stepsize = self.opts['cyclic_lr_stepsize']
if self.opts['train_mode'] == 'train':
self.lr = lr_cyclic_long(self.g_step_op, lr_base, lr_max, lr_stepsize)
else:
self.lr = lr_cyclic_fine(self.g_step_op, lr_base, lr_max, lr_stepsize)
###
# Debug utils
###
def summary(self):
model_vars = tf.trainable_variables()
slim.model_analyzer.analyze_vars(model_vars, print_info=True)
def print_config(self):
"""Display configuration values.
Ref:
- How to count total number of trainable parameters in a tensorflow model?
https://stackoverflow.com/questions/38160940/how-to-count-total-number-of-trainable-parameters-in-a-tensorflow-model
"""
with self.graph.as_default():
print("\nModel Configuration:")
for k, v in self.opts.items():
if self.mode in ['train_noval', 'train_with_val']:
if self.opts['lr_policy'] == 'multisteps':
if k in ['init_lr', 'cyclic_lr_max', 'cyclic_lr_base', 'cyclic_lr_stepsize']:
continue
if self.opts['lr_policy'] == 'cyclic':
if k in ['init_lr', 'lr_boundaries', 'lr_values']:
continue
print(f" {k:22} {v}")
print(f" {'mode':22} {self.mode}")
# if self.mode in ['train_noval', 'train_with_val']:
#if self.dbg:
# self.summary()
print(f" {'trainable params':22} {np.sum([np.prod(v.shape) for v in tf.trainable_variables()])}")
``` |
{
"source": "JohnnyWang1998/ukraineRussiaTweetsSentimentAnalysis",
"score": 4
} |
#### File: src/data/extract_media_data.py
```python
from bs4 import BeautifulSoup
import urllib.request
import pandas as pd
def get_media_dict(url: str) -> dict:
"""
Get a dictionary of media accounts from website url.
@param url: website url
return: a dictionary of media accounts
"""
    soup = BeautifulSoup(urllib.request.urlopen(url).read(), 'html.parser')
media_account_dict = {}
# get the table with media account information
account_table = soup.findAll('table')[0].tbody
for (i, row) in enumerate(account_table.findAll('tr')):
# skip the table head
if i == 0:
continue
# get name of each row
user_name = row.findAll('td')[1].text
real_name = row.findAll('td')[2].text
# add names without @
media_account_dict[real_name] = user_name[1:]
return media_account_dict
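# The returned mapping is {real_name: user_name_without_@}; e.g. a table row
# holding "@SomeOutlet" and "Some Outlet" would contribute
# {'Some Outlet': 'SomeOutlet'} (example values are illustrative only).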
def export_media_list(url: str):
"""
Export a list of media accounts from website url.
@param url: website url
"""
media_account_dict = get_media_dict(url)
# export to a csv file
media_df = pd.DataFrame(list(zip(media_account_dict.keys(), media_account_dict.values())),
columns=['real_name', 'user_name'])
media_df.head()
media_df.to_csv('../data/external/media_accounts.csv', index=False)
```
#### File: src/data/make_dataset.py
```python
import pandas as pd
from tqdm import tqdm
import gzip
import emoji
import httpx
from googletrans import Translator
import time
import ast
def get_dataset(path, file_list):
    final_df = pd.DataFrame()  # initiate an empty dataframe to append (concat) into
with tqdm(total=len(file_list)) as pbar: # tqdm shows you the progress of looping
for file in file_list:
with gzip.open(path + '/' + file) as f:
df = pd.read_csv(f)
df.drop(['Unnamed: 0',
'acctdesc',
'location',
'tweetid',
'coordinates',
'extractedts'],
axis=1,
inplace=True) # drop useless columns
df['usercreatedts'] = pd.to_datetime(df['usercreatedts']) # convert to datetime
df = df[df['usercreatedts'].dt.year != 2022] # filter out accounts created recently in 2022
df = df.drop_duplicates(subset=['text'], keep='first') # drop duplicated retweets, keep first
# filter out None English and Russian language tweets
df = df[(df['language'] == 'en') | (df['language'] == 'ru')]
# only keep accounts that have over 200 followers/totaltweets
df = df[(df['followers'] >= 200) & (df['totaltweets'] >= 200)]
final_df = pd.concat([final_df, df], ignore_index=True) # use concat because it's faster than append
pbar.update(1)
final_df = final_df.drop_duplicates(subset=['text'], keep='first') # drop retweets across daily borders
final_df.reset_index(drop=True, inplace=True) # reset index
return final_df
def get_all_emoji(df, language):
res = {}
emoji_collection = emoji.UNICODE_EMOJI[language]
for line in df.text.tolist():
for c in line:
if c in emoji_collection:
if c in res:
res[c] += 1
else:
res[c] = 1
return pd.DataFrame.from_dict({'emoji': list(res.keys()),
'UNICODE': [emoji.demojize(emo, language='en') for emo in list(res.keys())],
'count': list(res.values())}).sort_values('count', ascending=False).reset_index(
drop=True)
def count_hashtags(df):
counts = {}
for tags in df.hashtags.tolist():
for tag in tags:
if tag in counts:
counts[tag] += 1
else:
counts[tag] = 1
return pd.DataFrame.from_dict(counts, orient='index', columns=['hashtag']).sort_values(['hashtag'], ascending=False)
def run_translate(df):
result = []
timeout = httpx.Timeout(10) # increase timeout to 10 sec
translator = Translator(timeout=timeout)
with tqdm(total=len(df.text.tolist())) as pbar:
for i, line in enumerate(df.text.tolist()):
try:
res = translator.translate(line, src='ru', dest='en').text
except TypeError: # Handle weird Json TypeError
res = 'TypeError'
result.append(res)
if (i + 1) % 4 == 0:
time.sleep(1) # limit api calls to under 5 per second
pbar.update(1)
return result
def process_new_data(df):
df.drop(['_type', 'url', 'renderedContent', 'id', 'replyCount', 'quoteCount', 'conversationId', 'source',
'sourceUrl', 'sourceLabel', 'outlinks', 'tcooutlinks', 'retweetedTweet', 'media', 'quotedTweet',
'inReplyToTweetId', 'inReplyToUser', 'mentionedUsers', 'coordinates', 'place', 'cashtags', 'Searh'],
axis=1,
inplace=True,
errors='ignore') # drop useless columns
df['hashtags'] = df['hashtags'].apply(lambda d: d if isinstance(d, str) else "[]") # impute NaN with string "[]"
df['hashtags'] = df['hashtags'].apply(lambda x: ast.literal_eval(x)) # use literal_eval to turn str to list
df['hashtags'] = df['hashtags'].map(lambda x: list(map(str.lower, x))) # hashtags to lower case
df['user'] = df['user'].apply(lambda x: ast.literal_eval(x)) # turn str to dict
from_list = ['created', 'friendsCount', 'followersCount', 'statusesCount', 'id', 'username']
to_list = ['usercreatedts', 'following', 'followers', 'totaltweets', 'userid', 'username']
for x, y in zip(from_list, to_list):
df[y] = df['user'].apply(lambda z: z[x]) # extract features from 'user' column
df.drop(['user'], axis=1, inplace=True)
df.rename(columns={"date": "tweetcreatedts",
"content": "text",
"retweetCount": "retweetcount",
"likeCount": "favorite_count",
"lang": "language"}, inplace=True) # rename columns to match the original dataset
df['usercreatedts'] = pd.to_datetime(df['usercreatedts'])
df['usercreatedts'] = df['usercreatedts'].dt.tz_localize(None)
df['tweetcreatedts'] = pd.to_datetime(df['tweetcreatedts'])
df['tweetcreatedts'] = df['tweetcreatedts'].dt.tz_localize(None) # format time
df = df[['userid', 'username', 'following', 'followers',
'totaltweets', 'usercreatedts', 'tweetcreatedts',
'retweetcount', 'text', 'hashtags', 'language',
'favorite_count']] # rearrange column order
df.sort_values(by=['tweetcreatedts'], inplace=True) # sort by tweetcreatedts
df.reset_index(inplace=True, drop=True)
return df
```
#### File: src/models/ngram.py
```python
import numpy as np
def make_ngrams(tokens: list, n: int) -> list:
"""Creates n-grams for the given token sequence.
Args:
tokens (list): a list of tokens as strings
n (int): the length of n-grams to create
Returns:
list: list of tuples of strings, each tuple being one of the individual n-grams
"""
n_grams = []
for token in tokens:
word_list = token.split(" ") # split with whitespace
# initialize index
starting_index = 0
end_index = starting_index + n - 1
# use sliding window to append tuples of length n to n_grams
while end_index < len(word_list):
n_grams.append(tuple(word_list[starting_index: starting_index + n]))
starting_index += 1
end_index += 1
return n_grams
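# Example of the sliding window above:
#   make_ngrams(["<s> the cat sat </s>"], 2)
#   -> [('<s>', 'the'), ('the', 'cat'), ('cat', 'sat'), ('sat', '</s>')]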
def train_ngram(content, n_gram):
# Get the count of each word
UNK = "<UNK>"
word_count = {}
for line in content:
for word in line:
if word in word_count.keys():
word_count[word] += 1
else:
word_count[word] = 1
# Replace the words with <UNK> if count is < threshold(=1)
UNK_count_dict = dict(
filter(lambda elem: elem[1] == 1, word_count.items())) # get dictionary of words whose count == 1
word_count[UNK] = len(UNK_count_dict) # add UNK to word_count
for temp_key in UNK_count_dict.keys(): # pop count == 1 words from word_count
word_count.pop(temp_key, None)
# make use of make_n_grams function
n_gram_counts = {}
for line in content:
for ngram_tuple in make_ngrams(line, n_gram):
if ngram_tuple in n_gram_counts.keys():
n_gram_counts[ngram_tuple] += 1
else:
n_gram_counts[ngram_tuple] = 1
# Get the training data vocabulary
vocab = list(word_count.keys())
# For n>1 grams compute n-1 gram counts to compute probability
n_minus_1_gram_counts = {}
if n_gram > 1:
for line in content:
for n_minus_1_gram_tuple in make_ngrams(line, n_gram - 1):
if n_minus_1_gram_tuple in n_minus_1_gram_counts.keys():
n_minus_1_gram_counts[n_minus_1_gram_tuple] += 1
else:
n_minus_1_gram_counts[n_minus_1_gram_tuple] = 1
return n_gram_counts, vocab, n_minus_1_gram_counts, word_count
def generate_sentence(n_gram_counts, vocab, n_minus_1_gram_counts, word_count, n_gram=5, max_length=20):
"""Generates a single sentence from a trained language model using the Shannon technique.
Returns:
str: the generated sentence
"""
# Start with <s> and randomly generate words until we encounter sentence end
# Append sentence begin markers for n>2
# Keep track of previous word for stop condition
SENT_BEGIN = "<s>"
SENT_END = "</s>"
n = n_gram
sentence = [SENT_BEGIN]
if n > 2:
for i in range(0, n - 2):
sentence.insert(0, SENT_BEGIN)
if n > 1:
while sentence[-1:][0] != SENT_END and len(sentence) <= max_length:
# Construct the (n-1) gram so far
n_minus_one = tuple(sentence[-(n - 1):])
# Get the counts of all available choices based on n-1 gram
choices_and_counts = {}
for key in n_gram_counts:
if n_minus_one == key[:n - 1]:
choice = list(key[-1:])[0]
count = n_gram_counts[key]
choices_and_counts[choice] = count
# Convert the counts into probability for random.choice() function
temp_sum = sum(list(choices_and_counts.values()))
for choice in choices_and_counts:
choices_and_counts[choice] = (choices_and_counts[choice]) / temp_sum
while True:
                word_generated = np.random.choice(list(choices_and_counts.keys()), 1,
                                                  p=list(choices_and_counts.values())).astype(str)
if word_generated != SENT_BEGIN:
break
sentence.append(word_generated[0])
# If <s> is generated, ignore and generate another word
else:
# In case of unigram model, n-1 gram is just the previous word and possible choice is whole vocabulary
while sentence[-1:][0] != SENT_END:
# Convert the counts into probability for random.choice() function
temp_sum = sum(list(word_count.values()))
for choice in word_count:
word_count[choice] = word_count[choice] / temp_sum
while True:
                word_generated = np.random.choice(list(word_count.keys()), 1, p=list(word_count.values())).astype(str)
if word_generated != SENT_BEGIN:
break
sentence.append(word_generated[0])
# If <s> is generated, ignore and generate another word
# Append sentence end markers for n>2
if n > 2:
for i in range(0, n - 2):
sentence.append(SENT_END)
return ' '.join(word for word in sentence)
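# Illustrative end-to-end usage (the toy corpus below is an assumption added for
# clarity; `content` is expected to be a list of token lists):
#
#   corpus = [["<s>", "stop", "the", "war", "</s>"],
#             ["<s>", "end", "the", "war", "</s>"]]
#   counts, vocab, nm1_counts, word_counts = train_ngram(corpus, 2)
#   print(generate_sentence(counts, vocab, nm1_counts, word_counts, n_gram=2))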
``` |
{
"source": "johnny-wang/indoor_mapping_robot",
"score": 2
} |
#### File: xbox_controller/scripts/controller_output.py
```python
import rospy
from std_msgs.msg import Float32
from sensor_msgs.msg import Joy
def joy_callback(joy_data):
left_updown = Float32()
left_updown.data = joy_data.axes[1]
pub_left_updown.publish(left_updown)
left_leftright = Float32()
left_leftright.data = joy_data.axes[0]
pub_left_leftright.publish(left_leftright)
right_updown = Float32()
right_updown.data = joy_data.axes[3]
pub_right_updown.publish(right_updown)
right_leftright = Float32()
right_leftright.data = joy_data.axes[2]
pub_right_leftright.publish(right_leftright)
def joy_xbox_controller():
global pub_left_updown, pub_left_leftright
global pub_right_updown, pub_right_leftright
topic_l_updown = 'xbox_controller/left_stick_updown'
topic_l_leftright = 'xbox_controller/left_stick_leftright'
topic_r_updown = 'xbox_controller/right_stick_updown'
topic_r_leftright = 'xbox_controller/right_stick_leftright'
pub_left_updown = rospy.Publisher(topic_l_updown, Float32, queue_size=10)
pub_left_leftright = rospy.Publisher(topic_l_leftright, Float32, queue_size=10)
pub_right_updown = rospy.Publisher(topic_r_updown, Float32, queue_size=10)
pub_right_leftright = rospy.Publisher(topic_r_leftright, Float32, queue_size=10)
rospy.Subscriber('/joy', Joy, joy_callback)
rospy.init_node('XboxControllerOuput')
rospy.spin()
if __name__ == '__main__':
joy_xbox_controller()
``` |
{
"source": "johnny-wang/staketaxcsv",
"score": 3
} |
#### File: src/osmo/api_historical.py
```python
import logging
import requests
import time
import urllib.parse
OSMO_HISTORICAL_NODE = "https://api-osmosis.imperator.co"
class OsmoHistoricalAPI:
@classmethod
def get_symbol(cls, ibc_address):
uri = "/search/v1/symbol?denom={}".format(urllib.parse.quote(ibc_address))
data = cls._query(uri)
if "symbol" in data:
return data["symbol"]
else:
return None
@classmethod
def _query(cls, uri):
url = "{}{}".format(OSMO_HISTORICAL_NODE, uri)
logging.info("Querying url=%s...", url)
response = requests.get(url)
data = response.json()
time.sleep(1)
return data
```
#### File: src/osmo/make_tx.py
```python
from common.make_tx import (
make_swap_tx, make_reward_tx, make_transfer_in_tx, make_transfer_out_tx,
make_unknown_tx, make_unknown_tx_with_transfer, _make_tx_exchange
)
from osmo import util_osmo
def _edit_row(row, txinfo, msginfo):
row.txid = txinfo.txid + "-" + str(msginfo.msg_index)
if msginfo.msg_index > 0:
row.fee = ""
row.fee_currency = ""
def make_osmo_tx(txinfo, msginfo, sent_amount, sent_currency, received_amount, received_currency,
txid=None, empty_fee=False):
tx_type = util_osmo._make_tx_type(msginfo)
row = _make_tx_exchange(
txinfo, sent_amount, sent_currency, received_amount, received_currency, tx_type,
txid=txid, empty_fee=empty_fee)
_edit_row(row, txinfo, msginfo)
return row
def make_osmo_simple_tx(txinfo, msginfo):
row = make_osmo_tx(txinfo, msginfo, "", "", "", "")
return row
def make_osmo_swap_tx(txinfo, msginfo, sent_amount, sent_currency, received_amount, received_currency):
row = make_swap_tx(txinfo, sent_amount, sent_currency, received_amount, received_currency)
_edit_row(row, txinfo, msginfo)
return row
def make_osmo_reward_tx(txinfo, msginfo, reward_amount, reward_currency):
row = make_reward_tx(txinfo, reward_amount, reward_currency)
_edit_row(row, txinfo, msginfo)
return row
def make_osmo_transfer_out_tx(txinfo, msginfo, sent_amount, sent_currency, dest_address=None):
row = make_transfer_out_tx(txinfo, sent_amount, sent_currency, dest_address)
_edit_row(row, txinfo, msginfo)
return row
def make_osmo_transfer_in_tx(txinfo, msginfo, received_amount, received_currency):
row = make_transfer_in_tx(txinfo, received_amount, received_currency)
_edit_row(row, txinfo, msginfo)
return row
def make_osmo_unknown_tx(txinfo, msginfo):
row = make_unknown_tx(txinfo)
_edit_row(row, txinfo, msginfo)
return row
def make_osmo_unknown_tx_with_transfer(txinfo, msginfo, sent_amount, sent_currency, received_amount,
received_currency, empty_fee=False, z_index=0):
row = make_unknown_tx_with_transfer(
txinfo, sent_amount, sent_currency, received_amount, received_currency, empty_fee, z_index)
_edit_row(row, txinfo, msginfo)
return row
def make_osmo_lp_deposit_tx(txinfo, msginfo, sent_amount, sent_currency, lp_amount, lp_currency, empty_fee=False):
row = make_osmo_tx(txinfo, msginfo, sent_amount, sent_currency, lp_amount, lp_currency,
txid=None, empty_fee=empty_fee)
return row
def make_osmo_lp_withdraw_tx(txinfo, msginfo, lp_amount, lp_currency, received_amount, received_currency,
empty_fee=False):
row = make_osmo_tx(txinfo, msginfo, lp_amount, lp_currency, received_amount, received_currency,
txid=None, empty_fee=empty_fee)
return row
def make_osmo_lp_stake_tx(txinfo, msginfo, lp_amount, lp_currency):
row = make_osmo_tx(txinfo, msginfo, lp_amount, lp_currency, "", "")
return row
def make_osmo_lp_unstake_tx(txinfo, msginfo, lp_amount, lp_currency):
row = make_osmo_tx(txinfo, msginfo, "", "", lp_amount, lp_currency)
return row
```
#### File: src/osmo/processor.py
```python
import logging
import pprint
from datetime import datetime
from osmo.TxInfoOsmo import TxInfoOsmo, MsgInfo
from osmo.handle_unknown import handle_unknown_detect_transfers
from osmo.handle_general import (
handle_simple, handle_simple_outbound, handle_transfer_ibc, handle_failed_tx, handle_transfer
)
from osmo.handle_staking import handle_staking
from osmo.handle_swap import handle_swap
from osmo import util_osmo
from osmo import constants as co
from osmo.handle_lp import (
handle_lp_deposit, handle_lp_stake, handle_lp_unstake, handle_lp_withdraw,
handle_lp_deposit_partial)
def process_txs(wallet_address, elems, exporter):
for i, elem in enumerate(elems):
process_tx(wallet_address, elem, exporter)
def process_tx(wallet_address, elem, exporter):
txinfo = _parse_tx(elem, wallet_address)
# Detect failed transaction
if elem["code"] > 0:
handle_failed_tx(exporter, txinfo)
return txinfo
for msginfo in txinfo.msgs:
_handle_message(exporter, txinfo, msginfo)
return txinfo
def _handle_message(exporter, txinfo, msginfo):
try:
msg_type = util_osmo._msg_type(msginfo)
# simple transactions, that are typically ignored
if msg_type in [co.MSG_TYPE_VOTE, co.MSG_TYPE_SET_WITHDRAW_ADDRESS, co.MSG_TYPE_BEGIN_UNLOCKING]:
# 0 transfers
handle_simple(exporter, txinfo, msginfo)
elif msg_type in [co.MSG_TYPE_SUBMIT_PROPOSAL, co.MSG_TYPE_DEPOSIT]:
# 1 outbound transfer
handle_simple_outbound(exporter, txinfo, msginfo)
elif msg_type in [co.MSG_TYPE_UPDATE_CLIENT, co.MSG_TYPE_ACKNOWLEDGMENT]:
pass
# staking rewards
elif msg_type in [co.MSG_TYPE_DELEGATE, co.MSG_TYPE_REDELEGATE, co.MSG_TYPE_WITHDRAW_REWARD,
co.MSG_TYPE_WITHDRAW_COMMISSION, co.MSG_TYPE_UNDELEGATE]:
handle_staking(exporter, txinfo, msginfo)
# transfers
elif msg_type in [co.MSG_TYPE_IBC_TRANSFER, co.MSG_TYPE_MSGRECVPACKET]:
handle_transfer_ibc(exporter, txinfo, msginfo)
elif msg_type == co.MSG_TYPE_SEND:
handle_transfer(exporter, txinfo, msginfo)
# swaps
elif msg_type == co.MSG_TYPE_SWAP_IN:
handle_swap(exporter, txinfo, msginfo)
# lp transactions
elif msg_type == co.MSG_TYPE_JOIN_POOL:
handle_lp_deposit(exporter, txinfo, msginfo)
elif msg_type == co.MSG_TYPE_JOIN_SWAP_EXTERN_AMOUNT_IN:
handle_lp_deposit_partial(exporter, txinfo, msginfo)
elif msg_type == co.MSG_TYPE_EXIT_POOL:
handle_lp_withdraw(exporter, txinfo, msginfo)
elif msg_type == co.MSG_TYPE_LOCK_TOKENS:
handle_lp_stake(exporter, txinfo, msginfo)
else:
handle_unknown_detect_transfers(exporter, txinfo, msginfo)
except Exception as e:
logging.error(
"Exception when handling txid=%s, exception=%s", txinfo.txid, str(e))
handle_unknown_detect_transfers(exporter, txinfo, msginfo)
return txinfo
def _parse_tx(elem, wallet_address):
txid = elem["txhash"]
timestamp = datetime.strptime(
elem["timestamp"], "%Y-%m-%dT%H:%M:%SZ").strftime("%Y-%m-%d %H:%M:%S")
fee, fee_currency = _fee(elem)
# Construct list of MsgInfo's
msgs = []
for i in range(len(elem["logs"])):
message = elem["tx"]["body"]["messages"][i]
log = elem["logs"][i]
transfers = util_osmo._transfers(log, wallet_address)
msginfo = MsgInfo(message, transfers, i, log)
msgs.append(msginfo)
txinfo = TxInfoOsmo(txid, timestamp, fee, wallet_address, msgs)
return txinfo
def _fee(elem):
fees = elem["tx"]["auth_info"]["fee"]["amount"]
if len(fees) == 0:
return "", ""
first_fee = fees[0]
fee_amount = float(first_fee["amount"]) / co.MILLION
fee_currency = co.CUR_OSMO
if not fee_amount:
return "", ""
return fee_amount, fee_currency
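# Example of the fee conversion above: a tx whose first fee entry has
# amount "1500" (in uosmo) yields 1500 / 1,000,000 = 0.0015 OSMO
# (assuming co.MILLION == 10**6, as the name suggests).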
``` |
{
"source": "johnnywell/django-dynoforms",
"score": 2
} |
#### File: django-dynoforms/dynoforms/models.py
```python
from django.conf import settings
from django.db import models
from django.db.models import permalink
from django.contrib.postgres.fields import JSONField
from django.core.serializers.json import DjangoJSONEncoder
from django.utils.translation import ugettext_lazy as _
class BaseModel(models.Model):
created = models.DateTimeField(_("Created at"), auto_now_add=True)
updated = models.DateTimeField(_("Last update"), auto_now=True)
owner = models.ForeignKey(settings.AUTH_USER_MODEL,
help_text="Who created it")
deleted = models.BooleanField(_('deleted'), default=False, editable=False)
class Meta(object):
abstract = True
class Schema(BaseModel):
name = models.CharField(_('name'), max_length=30,
help_text=_('A name to identify the schema'))
description = models.TextField(
_('description'),
help_text=_('A description to guide the form usage'),
blank=True,
)
fields = JSONField(_('fields'), encoder=DjangoJSONEncoder)
def __str__(self):
return self.name
@permalink
def new_entry_url(self):
return ('dynoforms-new-entry', [self.pk])
class Entry(BaseModel):
schema = models.ForeignKey(Schema)
data = JSONField(_('data'), encoder=DjangoJSONEncoder, blank=True)
@permalink
def get_absolute_url(self):
return ('dynoforms-entry-detail', [self.pk])
```
#### File: django-dynoforms/dynoforms/views.py
```python
from django.views.generic import ListView, DetailView, CreateView
from django.shortcuts import get_object_or_404
from dynoforms.models import Schema, Entry
from dynoforms.forms import DynoForm
class Schemes(ListView):
model = Schema
template_name = 'dynoforms/schemes.html'
context_object_name = 'schemes'
class EntryDetail(DetailView):
model = Entry
    template_name = 'dynoforms/entry_detail.html'
class NewEntry(CreateView):
model = Entry
form_class = DynoForm
template_name = 'dynoforms/form.html'
def get_context_data(self, **kwargs):
context = {}
context['schema'] = get_object_or_404(Schema, pk=self.kwargs['pk'])
context.update(kwargs)
return super(NewEntry, self).get_context_data(**context)
def get_form_kwargs(self):
kwargs = super(NewEntry, self).get_form_kwargs()
if self.kwargs['pk']:
kwargs.update({'schema': get_object_or_404(
Schema, pk=self.kwargs['pk'])
})
kwargs['owner'] = self.request.user
return kwargs
``` |
{
"source": "johnnywell/liasis",
"score": 3
} |
#### File: liasis/core/event.py
```python
from datetime import datetime
from typing import NewType, Optional
from uuid import UUID
EventId = NewType('EventId', UUID)
class EventMetaClass(type):
def __new__(mcls, name, bases, attrs):
return super().__new__(mcls, name, bases, attrs)
@classmethod
def __prepare__(mcls, name, bases, **kwargs):
return super().__prepare__(mcls, name, bases, **kwargs)
class Event(metaclass=EventMetaClass):
def __init__(self,
id: EventId,
occurred_on: datetime,
version: Optional[int] = None,
):
self.id = id
self.occurred_on = occurred_on
self.version = version
def __eq__(self, other):
return self.id == other.id
def __repr__(self):
class_name = self.__class__.__name__
return f'<{class_name}: {self.id}>'
```
#### File: liasis/core/protocols.py
```python
from abc import abstractmethod
from functools import reduce
from typing import List, Any, Dict, Type, Set, ClassVar, TypeVar, Union, Optional, Iterable
from typing_extensions import Protocol
from liasis.core.errors import InvalidEventError, InvalidEventVersionError, InvalidEventEntityError, NotHandledError
from liasis.core.data import Response, Request
from liasis.core.entity import EntityId, Entity
from liasis.core.event import Event
class Adapter(Protocol):
following: Optional['Adapter']
@abstractmethod
def can_handle(self, response: Response) -> bool:
raise NotImplementedError
@abstractmethod
def handle(self, response: Response) -> Any:
raise NotImplementedError
def __call__(self, response: Response) -> Any:
if self.can_handle(response):
return self.handle(response)
if self.following:
return self.following(response)
raise NotHandledError('The response was not handled by any adapter.', response)
class Presenter:
    adapters: Iterable[Type[Adapter]]
def __init__(self):
self.chain = reduce(lambda f, n: n(f), self.adapters[::-1], None)
def __call__(self, response: Response) -> Any:
try:
return self.chain(response)
except NotHandledError:
raise
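# Hypothetical adapter for illustration only (JsonAdapter is not part of this
# package, and the attributes read from Response are assumptions). The reduce()
# above builds the chain right to left, so `adapters = [JsonAdapter, FallbackAdapter]`
# makes JsonAdapter the first handler and FallbackAdapter its `following`:
#
#   class JsonAdapter(Adapter):
#       def __init__(self, following: Optional['Adapter'] = None):
#           self.following = following
#       def can_handle(self, response: Response) -> bool:
#           return response.error is None
#       def handle(self, response: Response) -> Any:
#           return {'error': None, 'data': response.data}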
class UseCase(Protocol):
presenter: Presenter
def __call__(self, request: Request) -> Presenter:
try:
response = self.on_success(self.handle(request))
except Exception as error:
response = self.on_error(error)
finally:
return self.respond(response)
@abstractmethod
def handle(self, request: Request) -> Response:
raise NotImplementedError
def on_success(self, response: Response) -> Response:
return response
def on_error(self, error: Exception) -> Response:
return Response(error=error)
def respond(self, response: Response) -> Presenter:
return self.presenter(response)
E = TypeVar('E')
class Repository(Protocol[E]):
"""
    A Repository is responsible for storing and retrieving entities.
    No matter where the data comes from, it could be a database or a plain file.
"""
@abstractmethod
def save(self, entity: E) -> E: ...
@abstractmethod
def get(self, id: EntityId) -> E: ...
@abstractmethod
def delete(self, id: EntityId) -> None: ...
@abstractmethod
def search(self, **kwargs) -> Union[E, List[E]]: ...
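# Illustrative sketch (not part of the original module): a minimal in-memory
# Repository, assuming a hypothetical `User` entity with an `id` attribute.
# It only shows how the protocol's methods are meant to be implemented.
#
# class InMemoryUserRepository:
#     def __init__(self):
#         self._store = {}  # maps EntityId -> User
#     def save(self, entity):
#         self._store[entity.id] = entity
#         return entity
#     def get(self, id):
#         return self._store[id]
#     def delete(self, id):
#         self._store.pop(id, None)
#     def search(self, **kwargs):
#         return [e for e in self._store.values()
#                 if all(getattr(e, k, None) == v for k, v in kwargs.items())]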
class Service(Protocol):
"""
    Services, unlike repositories, do not handle storing and retrieving
    entity state. They are more suitable for things like sending e-mails.
"""
class Gateway(Protocol):
"""
    Gateways are responsible for integrating with external sources and for
    abstracting implementation details away from inner components like
    Repositories and Services. Examples of Gateways are REST and SOAP API clients.
"""
class Listener(Protocol):
"""
    Any object which needs to listen to an event must implement the EventListener
    protocol.
"""
@abstractmethod
def notify(self, event: Event):
raise NotImplementedError
ListenersMap = Dict[Type[Event], Set[Listener]]
class Notifier(Protocol):
"""
    Manages event subscription and dispatching for different types of events.
"""
listeners: ListenersMap
def subscribe(self, event: Type[Event], listener: Listener):
self.listeners.setdefault(event, set()).add(listener)
def unsubscribe(self, event: Type[Event], listener: Listener):
self.listeners.setdefault(event, set()).discard(listener)
def notify(self, event: Event):
for listener in self.listeners.setdefault(event.__class__, set()):
listener.notify(event)
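# Illustrative usage sketch (not part of the original module): subscribing a
# Listener to a Notifier. `AccountCreated` and both concrete classes below are
# hypothetical; only the subscribe/notify calls come from this module.
#
# class EmailOnAccountCreated:            # satisfies the Listener protocol
#     def notify(self, event):
#         print(f"sending welcome e-mail for event {event.id}")
#
# class SimpleNotifier(Notifier):         # reuses subscribe/unsubscribe/notify
#     def __init__(self):
#         self.listeners = {}             # a ListenersMap
#
# notifier = SimpleNotifier()
# notifier.subscribe(AccountCreated, EmailOnAccountCreated())
# notifier.notify(some_account_created_event)  # dispatches to all subscribers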
class State(Protocol):
"""
    Represents a state of an object and also handles state transitions when it
    receives an event.
"""
entity: Entity
events: ClassVar[List[Event]]
def on(self, event: Event):
"""
        The public API of the state: it receives an event to be handled.
:param event: Event
"""
self.validate_event(event)
self.validate_version(event)
self.validate_entity(event)
return self.handle(event)
@abstractmethod
def handle(self, event: Event) -> Entity:
"""
        The handle method should be implemented by each State subclass, so it
        can handle its own list of events.
:param event: Event
:return:
"""
raise NotImplementedError
def validate_event(self, event: Event):
"""
Check if the State can handle the incoming event.
:param event: Event
"""
if event.__class__ not in self.events:
raise InvalidEventError(
f"{self.__class__.__name__} received a invalid event "
f"{event.__class__.__name__}.")
def validate_version(self, event: Event):
"""
Check if the incoming event has the right version.
:param event:
"""
        if event.version != self.entity.version + 1:
            raise InvalidEventVersionError(
                f"{self.entity.__class__.__name__}(v{self.entity.version}) received an "
                f"invalid event {event.__class__.__name__}(v{event.version})")
def validate_entity(self, event: Event):
"""
        Validate that the Entity from the incoming Event is the same as the State's.
:param event:
"""
if event.entity_id != self.entity.id:
raise InvalidEventEntityError(
f"{self.entity.__class__.__name__}(entity:{self.entity.id}) received a "
f"invalid event {event.__class__.__name__}(entity:{event.entity_id})")
def __repr__(self):
return self.__str__()
def __str__(self):
return self.__class__.__name__
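# Illustrative sketch (not part of the original module): a concrete State that
# only accepts a hypothetical `OrderPaid` event. `on()` runs the three
# validations (event type, version, entity id) before delegating to `handle()`.
#
# class AwaitingPayment(State):
#     events = [OrderPaid]
#     def __init__(self, entity):
#         self.entity = entity
#     def handle(self, event):
#         self.entity.version = event.version
#         return self.entity
#
# next_entity = AwaitingPayment(order).on(order_paid_event)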
```
#### File: liasis/scaffold/__init__.py
```python
from typing import Text
from pathlib import Path
from shutil import copytree
LIASIS_ROOT = Path(__file__).parent
PROJECT_SKELETON = Path(str(LIASIS_ROOT) + '/project_skeleton')
APP_SKELETON = Path(str(LIASIS_ROOT) + '/app_skeleton')
def new_project(name: Text, directory: Text) -> None:
path = Path('.' if directory is None else directory)
destination = str(path.absolute()) + '/' + name
if path.exists():
copytree(str(PROJECT_SKELETON.absolute()), destination)
def new_app(name: Text, directory: Text) -> None:
path = Path('./apps' if directory is None else directory)
destination = str(path.absolute()) + '/' + name
if path.exists():
copytree(str(APP_SKELETON.absolute()), destination)
``` |
{
"source": "Johnny-Wish/fake-news-detection-pipeline",
"score": 3
} |
#### File: fake-news-detection-pipeline/embedding_utils/embedding_getter.py
```python
import pandas as pd
import numpy as np
import argparse
import pickle as pkl
import os
from doc_utils import DocumentEmbedder, DocumentSequence
from nltk.corpus import stopwords
from string import punctuation
def get_embeddings(input, output, column='title', model='d2v', vec_size=300, pretrained=None, win_size=5, min_count=5,
dm=0, epochs=20, normalizer=None, scorer='count'):
df = pd.read_csv(input)
raw_docs = df[column].values
docs = DocumentSequence(raw_docs, clean=True, sw=stopwords.words('english'), punct=punctuation)
embedder = DocumentEmbedder(docs, pretrained_word2vec=pretrained)
print('computing embeddings')
model = model # type: str
if model.lower() == 'd2v':
out_name = "d2v(vecsize={}, winsize={}, mincount={}, {}, epochs={}).pkl".format(
vec_size, win_size, min_count, "dm" if dm else "dbow", epochs
)
embeddings = embedder.get_doc2vec(vectors_size=int(vec_size),
window=int(win_size),
min_count=int(min_count),
dm=int(dm),
epochs=int(epochs))
elif model.lower() == "nd2v":
out_name = "nd2v(normalizer={}).pkl".format(normalizer)
embeddings = embedder.get_naive_doc2vec(normalizer=normalizer)
elif model.lower() == "onehot":
out_name = "onehot(scorer={}).pkl".format(scorer)
embeddings = embedder.get_onehot(scorer=scorer)
elif model.lower() == "fasttext":
out_name = "fasttext().pkl"
embeddings = embedder._fast_text() # not yet implemented
else:
print("unrecognized model, using naive doc2vec as fallback")
out_name = "nd2v(normalizer={}).pkl".format(normalizer)
embeddings = embedder.get_naive_doc2vec(normalizer=normalizer)
if isinstance(embeddings, list): # if the embeddings is in a list, stack them into a 2-D numpy array
try:
            embeddings = np.stack([emb if isinstance(emb, np.ndarray) else np.zeros(vec_size) for emb in embeddings])
except ValueError as e:
print(e)
print("embeddings will be saved in the form of a list")
print("embeddings computed")
# dump the embedding matrix on disk
try:
os.makedirs(output)
except FileExistsError:
print("Parent Dir Existent")
finally:
out_name = column + "-" + out_name
out_path = os.path.join(output, out_name)
with open(out_path, "wb") as f:
print("storing embeddings in {}".format(out_path))
pkl.dump(embeddings, f)
print("embeddings stored")
if __name__ == '__main__':
# control arguments
parser = argparse.ArgumentParser()
parser.add_argument("--input", required=True,
help="path to read csv file")
parser.add_argument("--output", required=True,
help="dir to dump embeddings, NOT including filename, created if non-existent")
parser.add_argument("--column", default="title",
help="which column to select from the csv file, default is `title`")
parser.add_argument("--model", default="d2v",
help="model to use, must be one of [d2v, nd2v, onehot, fasttext], default is d2v")
# hyperparameters for doc2vec
parser.add_argument("--vec_size", default=300,
help="size of vectors, default is 300, recommended to be left untouched")
parser.add_argument("--pretrained", default=None,
help="path to word2vec model pretrained on Google News, used if model is d2v or nd2v")
parser.add_argument("--win_size", default=5, type=int,
help="window size, used if model is d2v, default = 5")
parser.add_argument("--min_count", default=5, type=int,
help="min count for inclusion in dict, used if model is d2v, default = 5")
parser.add_argument("--dm", action="store_true",
help="whether to use DM or DBOW, used if model is d2v, default is DBOW")
parser.add_argument("--epochs", default=20, type=int,
help="number of epochs to train the model for, used if model is d2v, default = 20")
# hyperparameters for naive doc2vec
parser.add_argument("--normalizer", default=None,
help="normalizer for naive doc2vec, either l2 or mean, default is None")
# hyperparameters for one-hot
parser.add_argument("--scorer", default="count",
help="scorer function for one-hot, either tfidf or count, default is count")
opt = parser.parse_args()
print(opt)
get_embeddings(opt.input, opt.output, column=opt.column, model=opt.model, vec_size=opt.vec_size,
pretrained=opt.pretrained, win_size=opt.win_size, min_count=opt.min_count, dm=opt.dm,
epochs=opt.epochs, normalizer=opt.normalizer, scorer=opt.scorer)
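# Example invocation (illustrative; the file paths are hypothetical):
#   python embedding_getter.py --input fake_or_real_news.csv --output embeddings \
#       --column title --model d2v --vec_size 300 --win_size 5 --epochs 20
# With these defaults the doc2vec embeddings would be pickled to
#   embeddings/title-d2v(vecsize=300, winsize=5, mincount=5, dbow, epochs=20).pkl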
```
#### File: fake-news-detection-pipeline/embedding_utils/embedding_visualizer.py
```python
import os
import numpy as np
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
def visualize_embeddings(embedding_values, label_values, embedding_name="doc_vec", texts=None, points_to_show=None):
"""
    function for visualizing embeddings with tensorboard.
    MUST run "tensorboard --logdir visual/" from the command line and visit localhost:6006 to see the visualization
    :param embedding_values: np.ndarray, in the shape of [n_docs, n_dims]
    :param label_values: np.ndarray, in the shape of [n_docs]
    :param embedding_name: name for the embeddings, spaces are auto-deleted
    :param texts: optional sequence of document texts written alongside the labels in the metadata file
    :param points_to_show: maximum number of points to show
:return: None
"""
TENSORBOARD_ROOT = 'visual' # home directory for running tensorboard server
    embedding_name = embedding_name.replace(" ", "")  # the `embedding_name` is later used as a tf.scope_name; it mustn't contain spaces
METADATA_PATH = os.path.join(TENSORBOARD_ROOT, 'metadata.tsv') # place to save metadata
    assert isinstance(embedding_values, np.ndarray), "{} is not an np.ndarray".format(embedding_values)
    assert isinstance(label_values, np.ndarray), "{} is not an np.ndarray".format(label_values)
if points_to_show is not None:
points_to_show = min(points_to_show, len(embedding_values), len(label_values))
embedding_values = embedding_values[:points_to_show]
label_values = label_values[:points_to_show]
if texts is not None:
texts = texts[:points_to_show]
embedding_var = tf.Variable(embedding_values, name=embedding_name) # instantiate a tensor to hold embedding values
summary_writer = tf.summary.FileWriter(TENSORBOARD_ROOT) # instantiate a writer to write summaries
config = projector.ProjectorConfig() # `config` maintains arguments for write embeddings and save them on disk
embedding = config.embeddings.add()
embedding.tensor_name = embedding_var.name
# Specify where you find the metadata
embedding.metadata_path = "metadata.tsv" # XXX this step might introduce error, see the printed message below
print("WARNING: potential error due to tensorboard version conflicts")
print("currently setting metadata_path to {}. Due to tensorboard version reasons, if prompted 'metadata not found' "
"when visiting tensorboard server page, please manually edit metadata_path in projector_config.pbtxt to {} "
"or the absolute path for `metadata.tsv` and restart tensorboard".format(embedding.metadata_path,
METADATA_PATH))
print("If your tensorboard version is 1.7.0, you probably should not worry about this")
# call the following method to visualize embeddings
projector.visualize_embeddings(summary_writer, config)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer()) # initialize the `embedding_var`
saver = tf.train.Saver() # instantiate a saver for this session
saver.save(sess, os.path.join(TENSORBOARD_ROOT, "model.ckpt"), 1)
    # write metadata (i.e., labels) for embeddings; this is how tensorboard knows labels of different embeddings
with open(METADATA_PATH, 'w') as f:
f.write("Index\tLabel{}\n".format("" if texts is None else "\ttexts"))
if texts is None:
for index, label in enumerate(label_values):
f.write("{}\t{}\n".format(index, label))
else:
for index, label_and_text in enumerate(zip(label_values, texts)):
f.write("{}\t{}\t{}\n".format(index, *label_and_text))
print("Embeddings are available now. Please start your tensorboard server with commandline "
"`tensorboard --logdir visual` and visit http://localhost:6006 to see the visualization")
if __name__ == '__main__':
from .embedding_loader import EmbeddingLoader
import pandas as pd
loader = EmbeddingLoader("embeddings")
visualize_embeddings(embedding_values=loader.get_d2v(corpus="text", win_size=23, dm=False, epochs=500),
embedding_name="text",
label_values=loader.get_label(),
points_to_show=300,
texts=pd.read_csv("fake_or_real_news.csv").title.values)
visualize_embeddings(embedding_values=loader.get_d2v(corpus="title", win_size=23, dm=False, epochs=500),
embedding_name="title",
label_values=loader.get_label(),
points_to_show=3000)
``` |
{
"source": "Johnnywoode/Python-Website-1",
"score": 2
} |
#### File: Python-Website-1/website/views.py
```python
from flask import Blueprint, render_template
views = Blueprint('views', __name__)
@views.route('/')
def home():
return render_template("home.html")
``` |
{
"source": "johnnyxh/PloverBot",
"score": 3
} |
#### File: bot/store/Datastore.py
```python
import motor.motor_asyncio
class Datastore:
def __init__(self, connection_uri = 'mongodb://localhost:27017', db = 'hummingbot'):
self.client = motor.motor_asyncio.AsyncIOMotorClient(connection_uri)
self.db = self.client[db]
async def insert_song(self, song, label = None):
song_query = {'videoId': song.id}
update_operations = {'$inc': { 'play_count': 1 } }
song_entry = await self.db.songs.find_one_and_update(song_query, update_operations)
if song_entry is None:
song_entry = song.to_rest_dict()
song_entry['play_count'] = 1
song_entry['skip_count'] = 0
song_entry['song_label'] = label
await self.db.songs.insert_one(song_entry)
async def update_song_skipped(self, song):
song_query = {'videoId': song.id}
update_operations = {'$inc': { 'skip_count': 1 } }
song_entry = await self.db.songs.find_one_and_update(song_query, update_operations)
```
#### File: bot/store/Datastore_Test.py
```python
import asyncio
import unittest
from unittest.mock import patch, MagicMock
from store.Datastore import Datastore
class AsyncMock(MagicMock):
async def __call__(self, *args, **kwargs):
return super(AsyncMock, self).__call__(*args, **kwargs)
class DatastoreTest(unittest.TestCase):
@patch('store.Datastore.motor.motor_asyncio')
def test_initialization_default(self, motor_mock):
datastore = Datastore()
args, kwargs = motor_mock.AsyncIOMotorClient.call_args
self.assertEqual(args, ('mongodb://localhost:27017',))
@patch('store.Datastore.motor.motor_asyncio')
def test_initialization_uri(self, motor_mock):
datastore = Datastore('mongodb://user:[email protected]:27017/some_db')
args, kwargs = motor_mock.AsyncIOMotorClient.call_args
self.assertEqual(args, ('mongodb://user:[email protected]:27017/some_db',))
@patch('store.Datastore.motor.motor_asyncio')
def test_insert_new(self, motor_mock):
song = MagicMock()
mock_connection = { 'hummingbot': AsyncMock() }
motor_mock.AsyncIOMotorClient.return_value = mock_connection
song.to_rest_dict.return_value = { 'videoId': 'kUaAszRmBbQ', 'title': 'Partner' }
mock_connection['hummingbot'].songs.find_one_and_update.return_value = None
datastore = Datastore()
loop = asyncio.get_event_loop()
loop.run_until_complete(datastore.insert_song(song, 'piano'))
args, kwargs = mock_connection['hummingbot'].songs.insert_one.call_args
self.assertEqual(args, ({ 'videoId': 'kUaAszRmBbQ', 'title': 'Partner', 'play_count': 1, 'skip_count': 0, 'song_label': 'piano' },))
@patch('store.Datastore.motor.motor_asyncio')
def test_insert_existing(self, motor_mock):
song = MagicMock()
song.id = 'kUaAszRmBbQ'
mock_connection = { 'hummingbot': AsyncMock() }
motor_mock.AsyncIOMotorClient.return_value = mock_connection
datastore = Datastore()
loop = asyncio.get_event_loop()
loop.run_until_complete(datastore.insert_song(song))
args, kwargs = mock_connection['hummingbot'].songs.find_one_and_update.call_args
self.assertEqual(args, ({'videoId': 'kUaAszRmBbQ'}, {'$inc': { 'play_count': 1 } },))
@patch('store.Datastore.motor.motor_asyncio')
def test_update_song_skipped(self, motor_mock):
song = MagicMock()
song.id = 'kUaAszRmBbQ'
mock_connection = { 'hummingbot': AsyncMock() }
motor_mock.AsyncIOMotorClient.return_value = mock_connection
datastore = Datastore()
loop = asyncio.get_event_loop()
loop.run_until_complete(datastore.update_song_skipped(song))
args, kwargs = mock_connection['hummingbot'].songs.find_one_and_update.call_args
self.assertEqual(args, ({'videoId': 'kUaAszRmBbQ'}, {'$inc': { 'skip_count': 1 } },))
```
#### File: bot/utils/Playlist.py
```python
import asyncio
import async_timeout
import youtube_dl
import functools
import traceback
from collections import deque
from urllib.parse import urlparse
from urllib.parse import parse_qs
from utils.SongEntry import SongEntry
from utils.Timer import Timer
class Playlist:
YOUTUBE_OPTS = {
'format': 'webm[abr>0]/bestaudio/best',
'prefer_ffmpeg': True,
'verbose': True,
'playlistrandom': True,
'ignoreerrors': True
}
PLAYLIST_PLAYING_RANGE = 2
PLAYLIST_DOWNLOAD_RANGE = 5
def __init__(self, bot):
self.bot = bot
self.player = None
self.songs = deque()
self.play_next_song = asyncio.Event()
self.current_song = None
self.current_song_timer = None
async def add(self, message):
try:
await self.bot.join_channel(message)
args = message.content.split()
video_url = args[1]
add_count = 1
if len(args) > 2:
add_count = int(args[2])
playlist_count = await self._get_playlist_count(video_url)-1
## Remove the use of _get_video_info here
if playlist_count >= 0:
await self.bot.add_reaction(message, '🔄')
lower_bound = 0
opts = self.YOUTUBE_OPTS.copy()
while lower_bound < playlist_count:
upper_bound = lower_bound + self.PLAYLIST_DOWNLOAD_RANGE
if upper_bound >= playlist_count: upper_bound = playlist_count
opts['playlist_items'] = str(lower_bound) + '-' + str(upper_bound)
info = await self._get_video_info(video_url, opts)
if 'entries' in info:
for entry in info['entries']:
if entry is not None:
# Temporary workaround for playlist case, this logic should move
new_song = SongEntry(message.author, message.channel, entry.get('url'))
await new_song.create(entry)
self.songs.appendleft(new_song)
await self.bot.add_reaction(message, '🐦')
asyncio.ensure_future(self._play_next())
lower_bound = upper_bound+1
else:
new_song = SongEntry(message.author, message.channel, video_url)
await new_song.create()
for songs in range(add_count):
self.songs.appendleft(new_song)
await self.bot.add_reaction(message, '🐦')
await self._play_next()
except Exception as err:
raise(err)
async def recommend(self, message):
if await self._user_in_voice_command(message):
recommend_count = 5
if self.current_song is None:
return await self.bot.send_message(message.channel, 'You need to play something first')
args = message.content.split()
if len(args) > 1:
recommend_count = int(args[1])
recommendations = await self.current_song.get_recommendations(self.bot.user, recommend_count)
self.songs.extendleft(recommendations)
async def skip(self, message):
if await self._user_in_voice_command(message):
try:
args = message.content.split()
if len(args) > 1:
for x in range(int(args[1])-1):
self.songs.pop()
# Only mark the current song as skipped for now
await self.bot.store.update_song_skipped(self.current_song)
except IndexError as err:
pass
finally:
if self.player is not None:
self.player.stop()
async def pause(self, message):
if await self._user_in_voice_command(message):
if self.player is not None and self.player.is_playing():
self.player.pause()
self.current_song_timer.pause()
async def resume(self, message):
if await self._user_in_voice_command(message):
if self.player is not None and not self.player.is_playing():
self.player.resume()
self.current_song_timer.resume()
async def clear(self, message):
if await self._user_in_voice_command(message):
if self.player is not None:
self.songs.clear()
self.player.stop()
async def playing(self, message):
song_list = list(self.songs)
if len(song_list) <= 0 and self.current_song is None: return await self.bot.send_message(message.channel, 'There are no songs in the queue')
if (len(song_list) - self.PLAYLIST_PLAYING_RANGE) > 0: await self.bot.send_message(message.channel, 'There are ' + str(len(song_list) - self.PLAYLIST_PLAYING_RANGE) + ' other songs in the queue')
for song in song_list[len(song_list)-self.PLAYLIST_PLAYING_RANGE:]:
await self.bot.send_message(message.channel, embed=song.get_embed_info('Coming up'))
return await self.bot.send_message(message.channel, embed=self.current_song.get_embed_info('Now Playing - {}'.format(self.current_song_timer.get_current_timestamp())))
async def on_voice_state_update(self, before, after):
if self.bot.voice is not None and len(self.bot.voice.channel.voice_members) <= 1:
self.songs.clear()
self.player.stop()
await self.bot.voice.disconnect()
def is_playing(self):
return self.player is not None and self.player.is_playing()
# Need to start rethinking this loop
async def _play_next(self):
if not self.is_playing() and self.current_song is None:
while True:
self.play_next_song.clear()
self.current_song = None
try:
self.current_song = self.songs.pop()
before_options = '-ss {} -reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 2'.format(self.current_song.start_time)
self.player = self.bot.voice.create_ffmpeg_player(self.current_song.url, before_options=before_options, after=self._finished)
self.player.start()
# These operations are not needed for the playlist loop, print exception
# Keep the functionality going
try:
self.current_song_timer = Timer()
self.current_song_timer.start()
print('Playing: {}'.format(self.current_song.title))
await self.bot.send_message(self.current_song.request_channel, embed=self.current_song.get_embed_info('Now Playing'))
await self.bot.store.insert_song(self.current_song)
except :
traceback.print_exc()
finally:
await self.play_next_song.wait()
except :
                    # An error, most likely from trying to pop an empty deque, should stop the playlist loop
# The next song added to the queue will restart the playlist loop
return
async def _user_in_voice_command(self, message):
if message.author.voice_channel is not None:
await self.bot.add_reaction(message, '🐦')
return True
else:
await self.bot.send_message(message.channel, 'You should get in a voice channel first')
return False
async def _get_playlist_count(self, youtube_url):
playlist_count = 0
youtube_qparams = parse_qs(urlparse(youtube_url).query)
if 'list' in youtube_qparams:
playlist_opts = self.YOUTUBE_OPTS.copy()
playlist_opts['extract_flat'] = 'in_playlist'
with youtube_dl.YoutubeDL(playlist_opts) as ydl:
func = functools.partial(ydl.extract_info, youtube_url, download=False)
flat_info = await self.bot.loop.run_in_executor(None, func)
playlist_count = len(flat_info['entries'])
return playlist_count
async def _get_video_info(self, youtube_url, opts):
with youtube_dl.YoutubeDL(opts) as ydl:
func = functools.partial(ydl.extract_info, youtube_url, download=False)
return await self.bot.loop.run_in_executor(None, func)
def _finished(self):
self.bot.loop.call_soon_threadsafe(self.play_next_song.set)
```
#### File: bot/utils/Time_Test.py
```python
import unittest
from unittest.mock import patch, MagicMock
from utils.Timer import Timer
from utils.Timer import TimerError
class TimerTest(unittest.TestCase):
@patch('utils.Timer.time')
def test_initialization_default(self, time_mock):
timer = Timer()
self.assertEqual(timer.initial_start_time, 0)
self.assertEqual(timer.start_time, 0)
self.assertEqual(timer.pause_started, None)
self.assertEqual(timer.amount_time_paused, 0)
@patch('utils.Timer.time')
def test_initialization_start_time(self, time_mock):
timer = Timer(50)
self.assertEqual(timer.initial_start_time, 50)
self.assertEqual(timer.start_time, 0)
self.assertEqual(timer.pause_started, None)
self.assertEqual(timer.amount_time_paused, 0)
@patch('utils.Timer.time')
def test_start(self, time_mock):
time_mock.time.return_value = 1525628158.745443
timer = Timer()
timer.start()
self.assertEqual(timer.start_time, 1525628158)
@patch('utils.Timer.time')
def test_start_already_started(self, time_mock):
time_mock.time.return_value = 1525628158.745443
timer = Timer()
timer.start()
self.assertRaises(TimerError, timer.start)
@patch('utils.Timer.time')
def test_pause(self, time_mock):
timer = Timer()
time_mock.time.return_value = 1525628158.745443
timer.start()
time_mock.time.return_value = 1525628188.142346
timer.pause()
self.assertEqual(timer.pause_started, 1525628188)
def test_pause_not_started(self):
timer = Timer()
self.assertRaises(TimerError, timer.pause)
@patch('utils.Timer.time')
def test_pause_already_paused(self, time_mock):
time_mock.time.return_value = 1525628188.142346
timer = Timer()
timer.start()
timer.pause()
self.assertRaises(TimerError, timer.pause)
@patch('utils.Timer.time')
def test_resume(self, time_mock):
timer = Timer()
time_mock.time.return_value = 1525628158.745443
timer.start()
time_mock.time.return_value = 1525628188.142346
timer.pause()
time_mock.time.return_value = 1525628198.142346
timer.resume()
self.assertEqual(timer.amount_time_paused, 10)
self.assertEqual(timer.pause_started, None)
@patch('utils.Timer.time')
def test_resume_not_paused(self, time_mock):
timer = Timer()
time_mock.time.return_value = 1525628158.745443
timer.start()
self.assertRaises(TimerError, timer.resume)
def test_resume_not_started(self):
timer = Timer()
self.assertRaises(TimerError, timer.resume)
@patch('utils.Timer.time')
def test_get_elapsed_seconds(self, time_mock):
timer = Timer()
time_mock.time.return_value = 1525628158.745443
timer.start()
time_mock.time.return_value = 1525628188.142346
self.assertEqual(timer.get_elapsed_seconds(), 30)
@patch('utils.Timer.time')
def test_get_elapsed_seconds_initial_start_time(self, time_mock):
timer = Timer(20)
time_mock.time.return_value = 1525628158.745443
timer.start()
time_mock.time.return_value = 1525628188.142346
self.assertEqual(timer.get_elapsed_seconds(), 50)
@patch('utils.Timer.time')
def test_get_elapsed_seconds_paused(self, time_mock):
timer = Timer()
time_mock.time.return_value = 1525628158.745443
timer.start()
time_mock.time.return_value = 1525628188.142346
timer.pause()
time_mock.time.return_value = 1525628198.142346
self.assertEqual(timer.get_elapsed_seconds(), 30)
@patch('utils.Timer.time')
def test_get_elapsed_seconds_with_pause(self, time_mock):
timer = Timer()
time_mock.time.return_value = 1525628158.745443
timer.start()
time_mock.time.return_value = 1525628188.142346
timer.pause()
time_mock.time.return_value = 1525628198.142346
timer.resume()
time_mock.time.return_value = 1525628208.142346
self.assertEqual(timer.get_elapsed_seconds(), 40)
def test_get_elapsed_seconds_not_started(self):
timer = Timer()
self.assertEqual(timer.get_elapsed_seconds(), 0)
@patch('utils.Timer.time')
def test_get_current_timestamp_not_started(self, time_mock):
timer = Timer()
self.assertEqual(timer.get_current_timestamp(), '00h:00m:00s')
@patch('utils.Timer.time')
def test_get_current_timestamp(self, time_mock):
timer = Timer()
time_mock.time.return_value = 1525628158.745443
timer.start()
time_mock.time.return_value = 1525664910.142346
self.assertEqual(timer.get_current_timestamp(), '10h:12m:32s')
``` |
{
"source": "johnnyzhang295/MMGIS",
"score": 3
} |
#### File: auxiliary/rastertolegend/rastertolegend.py
```python
import os
import sys
import subprocess
from osgeo import gdal
from pathlib import Path
raster = sys.argv[1]
splitfilenameR = os.path.splitext(raster)
colorfile = sys.argv[2]
splitfilenameC = os.path.basename(colorfile).split(".")
discrete = ""
values = []
if len(sys.argv) > 3:
discrete = sys.argv[3]
def colorRelief(raster, colorfile, discrete):
exactOrNearest = ""
if discrete == "-discrete":
exactOrNearest = "-nearest_color_entry"
input_file = str(Path(raster).absolute())
output_file = str(Path(splitfilenameR[0] + "_" + splitfilenameC[0] + splitfilenameR[1]).absolute())
colorfile_path = str(Path(colorfile).absolute())
if exactOrNearest == "":
gdalDEMcr = ["gdaldem", "color-relief", input_file, colorfile_path, output_file]
else:
gdalDEMcr = ["gdaldem", "color-relief", exactOrNearest, input_file, colorfile_path, output_file]
print("Running:", " ".join(gdalDEMcr))
process = subprocess.Popen(gdalDEMcr, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
process.wait()
for output in process.stdout:
print(output.decode())
for error in process.stderr:
print(error.decode())
def colorToLegend(colorfile, min, max, discrete):
legend = open(splitfilenameR[0] + "_" + splitfilenameC[0] + "_legend.csv", "w")
legend.write("color,strokecolor,shape,value")
cf = open(colorfile)
percents = False
for line in cf:
split = line.split(" ", 1)
value = split[0]
if value[-1:] == "%":
value = split[0][:-1]
percents = True
if value.lower() != "nv":
values.append(float(value))
cf.close()
cf = open(colorfile)
highToLow = True
if values[0] < values[1]:
highToLow = False
if discrete == "-discrete":
if percents:
j = 0
for v in values:
values[j] = int(mapPercent(float(v)/100, min, max))
j += 1
i = 0
for line in cf:
if i > 0 and i < len(values) - 1:
value = str(values[i] - ((values[i] - values[i-1])/2)) + " - " + str(values[i] + ((values[i+1] - values[i])/2))
elif i == 0:
sign = str(int(min)) + " - "
if not percents:
sign = "< "
if highToLow:
sign = str(int(max)) + " - "
if not percents:
sign = "> "
value = sign + str((values[i+1] + values[i])/2)
elif i == len(values) - 1:
sign = " - " + str(int(max))
if not percents:
sign = "> "
if highToLow:
sign = " - " + str(int(min))
if not percents:
sign = "< "
value = str((values[i] + values[i-1])/2) + sign
if not percents:
value = sign + str((values[i] + values[i-1])/2)
split = line.split(" ", 1)
if split[0].lower() != "nv":
legend.write("\n" + rgb_to_hex(tuple(map(int, split[1].split()))) + ",black,square," + value)
i += 1
else:
for line in cf:
split = line.split(" ", 1)
value = split[0]
if value[-1:] == "%":
value = split[0][:-1]
if split[0].lower() != "nv":
legend.write("\n" + rgb_to_hex(tuple(map(int, split[1].split()))) + ",black,square," +
str(int(mapPercent(float(value)/100, min, max))))
legend.close()
cf.close()
# helper functions
def rgb_to_hex(rgb):
return '#%02x%02x%02x' % rgb
def mapPercent(p, min, max):
return ((max - min) * p) + min
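# Illustrative examples of the helpers above (assumed values, not part of the original script):
#   rgb_to_hex((255, 0, 0))       -> '#ff0000'
#   mapPercent(0.25, 0.0, 200.0)  -> 50.0   # a 25% colorfile entry on a raster ranging 0-200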
r = gdal.Open(raster)
# stats[0] is min, stats[1] is max
stats = r.GetRasterBand(1).GetStatistics(1, 1)
colorRelief(raster, colorfile, discrete)
colorToLegend(colorfile, stats[0], stats[1], discrete)
```
#### File: private/api/BandsToProfile.py
```python
# example: BandsToProfile.py MSL_DEM_v3_webgis.tif -4.66086473 137.36935616 [[0,7],9]
# 2ptsToProfile.py MSL_DEM_v3_webgis.tif -4.67053145 137.36515045 -4.66086473 137.36935616 10 1
import sys
import ast
import re
import math
from osgeo import gdal
from osgeo import osr
from osgeo.gdalconst import *
from osgeo import __version__ as osgeoversion
try:
from urllib.parse import unquote
except ImportError:
from urllib import unquote
# Make gdal use exceptions instead of their own errors so that they can be caught
gdal.UseExceptions()
def getValueAtBand(b):
try:
# Get the band
band = ds.GetRasterBand(b)
value = band.ReadAsArray(
pixelLatLonPair[0][0], pixelLatLonPair[0][1], 1, 1)[0][0]
bandName = band.GetDescription()
if isinstance(bandName, str):
matchedName = re.findall(r"[-+]?\d*\.\d+|\d+", bandName)
if len(matchedName) > 0:
bandName = float(matchedName[0])
else:
bandName = b
else:
bandName = b
# check for nan
if value != value:
value = None
bandName = None
elif value == band.GetNoDataValue():
value = None
except Exception as e:
# -1100101 = (e)rror
value = None
bandName = None
return [bandName, value]
# Takes in a [[x,y],[x,y],[x,y],[x,y]...[x,y],[x,y]]
# and returns an array of values on the raster at those points in order
def getRasterDataValues():
valuesArray = []
for i in range(0, len(bands)):
# an int or an array of int
# if part needs work (safer to pass bands: "[[x,y]]" now)
if(isinstance(bands[i], int)):
value = getValueAtBand(bands[i])
print(value)
valuesArray.append(value)
else:
# +1 for inclusivity
for j in range(bands[i][0], bands[i][1] + 1):
value = getValueAtBand(j)
valuesArray.append(value)
return valuesArray
# Takes in a [[lat,lon],[lat,lon]...[lat,lon]]
# and returns [[pixel,pixel][pixel,pixel]...[pixel,pixel]]
# based on the predeclared ds (gdal.open(raster))
def latLonsToPixel(latLonPairs):
# get georeference info
transform = ds.GetGeoTransform()
xOrigin = transform[0]
yOrigin = transform[3]
pixelWidth = transform[1]
pixelHeight = transform[5]
# Create a spatial reference object for the dataset
srs = osr.SpatialReference()
if int(osgeoversion[0]) >= 3:
# GDAL 3 changes axis order: https://github.com/OSGeo/gdal/issues/1546
srs.SetAxisMappingStrategy(osr.OAMS_TRADITIONAL_GIS_ORDER)
srs.ImportFromWkt(ds.GetProjection())
# Set up the coordinate transformation object
srsLatLong = srs.CloneGeogCS()
ct = osr.CoordinateTransformation(srsLatLong, srs)
    # Go through all the point pairs and translate them from latitude/longitude to pixel coordinates
pixelPairs = []
for point in latLonPairs:
# Change the point locations into the GeoTransform space
(point[1], point[0], holder) = ct.TransformPoint(point[1], point[0])
# Translate the x and y coordinates into pixel values
x = (point[1] - xOrigin) / pixelWidth
y = (point[0] - yOrigin) / pixelHeight
if math.isinf(x):
x = 0
if math.isinf(y):
y = 0
# Add the point to our return array
pixelPairs.append([int(x), int(y)])
return pixelPairs
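# Illustrative example of the geotransform arithmetic above (assumed numbers, not from a real dataset):
# with xOrigin=137.0, yOrigin=-4.0, pixelWidth=0.001 and pixelHeight=-0.001,
# a projected point (lon=137.36, lat=-4.66) maps to
#   x = (137.36 - 137.0) / 0.001  = 360
#   y = (-4.66 - (-4.0)) / -0.001 = 660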
# Get arguments
raster = unquote(sys.argv[1]) # path
lat = float(sys.argv[2]) # x
lon = float(sys.argv[3]) # y
if str(sys.argv[4]).isalnum():
    type = str(sys.argv[4])  # 'xy' or 'll'
else:
    type = 'xy'  # fall back to pixel coordinates when the argument is not recognized
bands = ast.literal_eval(unquote(sys.argv[5])) # bands
latLonPair = [[lat, lon]]
# Open the image
ds = gdal.Open(raster, GA_ReadOnly)
if ds is None:
print("Could not open image")
sys.exit(1)
# Convert latlon to image space pixels
if type == 'll':
pixelLatLonPair = latLonsToPixel(latLonPair)
else:
pixelLatLonPair = latLonPair
pixelLatLonPair[0][0] = int(pixelLatLonPair[0][0])
pixelLatLonPair[0][1] = int(pixelLatLonPair[0][1])
# Find the raster value at each of those points
valueArray = getRasterDataValues()
print(valueArray)
``` |
{
"source": "JohnnyZhang917/canmatrix",
"score": 2
} |
#### File: canmatrix/formats/dbc.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import decimal
import logging
import math
import re
import typing
from builtins import *
import canmatrix
logger = logging.getLogger(__name__)
default_float_factory = decimal.Decimal
def normalizeName(name, whitespaceReplacement):
name = re.sub(r'\s+', whitespaceReplacement, name)
if ' ' in name:
name = '"' + name + '"'
return name
def format_float(f):
s = str(f).upper()
if s.endswith('.0'):
s = s[:-2]
if 'E' in s:
tmp = s.split('E')
s = '%sE%s%s' % (tmp[0], tmp[1][0], tmp[1][1:].rjust(3, '0'))
return s.upper()
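# Illustrative examples of the formatting above (not part of the original module):
#   format_float(10.0)    -> '10'
#   format_float(1.5e-05) -> '1.5E-005'   # exponent padded to three digits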
def check_define(define):
    # check if define is compatible with dbc, else replace it by STRING
    if define.type not in ["ENUM", "STRING", "INT", "HEX", "FLOAT"]:
        logger.warning("dbc export of attribute type {} not supported; replaced by STRING".format(define.type))
define.definition = "STRING"
define.type = "STRING"
def create_define(data_type, define, define_type, defaults):
check_define(define)
define_string = "BA_DEF_ " + define_type
define_string += ' "' + data_type + '" '
define_string += define.definition + ';\n'
if data_type not in defaults and define.defaultValue is not None:
if define.type == "ENUM" or define.type == "STRING":
defaults[data_type] = '"' + define.defaultValue + '"'
else:
defaults[data_type] = define.defaultValue
return define_string
def create_attribute_string(attribute, attribute_class, name, value, is_string):
if is_string:
value = '"' + value + '"'
elif not value:
value = '""'
attribute_string = 'BA_ "' + attribute + '" ' + attribute_class + ' ' + name + ' ' + str(value) + ';\n'
return attribute_string
def create_comment_string(comment_class, comment_ident, comment, dbcExportEncoding, dbcExportCommentEncoding):
if len(comment) == 0:
return b""
comment_string = ("CM_ " + comment_class + " " + comment_ident + ' "').encode(dbcExportEncoding, 'ignore')
comment_string += comment.replace('"', '\\"').encode(dbcExportCommentEncoding, 'ignore')
comment_string += '";\n'.encode(dbcExportEncoding)
return comment_string
def dump(mydb, f, **options):
# create copy because export changes database
db = copy.deepcopy(mydb)
dbcExportEncoding = options.get("dbcExportEncoding", 'iso-8859-1')
dbcExportCommentEncoding = options.get("dbcExportCommentEncoding", dbcExportEncoding)
writeValTable = options.get("writeValTable", True)
compatibility = options.get('compatibility', True)
whitespaceReplacement = options.get("whitespaceReplacement", '_')
if whitespaceReplacement in ['', None] or set(
[' ', '\t']).intersection(whitespaceReplacement):
print("Warning: Settings may result in whitespace in DBC variable names. This is not supported by the DBC format.")
if db.contains_fd or db.contains_j1939:
if db.contains_fd:
db.add_global_defines("BusType", "STRING")
db.add_attribute("BusType", "CAN FD")
elif db.contains_j1939:
db.add_global_defines("ProtocolType", "STRING")
db.add_attribute("ProtocolType", "J1939")
db.add_frame_defines("VFrameFormat", 'ENUM "StandardCAN","ExtendedCAN","StandardCAN_FD","ExtendedCAN_FD","J1939PG"')
for frame in db.frames:
if frame.is_fd:
if frame.arbitration_id.extended:
frame.add_attribute("VFrameFormat", "ExtendedCAN_FD")
else:
frame.add_attribute("VFrameFormat", "StandardCAN_FD")
elif frame.is_j1939:
frame.add_attribute("VFrameFormat", "J1939PG")
else:
if frame.arbitration_id.extended:
frame.add_attribute("VFrameFormat", "ExtendedCAN")
else:
frame.add_attribute("VFrameFormat", "StandardCAN")
db.enum_attribs_to_keys()
# free signals are in special frame in dbc...
if len(db.signals) > 0:
free_signals_dummy_frame = canmatrix.Frame("VECTOR__INDEPENDENT_SIG_MSG")
free_signals_dummy_frame.arbitration_id = canmatrix.ArbitrationId(0x40000000, extended=True)
free_signals_dummy_frame.signals = db.signals
db.addFrame(free_signals_dummy_frame)
    # shorten long environment variable names
for envVarName in db.env_vars:
if len(envVarName) > 32:
db.add_env_attribute(envVarName, "SystemEnvVarLongSymbol", envVarName)
db.env_vars[envVarName[:32]] = db.env_vars.pop(envVarName)
db.add_env_defines("SystemEnvVarLongSymbol", "STRING")
header = "VERSION \"created by canmatrix\"\n\n\nNS_ :\n\nBS_:\n\n"
f.write(header.encode(dbcExportEncoding))
# ECUs
f.write("BU_: ".encode(dbcExportEncoding))
for ecu in db.ecus:
# fix long ecu names:
if len(ecu.name) > 32:
ecu.add_attribute("SystemNodeLongSymbol", ecu.name)
ecu.name = ecu.name[0:32]
db.add_ecu_defines("SystemNodeLongSymbol", "STRING")
f.write((ecu.name + " ").encode(dbcExportEncoding))
f.write("\n\n".encode(dbcExportEncoding))
if writeValTable:
# ValueTables
for table in sorted(db.value_tables):
f.write(("VAL_TABLE_ " + table).encode(dbcExportEncoding))
for row in db.value_tables[table]:
f.write(
(' ' +
str(row) +
' "' +
db.value_tables[table][row] +
'"').encode(dbcExportEncoding))
f.write(";\n".encode(dbcExportEncoding))
f.write("\n".encode(dbcExportEncoding))
output_names = collections.defaultdict(dict)
for frame in db.frames:
# fix long frame names
if len(frame.name) > 32:
frame.add_attribute("SystemMessageLongSymbol", frame.name)
frame.name = frame.name[0:32]
db.add_frame_defines("SystemMessageLongSymbol", "STRING")
# fix long signal names
for s in frame.signals:
if len(s.name) > 32:
s.add_attribute("SystemSignalLongSymbol", s.name)
s.name = s.name[0:32]
db.add_signal_defines("SystemSignalLongSymbol", "STRING")
normalized_names = collections.OrderedDict((
(s, normalizeName(s.name, whitespaceReplacement))
for s in frame.signals
))
# remove "-" from frame names
if compatibility:
frame.name = re.sub("[^A-Za-z0-9]", whitespaceReplacement, frame.name)
duplicate_signal_totals = collections.Counter(normalized_names.values())
duplicate_signal_counter = collections.Counter()
numbered_names = collections.OrderedDict()
for signal in frame.signals:
name = normalized_names[signal]
if compatibility:
name = re.sub("[^A-Za-z0-9]",whitespaceReplacement, name)
duplicate_signal_counter[name] += 1
if duplicate_signal_totals[name] > 1:
# TODO: pad to 01 in case of 10+ instances, for example?
name += str(duplicate_signal_counter[name] - 1)
output_names[frame][signal] = name
# Frames
for frame in db.frames:
multiplex_written = False
if frame.transmitters.__len__() == 0:
frame.add_transmitter("Vector__XXX")
f.write(
("BO_ %d " %
frame.arbitration_id.to_compound_integer() +
frame.name +
": %d " %
frame.size +
frame.transmitters[0] +
"\n").encode(dbcExportEncoding))
duplicate_signal_totals = collections.Counter(
normalizeName(s.name, whitespaceReplacement) for s in frame.signals
)
duplicate_signal_counter = collections.Counter()
for signal in frame.signals:
if signal.multiplex == 'Multiplexor' and multiplex_written and not frame.is_complex_multiplexed:
continue
signal_line = " SG_ " + output_names[frame][signal] + " "
if signal.mux_val is not None:
signal_line += "m{}".format(int(signal.mux_val))
if signal.multiplex != 'Multiplexor':
signal_line += " "
if signal.multiplex == 'Multiplexor':
signal_line += "M "
multiplex_written = True
startbit = signal.get_startbit(bit_numbering=1)
if signal.is_signed:
sign = '-'
else:
sign = '+'
signal_line += (": %d|%d@%d%c" %
(startbit,
signal.size,
signal.is_little_endian,
sign))
signal_line += " (%s,%s)" % (format_float(signal.factor), format_float(signal.offset))
signal_line += " [{}|{}]".format(format_float(signal.min),format_float(signal.max))
signal_line += ' "'
if signal.unit is None:
signal.unit = ""
signal_line += signal.unit
signal_line += '" '
if signal.receivers.__len__() == 0:
signal.add_receiver('Vector__XXX')
signal_line += ','.join(signal.receivers) + "\n"
f.write(signal_line.encode(dbcExportEncoding))
f.write("\n".encode(dbcExportEncoding))
f.write("\n".encode(dbcExportEncoding))
# second Sender:
for frame in db.frames:
if frame.transmitters.__len__() > 1:
f.write(("BO_TX_BU_ %d : %s;\n" % (frame.arbitration_id.to_compound_integer(), ','.join(frame.transmitters))).encode(dbcExportEncoding))
# frame comments
# wow, there are dbcs where comments are encoded with other coding than rest of dbc...
for frame in db.frames:
f.write(create_comment_string("BO_", "%d " % frame.arbitration_id.to_compound_integer(), frame.comment, dbcExportEncoding, dbcExportCommentEncoding))
f.write("\n".encode(dbcExportEncoding))
# signal comments
for frame in db.frames:
for signal in frame.signals:
if signal.comment is not None and signal.comment.__len__() > 0:
name = output_names[frame][signal]
f.write(create_comment_string("SG_", "%d " % frame.arbitration_id.to_compound_integer() + name, signal.comment, dbcExportEncoding,
dbcExportCommentEncoding))
f.write("\n".encode(dbcExportEncoding))
# ecu comments
for ecu in db.ecus:
if ecu.comment is not None and ecu.comment.__len__() > 0:
f.write(create_comment_string("BU_", ecu.name, ecu.comment, dbcExportEncoding,
dbcExportCommentEncoding))
f.write("\n".encode(dbcExportEncoding))
defaults = {}
# write defines
for (data_type, define) in sorted(list(db.frame_defines.items())):
f.write(create_define(data_type, define, "BO_", defaults).encode(dbcExportEncoding, 'replace'))
for (data_type, define) in sorted(list(db.signal_defines.items())):
f.write(create_define(data_type, define, "SG_", defaults).encode(dbcExportEncoding, 'replace'))
for (data_type, define) in sorted(list(db.ecu_defines.items())):
f.write(create_define(data_type, define, "BU_", defaults).encode(dbcExportEncoding, 'replace'))
for (data_type, define) in sorted(list(db.env_defines.items())):
f.write(create_define(data_type, define, "EV_", defaults).encode(dbcExportEncoding, 'replace'))
for (data_type, define) in sorted(list(db.global_defines.items())):
f.write(create_define(data_type, define, "", defaults).encode(dbcExportEncoding, 'replace'))
for define in sorted(defaults):
f.write(('BA_DEF_DEF_ "' + define + '" ').encode(dbcExportEncoding) +
defaults[define].encode(dbcExportEncoding,'replace') + ';\n'.encode(dbcExportEncoding))
# ecu-attributes:
for ecu in db.ecus:
for attrib, val in sorted(ecu.attributes.items()):
f.write(create_attribute_string(attrib, "BU_", ecu.name, val, db.ecu_defines[attrib].type == "STRING").encode(dbcExportEncoding))
f.write("\n".encode(dbcExportEncoding))
# global-attributes:
for attrib, val in sorted(db.attributes.items()):
f.write(create_attribute_string(attrib, "", "", val, db.global_defines[attrib].type == "STRING").encode(
dbcExportEncoding))
f.write("\n".encode(dbcExportEncoding))
# messages-attributes:
for frame in db.frames:
for attrib, val in sorted(frame.attributes.items()):
f.write(create_attribute_string(attrib, "BO_", str(frame.arbitration_id.to_compound_integer()), val, db.frame_defines[attrib].type == "STRING").encode(dbcExportEncoding))
f.write("\n".encode(dbcExportEncoding))
# signal-attributes:
for frame in db.frames:
for signal in frame.signals:
for attrib, val in sorted(signal.attributes.items()):
name = output_names[frame][signal]
if isinstance(val, float):
val = format_float(val)
f.write(create_attribute_string(attrib, "SG_", '%d ' % frame.arbitration_id.to_compound_integer() + name, val,
db.signal_defines[attrib].type == "STRING").encode(dbcExportEncoding))
f.write("\n".encode(dbcExportEncoding))
for env_var_name, env_var in db.env_vars.items():
if "attributes" in env_var:
for attribute, value in env_var["attributes"].items():
f.write(create_attribute_string(attribute, "EV_", "", value,
db.env_defines[attribute].type == "STRING").encode(dbcExportEncoding))
# signal-values:
for frame in db.frames:
multiplex_written = False
for signal in frame.signals:
if signal.multiplex == 'Multiplexor' and multiplex_written:
continue
multiplex_written = True
if signal.values:
f.write(
('VAL_ %d ' %
frame.arbitration_id.to_compound_integer() +
output_names[frame][signal]).encode(dbcExportEncoding))
for attrib, val in sorted(
signal.values.items(), key=lambda x: int(x[0])):
f.write(
(' ' + str(attrib) + ' "' + val + '"').encode(dbcExportEncoding))
f.write(";\n".encode(dbcExportEncoding))
# SIG_VALTYPE
for frame in db.frames:
for signal in frame.signals:
if signal.is_float:
if int(signal.size) > 32:
f.write(('SIG_VALTYPE_ %d %s : 2;\n' % (frame.arbitration_id.to_compound_integer(), output_names[frame][signal])).encode(
dbcExportEncoding))
else:
f.write(('SIG_VALTYPE_ %d %s : 1;\n' % (frame.arbitration_id.to_compound_integer(), output_names[frame][signal])).encode(
dbcExportEncoding))
# signal-groups:
for frame in db.frames:
for sigGroup in frame.signalGroups:
f.write(("SIG_GROUP_ " + str(frame.arbitration_id.to_compound_integer()) + " " + sigGroup.name +
" " + str(sigGroup.id) + " :").encode(dbcExportEncoding))
for signal in sigGroup.signals:
f.write((" " + output_names[frame][signal]).encode(dbcExportEncoding))
f.write(";\n".encode(dbcExportEncoding))
for frame in db.frames:
if frame.is_complex_multiplexed:
for signal in frame.signals:
if signal.muxer_for_signal is not None:
f.write(("SG_MUL_VAL_ %d %s %s %d-%d;\n" % (frame.arbitration_id.to_compound_integer(), signal.name, signal.muxer_for_signal, signal.mux_val_min, signal.mux_val_max)).encode(dbcExportEncoding))
for envVarName in db.env_vars:
envVar = db.env_vars[envVarName]
f.write(("EV_ {0} : {1} [{2}|{3}] \"{4}\" {5} {6} {7} {8};\n".format(envVarName, envVar["varType"], envVar["min"],
envVar["max"], envVar["unit"],envVar["initialValue"],
envVar["evId"], envVar["accessType"],
",".join(envVar["accessNodes"])) ).encode(dbcExportEncoding))
def load(f, **options):
dbcImportEncoding = options.get("dbcImportEncoding", 'iso-8859-1')
dbcCommentEncoding = options.get("dbcImportCommentEncoding", dbcImportEncoding)
float_factory = options.get('float_factory', default_float_factory)
i = 0
class FollowUps(object):
nothing, signalComment, frameComment, boardUnitComment, globalComment = list(
range(5))
followUp = FollowUps.nothing
comment = ""
signal = None # type: typing.Optional[canmatrix.Signal]
frame = None
boardUnit = None
db = canmatrix.CanMatrix()
framesById = {} # type: typing.Dict[int, canmatrix.Frame]
def hash_arbitration_id(arbitration_id): # type: (canmatrix.ArbitrationId) -> int
return hash((arbitration_id.id, arbitration_id.extended))
def get_frame_by_id(arbitration_id): # type: (canmatrix.ArbitrationId) -> typing.Optional[canmatrix.Frame]
try:
return framesById[hash_arbitration_id(arbitration_id)]
except KeyError:
return None
def add_frame_by_id(frame): # type: (canmatrix.Frame) -> None
framesById[hash_arbitration_id(frame.arbitration_id)] = frame
for line in f:
i = i + 1
l = line.strip()
if l.__len__() == 0:
continue
try:
# if 1==1:
if followUp == FollowUps.signalComment:
try:
comment += "\n" + \
l.decode(dbcCommentEncoding).replace('\\"', '"')
except:
logger.error("Error decoding line: %d (%s)" % (i, line))
if l.endswith(b'";'):
followUp = FollowUps.nothing
if signal is not None:
signal.add_comment(comment[0:-2])
continue
elif followUp == FollowUps.frameComment:
try:
comment += "\n" + \
l.decode(dbcCommentEncoding).replace('\\"', '"')
except:
logger.error("Error decoding line: %d (%s)" % (i, line))
if l.endswith(b'";'):
followUp = FollowUps.nothing
if frame is not None:
frame.add_comment(comment[0:-2])
continue
elif followUp == FollowUps.boardUnitComment:
try:
comment += "\n" + \
l.decode(dbcCommentEncoding).replace('\\"', '"')
except:
logger.error("Error decoding line: %d (%s)" % (i, line))
if l.endswith(b'";'):
followUp = FollowUps.nothing
if boardUnit is not None:
boardUnit.add_comment(comment[0:-2])
continue
decoded = l.decode(dbcImportEncoding).strip()
if decoded.startswith("BO_ "):
regexp = re.compile(r"^BO_ ([^\ ]+) ([^\ ]+) *: ([^\ ]+) ([^\ ]+)")
temp = regexp.match(decoded)
# db.frames.addFrame(Frame(temp.group(1), temp.group(2), temp.group(3), temp.group(4)))
frame = canmatrix.Frame(temp.group(2),arbitration_id = int(temp.group(1)),
size=int(temp.group(3)), transmitters=temp.group(4).split())
db.frames.append(frame)
add_frame_by_id(frame)
elif decoded.startswith("SG_ "):
pattern = r"^SG_ +(\w+) *: *(\d+)\|(\d+)@(\d+)([\+|\-]) +\(([0-9.+\-eE]+),([0-9.+\-eE]+)\) +\[([0-9.+\-eE]+)\|([0-9.+\-eE]+)\] +\"(.*)\" +(.*)"
regexp = re.compile(pattern)
temp = regexp.match(decoded)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp_raw = regexp_raw.match(l)
if temp:
receiver = [b.strip() for b in temp.group(11).split(',')]
extras = {}
# if float_factory is not None:
# extras['float_factory'] = float_factory
tempSig = canmatrix.Signal(
temp.group(1),
start_bit=int(temp.group(2)),
size=int(temp.group(3)),
is_little_endian=(int(temp.group(4)) == 1),
is_signed=(temp.group(5) == '-'),
factor=temp.group(6),
offset=temp.group(7),
min=temp.group(8),
max=temp.group(9),
unit=temp_raw.group(10).decode(dbcImportEncoding),
receivers=receiver,
**extras
)
if not tempSig.is_little_endian:
# startbit of motorola coded signals are MSB in dbc
tempSig.set_startbit(int(temp.group(2)), bitNumbering=1)
frame.add_signal(tempSig)
# db.frames.addSignalToLastFrame(tempSig)
else:
pattern = r"^SG_ +(.+?) +(.+?) *: *(\d+)\|(\d+)@(\d+)([\+|\-]) +\(([0-9.+\-eE]+),([0-9.+\-eE]+)\) +\[([0-9.+\-eE]+)\|([0-9.+\-eE]+)\] +\"(.*)\" +(.*)"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
receiver = [b.strip() for b in temp.group(12).split(',')]
multiplex = temp.group(2) # type: typing.Union[str, int]
is_complex_multiplexed = False
if multiplex == 'M':
multiplex = 'Multiplexor'
elif multiplex.endswith('M'):
is_complex_multiplexed = True
multiplex = multiplex[:-1]
if multiplex != 'Multiplexor':
try:
multiplex = int(multiplex[1:])
except:
raise Exception('error decoding line',line)
extras = {}
# if float_factory is not None:
# extras['float_factory'] = float_factory
tempSig = canmatrix.Signal(
temp.group(1),
start_bit=int(temp.group(3)),
size=int(temp.group(4)),
is_little_endian=(int(temp.group(5)) == 1),
is_signed=(temp.group(6) == '-'),
factor=temp.group(7),
offset=temp.group(8),
min=temp.group(9),
max=temp.group(10),
unit=temp_raw.group(11).decode(dbcImportEncoding),
receivers=receiver,
multiplex=multiplex,
**extras
)
if is_complex_multiplexed:
tempSig.is_multiplexer = True
tempSig.multiplex = 'Multiplexor'
if not tempSig.is_little_endian:
# startbit of motorola coded signals are MSB in dbc
tempSig.set_startbit(int(temp.group(3)), bitNumbering=1)
frame.add_signal(tempSig)
if is_complex_multiplexed:
frame.is_complex_multiplexed = True
elif decoded.startswith("BO_TX_BU_ "):
regexp = re.compile(r"^BO_TX_BU_ ([0-9]+) *: *(.+);")
temp = regexp.match(decoded)
frame = get_frame_by_id(canmatrix.ArbitrationId.from_compound_integer(int(temp.group(1))))
for ecu_name in temp.group(2).split(','):
frame.add_transmitter(ecu_name)
elif decoded.startswith("CM_ SG_ "):
pattern = r"^CM_ +SG_ +(\w+) +(\w+) +\"(.*)\";"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
frame = get_frame_by_id(canmatrix.ArbitrationId.from_compound_integer(int(temp.group(1))))
signal = frame.signal_by_name(temp.group(2))
if signal:
try:
signal.add_comment(temp_raw.group(3).decode(
dbcCommentEncoding).replace('\\"', '"'))
except:
logger.error(
"Error decoding line: %d (%s)" %
(i, line))
else:
pattern = r"^CM_ +SG_ +(\w+) +(\w+) +\"(.*)"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
frame = get_frame_by_id(canmatrix.ArbitrationId.from_compound_integer(int(temp.group(1))))
signal = frame.signal_by_name(temp.group(2))
try:
comment = temp_raw.group(3).decode(
dbcCommentEncoding).replace('\\"', '"')
except:
logger.error(
"Error decoding line: %d (%s)" %
(i, line))
followUp = FollowUps.signalComment
elif decoded.startswith("CM_ BO_ "):
pattern = r"^CM_ +BO_ +(\w+) +\"(.*)\";"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
frame = get_frame_by_id(canmatrix.ArbitrationId.from_compound_integer(int(temp.group(1))))
if frame:
try:
frame.add_comment(temp_raw.group(2).decode(
dbcCommentEncoding).replace('\\"', '"'))
except:
logger.error(
"Error decoding line: %d (%s)" %
(i, line))
else:
pattern = r"^CM_ +BO_ +(\w+) +\"(.*)"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
frame = get_frame_by_id(canmatrix.ArbitrationId.from_compound_integer(int(temp.group(1))))
try:
comment = temp_raw.group(2).decode(
dbcCommentEncoding).replace('\\"', '"')
except:
logger.error(
"Error decoding line: %d (%s)" %
(i, line))
followUp = FollowUps.frameComment
elif decoded.startswith("CM_ BU_ "):
pattern = r"^CM_ +BU_ +(\w+) +\"(.*)\";"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
boardUnit = db.ecu_by_name(temp.group(1))
if boardUnit:
try:
boardUnit.add_comment(temp_raw.group(2).decode(
dbcCommentEncoding).replace('\\"', '"'))
except:
logger.error(
"Error decoding line: %d (%s)" %
(i, line))
else:
pattern = r"^CM_ +BU_ +(\w+) +\"(.*)"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
boardUnit = db.ecu_by_name(temp.group(1))
if boardUnit:
try:
comment = temp_raw.group(2).decode(
dbcCommentEncoding).replace('\\"', '"')
except:
logger.error(
"Error decoding line: %d (%s)" %
(i, line))
followUp = FollowUps.boardUnitComment
elif decoded.startswith("BU_:"):
pattern = r"^BU_\:(.*)"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
if temp:
myTempListe = temp.group(1).split(' ')
for ele in myTempListe:
if len(ele.strip()) > 1:
db.ecus.append(canmatrix.Ecu(ele))
elif decoded.startswith("VAL_ "):
regexp = re.compile(r"^VAL_ +(\w+) +(\w+) +(.*);")
temp = regexp.match(decoded)
if temp:
frame_id = temp.group(1)
signal_name = temp.group(2)
tempList = temp.group(3).split('"')
if frame_id.isnumeric(): # value for Frame
try:
frame = get_frame_by_id(canmatrix.ArbitrationId.from_compound_integer(int(frame_id)))
sg = frame.signal_by_name(signal_name)
for i in range(math.floor(len(tempList) / 2)):
val = tempList[i * 2 + 1]
if sg:
sg.add_values(tempList[i * 2], val)
except:
logger.error("Error with Line: " + str(tempList))
else:
logger.info("Warning: enviroment variables currently not supported")
elif decoded.startswith("VAL_TABLE_ "):
regexp = re.compile(r"^VAL_TABLE_ +(\w+) +(.*);")
temp = regexp.match(decoded)
if temp:
tableName = temp.group(1)
tempList = temp.group(2).split('"')
try:
valHash = {}
for i in range(math.floor(len(tempList) / 2)):
val = tempList[i * 2 + 1]
valHash[tempList[i * 2].strip()] = val.strip()
except:
logger.error("Error with Line: " + str(tempList))
db.add_value_table(tableName, valHash)
else:
logger.debug(l)
elif decoded.startswith("BA_DEF_") and decoded[7:].strip()[:3] in ["SG_", "BO_", "BU_", "EV_"]:
substring = decoded[7:].strip()
define_type = substring[:3]
substring = substring[3:].strip()
pattern = r"^\"(.+?)\" +(.+);"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(substring)
substring_line = l[7:].strip()[3:].strip()
temp_raw = regexp_raw.match(substring_line)
if temp:
if define_type == "SG_":
db.add_signal_defines(temp.group(1), temp_raw.group(2).decode(dbcImportEncoding))
elif define_type == "BO_":
db.add_frame_defines(temp.group(1), temp_raw.group(2).decode(dbcImportEncoding))
elif define_type == "BU_":
db.add_ecu_defines(temp.group(1), temp_raw.group(2).decode(dbcImportEncoding))
elif define_type == "EV_":
db.add_env_defines(temp.group(1), temp_raw.group(2).decode(dbcImportEncoding))
elif decoded.startswith("BA_DEF_ "):
pattern = r"^BA_DEF_ +\"(.+?)\" +(.+);"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
db.add_global_defines(temp.group(1),
temp_raw.group(2).decode(dbcImportEncoding))
elif decoded.startswith("BA_ "):
regexp = re.compile(r"^BA_ +\".+?\" +(.+)")
tempba = regexp.match(decoded)
if tempba.group(1).strip().startswith("BO_ "):
regexp = re.compile(r"^BA_ +\"(.+?)\" +BO_ +(\d+) +(.+);")
temp = regexp.match(decoded)
get_frame_by_id(canmatrix.ArbitrationId.from_compound_integer(int(temp.group(2)))).add_attribute(
temp.group(1), temp.group(3))
elif tempba.group(1).strip().startswith("SG_ "):
regexp = re.compile(r"^BA_ +\"(.+?)\" +SG_ +(\d+) +(\w+) +(.+);")
temp = regexp.match(decoded)
if temp != None:
get_frame_by_id(canmatrix.ArbitrationId.from_compound_integer(int(temp.group(2)))).signal_by_name(
temp.group(3)).add_attribute(temp.group(1), temp.group(4))
elif tempba.group(1).strip().startswith("EV_ "):
regexp = re.compile(r"^BA_ +\"(.+?)\" +EV_ +(\w+) +(.*);")
temp = regexp.match(decoded)
if temp != None:
db.add_env_attribute(temp.group(2),temp.group(1),temp.group(3))
elif tempba.group(1).strip().startswith("BU_ "):
regexp = re.compile(r"^BA_ +\"(.*?)\" +BU_ +(\w+) +(.+);")
temp = regexp.match(decoded)
db.ecu_by_name(
temp.group(2)).add_attribute(
temp.group(1),
temp.group(3))
else:
regexp = re.compile(
r"^BA_ +\"([A-Za-z0-9\-_]+)\" +([\"\w\-\.]+);")
temp = regexp.match(decoded)
if temp:
db.add_attribute(temp.group(1), temp.group(2))
elif decoded.startswith("SIG_GROUP_ "):
regexp = re.compile(r"^SIG_GROUP_ +(\w+) +(\w+) +(\w+) +\:(.*);")
temp = regexp.match(decoded)
frame = get_frame_by_id(canmatrix.ArbitrationId.from_compound_integer(int(temp.group(1))))
if frame is not None:
signalArray = temp.group(4).split(' ')
frame.add_signal_group(temp.group(2), temp.group(3), signalArray) # todo wrong annotation in canmatrix? Id is a string?
elif decoded.startswith("SIG_VALTYPE_ "):
regexp = re.compile(r"^SIG_VALTYPE_ +(\w+) +(\w+)\s*\:(.*);")
temp = regexp.match(decoded)
frame = get_frame_by_id(canmatrix.ArbitrationId.from_compound_integer(int(temp.group(1))))
if frame:
signal = frame.signal_by_name(temp.group(2))
signal.is_float = True
# SIG_VALTYPE_ 0 float : 1;
elif decoded.startswith("BA_DEF_DEF_ "):
pattern = r"^BA_DEF_DEF_ +\"(.+?)\" +(.+?)\;"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
db.add_define_default(temp.group(1),
temp_raw.group(2).decode(dbcImportEncoding))
elif decoded.startswith("SG_MUL_VAL_ "):
pattern = r"^SG_MUL_VAL_ +([0-9]+) +([\w\-]+) +([\w\-]+) +([0-9]+)\-([0-9]+) *;"
regexp = re.compile(pattern)
regexp_raw = re.compile(pattern.encode(dbcImportEncoding))
temp = regexp.match(decoded)
temp_raw = regexp_raw.match(l)
if temp:
frameId = temp.group(1)
signalName = temp.group(2)
muxerForSignal = temp.group(3)
muxValMin = int(temp.group(4))
muxValMax = int(temp.group(5))
frame = get_frame_by_id(canmatrix.ArbitrationId.from_compound_integer(int(frameId)))
if frame is not None:
signal = frame.signal_by_name(signalName)
frame.is_complex_multiplexed = True
signal.muxer_for_signal = muxerForSignal
signal.mux_val_min = muxValMin
signal.mux_val_max = muxValMax
elif decoded.startswith("EV_ "):
pattern = r"^EV_ +([\w\-\_]+?) *\: +([0-9]+) +\[([0-9.+\-eE]+)\|([0-9.+\-eE]+)\] +\"(.*?)\" +([0-9.+\-eE]+) +([0-9.+\-eE]+) +([\w\-]+?) +(.*);"
regexp = re.compile(pattern)
temp = regexp.match(decoded)
varName = temp.group(1)
varType = temp.group(2)
min = temp.group(3)
max = temp.group(4)
unit = temp.group(5)
initialValue = temp.group(6)
evId = temp.group(7)
accessType = temp.group(8)
accessNodes = temp.group(9).split(",")
db.add_env_var(varName, {"varType": varType, "min" : min, "max" : max,
"unit" : unit, "initialValue" : initialValue, "evId" : evId,
"accessType" : accessType, "accessNodes" : accessNodes})
# else:
except:
print ("error with line no: %d" % i)
print (line)
# Backtracking
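# (Descriptive note) DBC identifiers are limited to 32 characters, so the import above
# carries full names in the SystemEnvVarLongSymbol / SystemNodeLongSymbol /
# SystemMessageLongSymbol / SystemSignalLongSymbol attributes; the loop below restores
# the long names and removes those helper attributes.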
for env_var_name, env_var in db.env_vars.items():
if 'SystemEnvVarLongSymbol' in env_var.get("attributes", ""):
long_name = env_var["attributes"]["SystemEnvVarLongSymbol"][1:-1]
del(env_var["attributes"]["SystemEnvVarLongSymbol"])
db.env_vars[long_name] = db.env_vars.pop(env_var_name)
for ecu in db.ecus:
if ecu.attributes.get("SystemNodeLongSymbol", None) is not None:
ecu.name = ecu.attributes.get("SystemNodeLongSymbol")[1:-1]
ecu.del_attribute("SystemNodeLongSymbol")
for frame in db.frames:
if frame.attributes.get("SystemMessageLongSymbol", None) is not None:
frame.name = frame.attributes.get("SystemMessageLongSymbol")[1:-1]
frame.del_attribute("SystemMessageLongSymbol")
# receiver is only given in the signals, so do propagate the receiver
# to the frame:
frame.update_receiver()
# extended-flag is implicit in canid, thus repair this:
#if frame.id > 0x80000000:
# frame.id -= 0x80000000
# frame.extended = 1
for signal in frame.signals:
if signal.attribute("SystemSignalLongSymbol") is not None:
signal.name = signal.attribute("SystemSignalLongSymbol")[1:-1]
signal.del_attribute("SystemSignalLongSymbol")
for define in db.global_defines:
if db.global_defines[define].type == "STRING":
if define in db.attributes:
db.attributes[define] = db.attributes[define][1:-1]
for define in db.ecu_defines:
if db.ecu_defines[define].type == "STRING":
for ecu in db.ecus:
if define in ecu.attributes:
ecu.attributes[define] = ecu.attributes[define][1:-1]
for define in db.frame_defines:
if db.frame_defines[define].type == "STRING":
for frame in db.frames:
if define in frame.attributes:
frame.attributes[define] = frame.attributes[define][1:-1]
for define in db.signal_defines:
if db.signal_defines[define].type == "STRING":
for frame in db.frames:
for signal in frame.signals:
if define in signal.attributes:
signal.attributes[define] = signal.attributes[define][1:-1]
db.enum_attribs_to_values()
for frame in db.frames:
if "_FD" in frame.attributes.get("VFrameFormat", ""):
frame.is_fd = True
if "J1939PG" in frame.attributes.get("VFrameFormat", ""):
frame.is_j1939 = True
db.update_ecu_list()
db.del_ecu("Vector__XXX")
free_signals_dummy_frame = db.frame_by_name("VECTOR__INDEPENDENT_SIG_MSG")
if free_signals_dummy_frame is not None and free_signals_dummy_frame.arbitration_id.id == 0x40000000:
db.signals = free_signals_dummy_frame.signals
db.del_frame(free_signals_dummy_frame)
return db
```
#### File: canmatrix/tests/test_dbc.py
```python
import io
import textwrap
import string
import pytest
import canmatrix.formats.dbc
def test_long_signal_name_imports():
long_signal_name = u'FAILURE_ZELL_UNTERTEMPERATUR_ENTLADEN_ALARM_IDX_01'
assert len(long_signal_name) > 32
dbc = io.BytesIO(textwrap.dedent(u'''\
BO_ 1 testFrame1: 1 TEST_ECU
SG_ someShortenedDummyName: 1|2@0+ (1,0) [0|0] "" CCL_TEST
BA_ "SystemSignalLongSymbol" SG_ 1 someShortenedDummyName "{}";
''').format(long_signal_name).encode('utf-8'))
matrix = canmatrix.formats.dbc.load(dbc)
assert matrix.frames[0].signals[0].name == long_signal_name
outdbc = io.BytesIO()
canmatrix.formats.dump(matrix, outdbc, "dbc")
long_name_found = False
name_found = False
for line in outdbc.getvalue().decode('utf8').split('\n'):
if line.strip().startswith("SG_"):
assert len(line.split()[1]) <= 32
name_found = True
if line.strip().startswith("BA_ "):
assert line.split()[5][1:-2] == long_signal_name
long_name_found = True
assert long_name_found is True
assert name_found is True
def test_create_define():
defaults = {}
test_string = canmatrix.formats.dbc.create_define("my_data_type", canmatrix.Define('ENUM "A","B"'), "BA_", defaults)
assert test_string == 'BA_DEF_ BA_ "my_data_type" ENUM "A","B";\n'
def test_create_attribute_string():
test_string = canmatrix.formats.dbc.create_attribute_string("my_attribute", "BO_", "name", "value", True)
assert test_string == 'BA_ "my_attribute" BO_ name "value";\n'
test_string = canmatrix.formats.dbc.create_attribute_string("my_attribute", "BO_", "name", 1.23, False)
assert test_string == 'BA_ "my_attribute" BO_ name 1.23;\n'
def test_create_comment_string():
test_string = canmatrix.formats.dbc.create_comment_string("BO_", "ident", "some comment", "utf8", "utf8")
assert test_string == b'CM_ BO_ ident "some comment";\n'
def test_long_frame_name_imports():
long_frame_name = u'A_VERY_LONG_FRAME_NAME_WHICH_SHOULD_BE_SPLIT_SOMEHOW'
assert len(long_frame_name) > 32
dbc = io.BytesIO(textwrap.dedent(u'''\
BO_ 1 shortendeFrameName: 1 someEcu
SG_ someSignal: 1|2@0+ (1,0) [0|0] "" CCL_TEST
BA_ "SystemMessageLongSymbol" BO_ 1 "{}";
''').format(long_frame_name).encode('utf-8'))
matrix = canmatrix.formats.dbc.load(dbc)
long_name_found = False
name_found = False
assert matrix.frames[0].name == long_frame_name
outdbc = io.BytesIO()
canmatrix.formats.dump(matrix, outdbc, "dbc")
for line in outdbc.getvalue().decode('utf8').split('\n'):
if line.strip().startswith("BO_"):
assert len(line.split()[2][:-1]) <= 32
name_found = True
if line.strip().startswith("BA_ "):
assert line.split()[4][1:-2] == long_frame_name
long_name_found = True
assert long_name_found is True
assert name_found is True
def test_long_ecu_name_imports():
long_ecu_name = u'A_VERY_LONG_ECU_NAME_WHICH_SHOULD_BE_SPLIT_SOMEHOW'
assert len(long_ecu_name) > 32
dbc = io.BytesIO(textwrap.dedent(u'''\
BU_: SoMEShortenedEcuName
BO_ 1 testFrame1: 1 SoMEShortenedEcuName
SG_ someSignal: 1|2@0+ (1,0) [0|0] "" CCL_TEST
BA_ "SystemNodeLongSymbol" BU_ SoMEShortenedEcuName "{}";
''').format(long_ecu_name).encode('utf-8'))
matrix = canmatrix.formats.dbc.load(dbc)
long_name_found = False
name_found = False
assert matrix.ecus[0].name == long_ecu_name
outdbc = io.BytesIO()
canmatrix.formats.dump(matrix, outdbc, "dbc")
for line in outdbc.getvalue().decode('utf8').split('\n'):
if line.strip().startswith("BU_"):
assert len(line.split()[1]) <= 32
name_found = True
if line.strip().startswith("BA_ "):
assert line.split()[4][1:-2] == long_ecu_name
long_name_found = True
assert long_name_found is True
assert name_found is True
def test_long_envvar_name_imports():
long_envvar_name = u'A_VERY_LONG_ENVIROMENT_NAME_WHICH_SHOULD_BE_SPLIT_SOMEHOW'
assert len(long_envvar_name) > 32
dbc = io.BytesIO(textwrap.dedent(u'''\
BO_ 1 frameName: 1 someEcu
SG_ someSignal: 1|2@0+ (1,0) [0|0] "" CCL_TEST
EV_ someShortendEnvVar: 0 [0|0] "" 0 2 DUMMY_NODE_VECTOR0 Vector__XXX;
BA_ "SystemEnvVarLongSymbol" EV_ someShortendEnvVar "{}";
''').format(long_envvar_name).encode('utf-8'))
matrix = canmatrix.formats.dbc.load(dbc)
assert list(matrix.env_vars)[0] == long_envvar_name
outdbc = io.BytesIO()
canmatrix.formats.dump(matrix, outdbc, "dbc")
long_name_found = False
name_found = False
for line in outdbc.getvalue().decode('utf8').split('\n'):
if line.strip().startswith("EV_"):
assert len(line.split()[1]) <= 32
name_found = True
if line.strip().startswith("BA_ "):
assert line.split()[3][1:-2] == long_envvar_name
long_name_found = True
assert long_name_found is True
assert name_found is True
def test_enum_with_comma():
dbc = io.BytesIO(textwrap.dedent(u'''\
BA_DEF_ "example0" ENUM "Val1",",";
BA_DEF_ BO_ "example1" ENUM "Val 1","vector_leerstring",""," ","'","(",")","[","]","/","-","|","{","}",";",":","<",">",".","?","!","@","#","$","%","^","&","=","`","~";
BA_DEF_ SG_ "example2" ENUM "Val1",",";
BA_DEF_ EV_ "example3" ENUM "Val1",",";
BA_DEF_ BU_ "example4" ENUM "Val1",",";
BA_DEF_DEF_ "example0" ",";
BA_DEF_DEF_ "example1" ",";
BA_DEF_DEF_ "example2" ",";
BA_DEF_DEF_ "example3" ",";
BA_DEF_DEF_ "example4" ",";
''').encode('utf-8'))
matrix = canmatrix.formats.dbc.load(dbc, dbcImportEncoding="utf8")
assert matrix.frame_defines[u'example1'].values == ["Val 1", "", ""] + list(" '()[]/-|{};:<>.?!@#$%^&=`~")
assert matrix.signal_defines[u'example2'].values == ['Val1', ',']
assert matrix.ecu_defines[u'example4'].values == ['Val1', ',']
@pytest.mark.parametrize(
'character',
[
['{}'.format(c if c != '"' else '\\"')]
for c in string.punctuation
],
)
def test_enum_with_special_character(character):
dbc = io.BytesIO(textwrap.dedent(u'''\
BA_DEF_ BO_ "example1" ENUM "Val 1","{}";
''').format(character[0]).encode('utf-8'))
matrix = canmatrix.formats.dbc.load(dbc, dbcImportEncoding="utf8")
assert matrix.frame_defines[u'example1'].values == ["Val 1", character[0]]
def test_export_of_unknown_defines():
db = canmatrix.CanMatrix()
db.add_frame_defines("Receivable", 'BOOL False True')
db.add_frame_defines("Sendable", 'BOOL False True')
for (dataType, define) in db.frame_defines.items():
orig_definition = define.definition
canmatrix.formats.dbc.check_define(define)
assert orig_definition != define.definition
db.add_signal_defines("LongName", 'STR')
for (dataType, define) in db.signal_defines.items():
orig_definition = define.definition
canmatrix.formats.dbc.check_define(define)
assert orig_definition != define.definition
frame = canmatrix.Frame("someFrame")
signal = canmatrix.Signal("SomeSignal")
signal.add_attribute("LongName", "EnableCalcIDCTrip Calc. IDC trip")
frame.add_signal(signal)
db.add_frame(frame)
db.add_ecu_defines("someName", 'STRING')
for (dataType, define) in db.ecu_defines.items():
orig_definition = define.definition
canmatrix.formats.dbc.check_define(define)
assert orig_definition == define.definition
db.add_global_defines("someGlobaName", 'BOOL')
for (dataType, define) in db.global_defines.items():
orig_definition = define.definition
canmatrix.formats.dbc.check_define(define)
assert orig_definition != define.definition
outdbc = io.BytesIO()
canmatrix.formats.dump(db, outdbc, "dbc")
for line in outdbc.getvalue().decode('utf8').split('\n'):
if line.startswith("BA_DEF_ "):
assert line.endswith("STRING;")
if line.startswith("BA_ "):
assert line.endswith('";')
def test_braces_in_attributes():
dbc = io.BytesIO(textwrap.dedent(u'''\
BO_ 20 frameName: 1 someEcu
SG_ sometext: 1|2@0+ (1,0) [0|0] "" someOtherEcu
BA_ "Signal Age [ms]" SG_ 20 sometext 5000;
''').encode('utf-8'))
matrix = canmatrix.formats.dbc.load(dbc, dbcImportEncoding="utf8")
def test_defines_with_spaces():
dbc = io.BytesIO(textwrap.dedent(u'''\
BU_: someOtherEcu
BO_ 123 someFrame: 1 someOtherEcu
EV_ someEnvVar: 0 [0|0] "" 0 2 DUMMY_NODE_VECTOR0 Vector__XXX;
BA_DEF_ BU_ "Node Address" INT 0 255;
BA_DEF_ BO_ "Period [ms]" INT 0 5000;
BA_DEF_ BU_ "Description X" STRING;
BA_DEF_ EV_ "some attrib" STRING;
BA_ "Node Address" BU_ someOtherEcu 42;
BA_ "Description X" BU_ someOtherEcu "Some Some Text";
BA_ "Period [ms]" BO_ 123 3000;
BA_ "some attrib" EV_ someEnvVar "some space";
''').encode('utf-8'))
matrix = canmatrix.formats.dbc.load(dbc, dbcImportEncoding="utf8")
assert matrix.ecu_defines["Node Address"].type == "INT"
assert matrix.ecu_defines["Node Address"].min == 0
assert matrix.ecu_defines["Node Address"].max == 255
assert matrix.frame_defines["Period [ms]"].min == 0
assert matrix.frame_defines["Period [ms]"].max == 5000
assert matrix.frames[0].attributes["Period [ms]"] == '3000'
assert matrix.env_vars["someEnvVar"]["attributes"]["some attrib"] == '"some space"'
assert matrix.ecus[0].attributes["Description X"] == "Some Some Text"
def test_writing_complex_multiplex():
db = canmatrix.CanMatrix()
frame = canmatrix.Frame("someFrame")
frame.is_complex_multiplexed = True
signal = canmatrix.Signal("mx")
signal.mux_val_max = 5
signal.mux_val_min = 1
signal.muxer_for_signal = 4
frame.add_signal(signal)
db.add_frame(frame)
outdbc = io.BytesIO()
canmatrix.formats.dump(db, outdbc, "dbc")
for line in outdbc.getvalue().decode('utf8').split('\n'):
if "SG_MUL_VAL" in line:
return True
assert False
def test_defines_with_special_cars():
dbc = io.BytesIO(textwrap.dedent(u'''\
BU_: someOtherEcu
BO_ 123 someFrame: 1 someOtherEcu
SG_ someSignal: 1|2@0+ (1,0) [0|0] "" CCL_TEST
BA_DEF_ SG_ "Accuracy" STRING;
BA_ "Accuracy" SG_ 123 someSignal "+/- 10.2 at 55.1%";
''').encode('utf-8'))
matrix = canmatrix.formats.dbc.load(dbc, dbcImportEncoding="utf8")
assert matrix.frames[0].signals[0].attributes["Accuracy"] == "+/- 10.2 at 55.1%"
def test_j1939_frametype():
dbc = io.BytesIO(textwrap.dedent(u'''\
BU_: someOtherEcu
BO_ 2147483648 someFrame: 1 someOtherEcu
SG_ someSignal: 1|2@0+ (1,0) [0|0] "" CCL_TEST
BA_DEF_ BO_ "VFrameFormat" ENUM "StandardCAN","ExtendedCAN","J1939PG";
BA_ "VFrameFormat" BO_ 2147483648 2;
''').encode('utf-8'))
matrix = canmatrix.formats.dbc.load(dbc, dbcImportEncoding="utf8")
assert matrix.frames[0].is_j1939 == True
# negative test
dbc = io.BytesIO(textwrap.dedent(u'''\
BU_: someOtherEcu
BO_ 2147483648 someFrame: 1 someOtherEcu
SG_ someSignal: 1|2@0+ (1,0) [0|0] "" CCL_TEST
BA_DEF_ BO_ "VFrameFormat" ENUM "StandardCAN","ExtendedCAN","J1939PG";
BA_ "VFrameFormat" BO_ 2147483648 0;
''').encode('utf-8'))
matrix = canmatrix.formats.dbc.load(dbc, dbcImportEncoding="utf8")
assert matrix.frames[0].is_j1939 == False
```
#### File: canmatrix/tests/test_j1939_decoder.py
```python
import io
import canmatrix.formats.dbc
import canmatrix.j1939_decoder
import textwrap
import collections
def test_j1939_decoder():
dbc = io.BytesIO(textwrap.dedent(u'''\
BO_ 2566856834 CM_Requests: 9 CGW
SG_ CM_Inlet_MotorRequest : 50|2@0+ (1,0) [0|3] "" CM
SG_ CM_ChargeUnit_Request : 52|2@0+ (1,0) [0|3] "" CM
SG_ CM_RTC_TimerValue : 47|8@0+ (1,0) [0|254] "min" CM
SG_ CM_RTC_TimerRequest : 37|2@0+ (1,0) [0|3] "" CM
SG_ CM_PlugLock_MotorRequest : 35|3@0+ (1,0) [0|7] "" CM
SG_ CM_LED2_Request : 23|8@0+ (0.5,0) [0|100] "%" CM
SG_ CM_LED1_Request : 15|8@0+ (0.5,0) [0|100] "%" CM
SG_ CM_LED0_Request : 7|8@0+ (0.5,0) [0|100] "%" CM
SG_ CM_HighSideOut4_Request : 39|2@0+ (1,0) [0|3] "" CM
SG_ CM_HighSideOut3_Request : 25|2@0+ (1,0) [0|3] "" CM
SG_ CM_HighSideOut2_Request : 27|2@0+ (1,0) [0|3] "" CM
SG_ CM_HighSideOut1_Request : 29|2@0+ (1,0) [0|3] "" CM
SG_ CM_HighSideOut0_Request : 31|2@0+ (1,0) [0|3] "" CM
SG_ CM_ControlPilot_ChargeModeRe : 55|3@0+ (1,0) [0|7] "" CM
''').encode('utf-8'))
matrix = canmatrix.formats.dbc.load(dbc, dbcImportEncoding="utf8")
t = canmatrix.j1939_decoder.j1939_decoder()
# BAM
(type, signals) = t.decode(canmatrix.ArbitrationId(id = 0xec0000, extended= True),
bytearray([0x20,10,0,1,0xff,0x66,0x1,0]), matrix)
assert "BAM " in type
# print (type, signals)
# data 1
(type, signals) = t.decode(canmatrix.ArbitrationId(id = 0xeb0000, extended= True),
bytearray([0x0,1,1,1,1,1,1,1]), matrix)
assert "BAM data" in type
#print (type, signals)
# data 2
(type, signals) = t.decode(canmatrix.ArbitrationId(id = 0xeb0000, extended= True),
bytearray([0x1,1,1,1,1,1,1,1]), matrix)
assert "BAM last data" in type
# print (type, signals)
can_ids = [0x18ECFF82, 0x18EBFF82, 0x18EBFF82]
can_data = [bytearray([0x20, 9, 0, 2, 0xff, 0x20, 0xff, 0]),bytearray([0x1, 0, 0, 0, 0, 0x80, 0x0, 0x80]),bytearray([0x2, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff])]
# BA0x20, M
for i in range(0,len(can_ids)):
(type, signals) = t.decode(canmatrix.ArbitrationId(id=can_ids[i], extended=True),
can_data[i], matrix)
print ("-------- test data -------- ")
test_frames = collections.OrderedDict ([
(0xcef27fd , "fffae1ff00ffff"),
(0xcffcafd , "c0fffffffffff800"),
(0xcf00203 , "cc00000000b812ff"),
(0xfe4a03 , "fffcffffffffffff"),
(0xc010305 , "ccfffaffff204e0a"),
(0x0CF00400, "F4DEDE3028FFF0FF")])
expected = ["EEC1","TC1","ETC7","ETC1"]
for arb_id, asc_data in test_frames.items():
(type, signals) = t.decode(canmatrix.ArbitrationId(id=arb_id, extended=True),
bytearray.fromhex(asc_data), matrix)
if type is not None and "J1939 known" in type:
assert expected.pop() in type
```
#### File: canmatrix/tests/test_xls.py
```python
import canmatrix.formats.xls
import decimal
def test_parse_value_name_collumn():
value_column = "1..5"
(mini, maxi, offset, value_table) = canmatrix.formats.xls.parse_value_name_column(value_column, "5", 4, decimal.Decimal)
assert maxi == 5
assert mini == 1
assert offset == 1
assert value_table == dict()
value_column = "LabelX"
(mini, maxi, offset, value_table) = canmatrix.formats.xls.parse_value_name_column(value_column, "5", 4, decimal.Decimal)
assert maxi == 15
assert mini == 0
assert offset == 0
assert value_table == {5: "LabelX"}
``` |
{
"source": "johnofleek/Octave",
"score": 2
} |
#### File: Octave/edgeDebug/actionLog.py
```python
import sys
import time
import re
# Example "run"
# logread -f | python actionLog.py action_id 876876
# logread -f | python actionLog.py usprxedge:l5f58e19c2ebc93700459b5ca,hmigauge:l5f58e19cda3994a414b567fd,hmisignalled:l5f58e19c2ebc93700459b5c4
# logread -f | python actionLog.py usp:l5f104a5ff936e13e915d6fb9
# Example raw logread OP
# Sep 16 16:08:33 swi-mdm9x28-wp user.info Legato: INFO | actions[1123]/actionRunner T=Javascript | actionRunner_JS.c js_LogInfoHandler() 1007 | action_id:l5f58e19c2ebc93700459b5c4 | [1,14,0,0,0,15]
actions = {}
def index_in_list(a_list, index):
print(index < len(a_list))
# print("\n".join(sys.argv))
## match any action name info
numberOfArgs = len(sys.argv) -1
idd = {}
if (numberOfArgs == 1): # Expected list is ["thisScript", "typeoffilter" ]
processidlist = sys.argv[1]
idlist = processidlist.split(',')
for ids in idlist:
idss = ids.split(':')
idd[idss[1]] = idss[0]
# print idd
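# (Hedged illustration, using the ids from the example invocations above)
# "usp:l5f104a5ff936e13e915d6fb9,hmigauge:l5f58e19cda3994a414b567fd" parses to
# idd = {"l5f104a5ff936e13e915d6fb9": "usp", "l5f58e19cda3994a414b567fd": "hmigauge"}
# so each action_id found in the log can be mapped back to a friendly name.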
elif (numberOfArgs == 2): # Expected list is ["thisScript", "typeoffilter", "filter" ]
processidlist = sys.argv[1]
processfilter = sys.argv[2]
# process data from logread
try:
buff = ''
while True:
buff += sys.stdin.read(1)
if buff.endswith('\n'):
line = buff[:-1]
#print line
info = line.split('|')
try:
if ("action_id:" not in line ):
pass
else:
timeofday = info[0].split()
timeofday = timeofday[2]
id = info[3].strip()
id = re.sub('action_id:', '', id)
# try and find a matching id name
# for key in id.keys():
# if key
consolelog = info[4].strip()
idname = "xxxxxxxxxx"
if id in idd:
idname = idd[id]
print timeofday.ljust(10)[:10], id.ljust(28)[:28], idname.ljust(18)[:18] , consolelog
except Exception as e:
print(e)
buff = ''
except KeyboardInterrupt:
sys.stdout.flush()
pass
``` |
{
"source": "johnofleek/OctaveOrp",
"score": 2
} |
#### File: OctaveOrp/Python2/runEmu.py
```python
import os
from json import dumps
import logging
from platform import platform
from psutil import cpu_percent, virtual_memory
from serial import Serial
from time import sleep
from sb_serial import Sensor, SbSerial
# Change the serial port to suit the machine that this running on
# and the OS
#DEV = os.getenv('DEV', '/dev/ttyS0')
#DEV = os.getenv('DEV', '/dev/tty.SLAB_USBtoUART')
DEV = os.getenv('DEV', 'COM11')
# Create serial object and use it to create SbSerial connection
s = Serial(DEV)
sbs = SbSerial(s)
def json_measurement_handler():
# note python dict has bool True/False JSON true/False
dictPayload = {
"temperature": 123.2,
"waterLevelHigh": True,
"waterLevelLow": False,
"Turbidity": 45,
"DissolvedOxygen": 78,
"Debris": True,
"flowIn": 12.5,
"flowOut": 11.8
}
# return dictionary convert to JSON
return dumps(dictPayload)
def json_settings_handler():
# note python dict has bool True/False JSON true/False
dictPayload = {
"Fog_roller_OFF":300,
"Fog_roller_ON":300,
"Recirculation_OFF":90,
"Recirculation_ON":10,
"Service_pump_OFF":2550,
"Service_pump_ON":90,
"Solenoid_valve_OFF":2550,
"Solenoid_valve_ON":120,
"cleaningCycles":3,
"unitOFF":2,
"unitON":7
}
# return dictionary convert to JSON
return dumps(dictPayload)
# Create sensor objects with SbSerial connection, callback, type,
# resource path, and optionally unit of measure
sensors = [
Sensor(sbs, json_measurement_handler, 'json', 'sensors/uplinkMeasured'),
Sensor(sbs, json_settings_handler, 'json', 'sensors/controlSettings')
]
[sensor.create_sensor() for sensor in sensors]
# Run Forever
while True:
try:
sleep(10)
print("loop")
except KeyboardInterrupt:
exit(0)
```
#### File: Python2v300/octave_rp/protocol.py
```python
SBR_PKT_RQST_INPUT_CREATE = 'I' # type[1] d_type[1] pad[2] path[] units[]
SBR_PKT_RESP_INPUT_CREATE = 'i' # type[1] status[1] pad[2]
SBR_PKT_RQST_OUTPUT_CREATE = 'O' # type[1] d_type[1] pad[2] path[] units[]
SBR_PKT_RESP_OUTPUT_CREATE = 'o' # type[1] status[1] pad[2]
SBR_PKT_RQST_DELETE = 'D' # type[1] pad[1] pad[2] path[]
SBR_PKT_RESP_DELETE = 'd' # type[1] status[1] pad[2]
SBR_PKT_RQST_HANDLER_ADD = 'H' # type[1] pad[1] pad[2] path[]
SBR_PKT_RESP_HANDLER_ADD = 'h' # type[1] status[1] pad[2]
SBR_PKT_RQST_HANDLER_REMOVE = 'K' # type[1] pad[1] pad[2] path[]
SBR_PKT_RESP_HANDLER_REMOVE = 'k' # type[1] status[1] pad[2]
SBR_PKT_RQST_PUSH = 'P' # type[1] d_type[1] pad[2] time[] path[] data[]
SBR_PKT_RESP_PUSH = 'p' # type[1] status[1] pad[2]
SBR_PKT_RQST_GET = 'G' # type[1] pad[1] pad[2] path[]
SBR_PKT_RESP_GET = 'g' # type[1] status[1] pad[2] time[] data[]
SBR_PKT_RQST_EXAMPLE_SET = 'E' # type[1] d_type[1] pad[2] path[] data[]
SBR_PKT_RESP_EXAMPLE_SET = 'e' # type[1] status[1] pad[2]
SBR_PKT_RQST_SENSOR_CREATE = 'S' # type[1] d_type[1] pad[2] path[] units[]
SBR_PKT_RESP_SENSOR_CREATE = 's' # type[1] status[1] pad[2]
SBR_PKT_RQST_SENSOR_REMOVE = 'R' # type[1] pad[1] pad[2] path[]
SBR_PKT_RESP_SENSOR_REMOVE = 'r' # type[1] status[1] pad[2]
SBR_PKT_NTFY_HANDLER_CALL = 'c' # type[1] d_type[1] pad[2] time[] path[] data[]
SBR_PKT_RESP_HANDLER_CALL = 'C' # type[1] status[1] pad[2]
SBR_PKT_NTFY_SENSOR_CALL = 'b' # type[1] pad[1] pad[2] path[]
SBR_PKT_RESP_SENSOR_CALL = 'B' # type[1] status[1] pad[2]
SBR_PKT_RESP_UNKNOWN_RQST = '?' # type[1] status[1] pad[2]
#
# Data type field - byte 1
#
SBR_DATA_TYPE_TRIGGER = 'T' # trigger - no data
SBR_DATA_TYPE_BOOLEAN = 'B' # Boolean - 1 byte: 't' | 'f'
SBR_DATA_TYPE_NUMERIC = 'N' # numeric - null-terminated ASCII string, representing double
SBR_DATA_TYPE_STRING = 'S' # string - null-terminated ASCII string
SBR_DATA_TYPE_JSON = 'J' # JSON - null-terminated ASCII string, representing JSON
SBR_DATA_TYPE_UNDEF = ' ' # not specified
#
# Variable length field identifiers
#
SBR_FIELD_ID_PATH = 'P'
SBR_FIELD_ID_TIME = 'T'
SBR_FIELD_ID_UNITS = 'U'
SBR_FIELD_ID_DATA = 'D'
# Variable length field separator
SBR_VARLENGTH_SEPARATOR = ','
#
# Packet type descriptions
#
ptypes = [
[ SBR_PKT_RQST_INPUT_CREATE, 'request create input' ],
[ SBR_PKT_RESP_INPUT_CREATE, 'response create input' ],
[ SBR_PKT_RQST_OUTPUT_CREATE, 'request create output' ],
[ SBR_PKT_RESP_OUTPUT_CREATE, 'response create output' ],
[ SBR_PKT_RQST_DELETE, 'request delete resource' ],
[ SBR_PKT_RESP_DELETE, 'response delete resource' ],
[ SBR_PKT_RQST_HANDLER_ADD, 'request add handler' ],
[ SBR_PKT_RESP_HANDLER_ADD, 'response add handler' ],
[ SBR_PKT_RQST_HANDLER_REMOVE, 'request remove handler' ],
[ SBR_PKT_RESP_HANDLER_REMOVE, 'response remove handler' ],
[ SBR_PKT_RQST_PUSH, 'request push' ],
[ SBR_PKT_RESP_PUSH, 'response push' ],
[ SBR_PKT_RQST_GET, 'request get' ],
[ SBR_PKT_RESP_GET, 'response get' ],
[ SBR_PKT_RQST_EXAMPLE_SET, 'request set example' ],
[ SBR_PKT_RESP_EXAMPLE_SET, 'response set example' ],
[ SBR_PKT_RQST_SENSOR_CREATE, 'request create sensor' ],
[ SBR_PKT_RESP_SENSOR_CREATE, 'response create sensor' ],
[ SBR_PKT_RQST_SENSOR_REMOVE, 'request remove sensor' ],
[ SBR_PKT_RESP_SENSOR_REMOVE, 'response remove sensor' ],
[ SBR_PKT_NTFY_HANDLER_CALL, 'handler call' ],
[ SBR_PKT_RESP_HANDLER_CALL, 'handler ack' ],
[ SBR_PKT_NTFY_SENSOR_CALL, 'sensor poll' ],
[ SBR_PKT_RESP_SENSOR_CALL, 'sensor poll ack' ],
[ SBR_PKT_RESP_UNKNOWN_RQST, 'unknown packet type' ],
]
#
# Status field
#
status_list = [
'OK',
'NOT FOUND',
'NOT POSSIBLE', # deprecated
'OUT OF RANGE',
'NO MEMORY',
'NOT PERMITTED',
'FAULT',
'COMM ERROR',
'TIMEOUT',
'OVERFLOW',
'UNDERFLOW',
'WOULD BLOCK',
'DEADLOCK',
'FORMAT ERROR',
'DUPLICATE',
'BAD PARAMETER',
'CLOSED',
'BUSY',
'UNSUPPORTED',
'IO_ERROR',
'NOT IMPLEMENTED',
'UNAVAILABLE',
'TERMINATED'
]
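#
# (Hedged note) On the wire the status byte is ASCII starting at '@' (0x40), so
# '@' maps to 'OK', 'A' to 'NOT FOUND', 'B' to 'NOT POSSIBLE', and so on; see
# decode_response() below for the lookup.
#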
#
# Data types
#
data_types = [
[ 'trig', SBR_DATA_TYPE_TRIGGER ],
[ 'bool', SBR_DATA_TYPE_BOOLEAN ],
[ 'num', SBR_DATA_TYPE_NUMERIC ],
[ 'str', SBR_DATA_TYPE_STRING ],
[ 'json', SBR_DATA_TYPE_JSON ]
]
#
# Syntax
#
syntax_list = [
' create input|output|sensor trig|bool|num|str|json <path> [<units>]',
' delete resource|handler|sensor <path>',
' add handler <path>',
' push trig|bool|num|str|json <path> [<data>]',
' get <path>',
' example json <path> [<data>]'
]
#
# Usage
#
def print_usage():
print 'Usage:'
for i in range(len(syntax_list)):
print syntax_list[i]
print
#
# Encode data type
#
def encode_dtype(data_type):
field = ''
dtype = data_type.lower()
if dtype[0] == 't':
field = field + SBR_DATA_TYPE_TRIGGER
elif dtype[0] == 'b':
field = field + SBR_DATA_TYPE_BOOLEAN
elif dtype[0] == 'n':
field = field + SBR_DATA_TYPE_NUMERIC
elif dtype[0] == 's':
field = field + SBR_DATA_TYPE_STRING
elif dtype[0] == 'j':
field = field + SBR_DATA_TYPE_JSON
else:
print 'Invalid data type'
return
return field
#
# Encode segment number and segment count
#
def encode_segment():
return '01'
#
# Encode path
#
def encode_path(path):
return SBR_FIELD_ID_PATH + path
#
# Encode units
#
def encode_units(units):
return SBR_FIELD_ID_UNITS + units
#
# Encode data
#
def encode_data(data):
return SBR_FIELD_ID_DATA + data
#
# create input|output|sensor data-type path [units]
#
def encode_create(argc, args):
packet = ''
dtype = ''
syntax = syntax_list[0]
if argc < 3 :
print 'Invalid number of arguments'
print syntax_list[0]
return
if argc > 3 :
what,data_type,path,units = args.split(' ')
else:
what,data_type,path = args.split(' ')
units = None
what = what.lower()
if what[0] == 'i':
packet = packet + SBR_PKT_RQST_INPUT_CREATE
elif what[0] == 'o':
packet = packet + SBR_PKT_RQST_OUTPUT_CREATE
elif what[0] == 's':
packet = packet + SBR_PKT_RQST_SENSOR_CREATE
else:
print 'Invalid request'
print syntax_list[0]
return
dtype = encode_dtype(data_type)
if dtype == '':
return
packet = packet + dtype
packet = packet + encode_segment()
packet = packet + encode_path(path)
if units != None:
packet = packet + SBR_VARLENGTH_SEPARATOR
packet = packet + encode_units(units)
return packet
#
# delete resource|handler|sensor path
#
def encode_delete(argc, args):
packet = ''
if argc < 2 :
print 'Invalid number of arguments'
print syntax_list[1]
return
what,path = args.split(' ')
what = what.lower()
# packet type
if what[0] == 'r':
packet = packet + SBR_PKT_RQST_DELETE
elif what[0] == 'h':
packet = packet + SBR_PKT_RQST_HANDLER_REMOVE
elif what[0] == 's':
packet = packet + SBR_PKT_RQST_SENSOR_REMOVE
else:
print 'Invalid request'
print syntax_list[1]
return
packet = packet + '.'
packet = packet + encode_segment()
packet = packet + encode_path(path)
return packet
#
# add handler path
#
def encode_add(argc, args):
packet = ''
if argc < 2 :
print 'Invalid number of arguments'
print syntax_list[2]
return
what,path = args.split(' ')
what = what.lower()
if what[0] != 'h':
print 'Invalid request ' + what
print syntax_list[2]
return
packet = packet + SBR_PKT_RQST_HANDLER_ADD
# data type - ignored
packet = packet + '.'
packet = packet + encode_segment()
packet = packet + encode_path(path)
return packet
#
# push data-type path [data]
#
def encode_push(argc, args):
packet = ''
dtype = ''
if argc < 2 :
print 'Invalid number of arguments'
print syntax_list[3]
return
print 'argc: ' + str(argc)
if argc > 2 :
data_type,path,data = args.split(' ', 2)
else:
data_type,path = args.split(' ')
data = ''
packet = packet + SBR_PKT_RQST_PUSH
dtype = encode_dtype(data_type)
if dtype == '':
return
packet = packet + dtype
packet = packet + encode_segment()
packet = packet + encode_path(path)
if data != '':
packet = packet + SBR_VARLENGTH_SEPARATOR
packet = packet + encode_data(data)
return packet
#
# get path
#
def encode_get(argc, args):
packet = ''
dtype = ''
if argc < 1 :
print 'Invalid number of arguments'
print syntax_list[4]
return
packet = packet + SBR_PKT_RQST_GET
# data type ignored
packet = packet + '.'
packet = packet + encode_segment()
packet = packet + encode_path(args)
return packet
#
# example data-type path [data]
#
def encode_example(argc, args):
packet = ''
dtype = ''
if argc < 2 :
print 'Invalid number of arguments'
print syntax_list[5]
return
if argc > 2 :
data_type,path,data = args.split(' ', 2)
else:
data_type,path = args.split(' ')
data = ''
packet = packet + SBR_PKT_RQST_EXAMPLE_SET
dtype = encode_dtype(data_type)
if dtype == '':
return
packet = packet + dtype
packet = packet + encode_segment()
packet = packet + encode_path(path)
if data != '':
packet = packet + SBR_VARLENGTH_SEPARATOR
packet = packet + encode_data(data)
return packet
#
# Parse command and build a request packet
#
def encode_request(request):
if request.find(' ') < 0 :
print_usage()
return
argc = len(request.split(' ')) - 1
# all commands take at least one argument
if argc < 1 :
print_usage()
return
request_type,args = request.split(' ', 1)
request_type = request_type.lower()
if request_type[0] == 'c':
p = encode_create(argc, args)
elif request_type[0] == 'd':
p = encode_delete(argc, args)
elif request_type[0] == 'a':
p = encode_add(argc, args)
elif request_type[0] == 'p':
p = encode_push(argc, args)
elif request_type[0] == 'g':
p = encode_get(argc, args)
elif request_type[0] == 'e':
p = encode_example(argc, args)
elif request_type[0] == 'r':
p = args
else:
print_usage()
return
return p
#
# Decode and print contents of an incoming packet
#
def decode_response(response):
resp = {}
# Positional fields:
ptype = response[0]
status = response[1]
seg_number = response[2]
seg_count = response[3]
# Labeled, variable length fields
var_length = response[4:]
for i in range(len(ptypes)):
test = ptypes[i]
if test[0] == ptype:
resp['responseType'] = test[0]
# Status is represented in ASCII, starting with '@' (0x40) for OK.
# Subtract 0x40 to index into the table, above
#
i = ord(status[0]) - ord('\x40')
resp['status'] = status_list[i]
if len(var_length):
var_fields = var_length.split(',')
for i in range(len(var_fields)):
field = var_fields[i]
if field[0] == 'P':
resp['path'] = field[1:]
if field[0] == 'T':
resp['timestamp'] = field[1:]
if field[0] == 'D':
resp['data'] = field[1:]
return resp
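#
# (Hedged usage sketch - not part of the original module; values are illustrative)
#
# pkt = encode_request('push num sensors/temp 21.5')
# # pkt == 'PN01Psensors/temp,D21.5'
# resp = decode_response('p@01Psensors/temp')
# # resp == {'responseType': 'p', 'status': 'OK', 'path': 'sensors/temp'}
#
# Framing and CRC of the packet on the serial link are handled outside this module.
#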
```
#### File: octave_rp/PyCRC/CRC32.py
```python
from ctypes import c_ulong
class CRC32(object):
crc32_tab = []
# The CRC's are computed using polynomials. Here is the most used
# coefficient for CRC32
crc32_constant = 0xEDB88320
def __init__(self):
# initialize the precalculated tables
if not len(self.crc32_tab):
self.init_crc32()
def calculate(self, input_data=None):
try:
is_string = isinstance(input_data, str)
is_bytes = isinstance(input_data, bytes)
if not is_string and not is_bytes:
raise Exception("Please provide a string or a byte sequence as \
argument for calculation.")
crcValue = 0xffffffff
for c in input_data:
d = ord(c) if is_string else c
tmp = crcValue ^ d
crcValue = (crcValue >> 8) ^ int(
self.crc32_tab[(tmp & 0x00ff)], 0)
# Only for CRC-32: When all bytes have been processed, take the
# one's complement of the obtained CRC value
crcValue ^= 0xffffffff # (or crcValue = ~crcValue)
return crcValue
except Exception as e:
print("EXCEPTION(calculate): {}".format(e))
def init_crc32(self):
'''The algorithm use tables with precalculated values'''
for i in range(0, 256):
crc = i
for j in range(0, 8):
if (crc & 0x00000001):
crc = int(c_ulong(crc >> 1).value) ^ self.crc32_constant
else:
crc = int(c_ulong(crc >> 1).value)
self.crc32_tab.append(hex(crc))
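# (Hedged usage sketch) With the standard CRC-32 parameters used above
# (poly 0xEDB88320, init and final XOR 0xFFFFFFFF), the well-known check value holds:
# assert CRC32().calculate(b"123456789") == 0xCBF43926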
```
#### File: OctaveOrp/Python3/simpleData.py
```python
import random
random.seed()
from json import dumps
# import logging
# from platform import platform
# from psutil import cpu_percent, virtual_memory
try:
from serial import Serial
import os
#DEV = os.getenv('DEV', '/dev/ttyS0')
DEV = os.getenv('DEV', 'COM11')
print("using serial")
except:
from serialM import Serial
DEV = "aPort"
print("using pybv serial")
from time import sleep
import gc
from octave_rp import OctaveRP
from octave_rp import Output
from octave_rp import Input
from octave_rp import Sensor
# Create serial object and use it to create SbSerial connection
s = Serial(DEV)
orp = OctaveRP(s)
# Global data for simplicity
# note python dict has bool True/False JSON true/False
'''
measurementData = {
"temperature": 123.2,
"waterLevelHigh": True,
"waterLevelLow": False,
"Turbidity": 45,
"DissolvedOxygen": 78,
"Debris": True,
"flowIn": 12.5,
"flowOut": 11.8
}
settingsData = {
"Fog_roller_OFF":300,
"Fog_roller_ON":300,
"Recirculation_OFF":90,
"Recirculation_ON":10,
"Service_pump_OFF":2550,
"Service_pump_ON":90,
"Solenoid_valve_OFF":2550,
"Solenoid_valve_ON":120,
"cleaningCycles":3,
"unitOFF":2,
"unitON":7
}
'''
downPath = "dwn/"
Recirculation_OFF_str = "rof"
## this callback handler is called when Octave cloud sends some data that matches
def Recirculation_OFF_cbh(data):
print ("Recirculation_OFF_cbh", data)
# mimic the settings dictionary but setting values from the cloud
## Register callback handler with Octave
Recirculation_OFF_octave = Output(orp, Recirculation_OFF_cbh, 'num', (downPath + Recirculation_OFF_str))
Recirculation_OFF_octave.create_output()
downPath = "dwn/"
Recirculation_ON_str = "ron"
## this callback handler is called when Octave cloud sends some data that matches
def Recirculation_ON_cbh(data):
print ("Recirculation_ON_cbh", data)
# mimic the settings dictionary but setting values from the cloud
## Register callback handler with Octave
Recirculation_ON_octave = Output(orp, Recirculation_ON_cbh, 'num', (downPath + Recirculation_ON_str))
Recirculation_ON_octave.create_output()
up={} # dictionary
# note it seems paths must be alpha chars only
# create the Octave inputs
up["temperature"] = Input(orp, 'num' , 'sense/temperature', 'number') # we might want to simplify the text
up["waterLevelHigh"] = Input(orp, 'bool', 'sense/waterLevelHigh','bool')
up["waterLevelLow"] = Input(orp, 'bool', 'sense/waterLevelLow','bool')
up["Turbidity"] = Input(orp, 'num' , 'sense/Turbidity','number')
up["DissolvedOxygen"]= Input(orp, 'num' , 'sense/DissolvedOxygen','number')
up["Debris"] = Input(orp, 'bool', 'sense/Debris','bool')
up["flowIn"] = Input(orp, 'num' , 'sense/flowIn','number')
up["flowOut"] = Input(orp, 'num' , 'sense/flowOut','number')
for key in up:
print("Creating", key)
up[key].create_input()
turbidity = 55
DissolvedOxygen = 22
# send values to Octave
up["temperature"].send(42.2)
up["waterLevelHigh"].send(True)
up["waterLevelLow"].send(True)
up["Turbidity"].send(turbidity)
up["DissolvedOxygen"].send(DissolvedOxygen)
up["Debris"].send(True)
up["flowIn"].send(11.2)
up["flowOut"].send(23)
# Run Forever
while True:
try:
sleep(10)
turbidity = random.randrange(20,50,1)
DissolvedOxygen = random.randrange(40,60,1)
up["Turbidity"].send(turbidity)
up["DissolvedOxygen"].send(DissolvedOxygen)
except KeyboardInterrupt:
exit(0)
``` |
{
"source": "JohnOmena/software-test-class",
"score": 3
} |
#### File: software-test-class/api/views.py
```python
from django.contrib.auth.models import User
from rest_framework import permissions, viewsets
from api.serializers import TodoListSerializer, TodoSerializer, UserSerializer
from lists.models import Todo, TodoList
class IsCreatorOrReadOnly(permissions.BasePermission):
"""
Object-level permission to only allow owners of an object to edit it.
Assumes the model instance has an `creator` attribute.
"""
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
if not obj.creator:
return True
return obj.creator == request.user
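# (Illustrative behaviour of the checks above)
# - GET/HEAD/OPTIONS requests are allowed for any user (SAFE_METHODS).
# - Objects with no creator (created anonymously) can be edited by anyone.
# - Otherwise only the creating user may modify or delete the object.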
class UserViewSet(viewsets.ModelViewSet):
queryset = User.objects.all()
serializer_class = UserSerializer
permission_classes = (permissions.IsAdminUser,)
class TodoListViewSet(viewsets.ModelViewSet):
queryset = TodoList.objects.all()
serializer_class = TodoListSerializer
permission_classes = (IsCreatorOrReadOnly,)
def perform_create(self, serializer):
user = self.request.user
creator = user if user.is_authenticated else None
serializer.save(creator=creator)
class TodoViewSet(viewsets.ModelViewSet):
queryset = Todo.objects.all()
serializer_class = TodoSerializer
permission_classes = (IsCreatorOrReadOnly,)
def perform_create(self, serializer):
user = self.request.user
creator = user if user.is_authenticated else None
serializer.save(creator=creator)
``` |
{
"source": "JohnOmernik/jupyter_hive",
"score": 2
} |
#### File: jupyter_hive/hive_core/hive_base.py
```python
import json
import sys
import os
import time
import pandas as pd
from collections import OrderedDict
import requests
from integration_core import Integration
from pyodbc_core import Pyodbc
from IPython.core.magic import (Magics, magics_class, line_magic, cell_magic, line_cell_magic)
from IPython.core.display import HTML
#import IPython.display
from IPython.display import display_html, display, Javascript, FileLink, FileLinks, Image
import ipywidgets as widgets
# Put any additional imports specific to your integration here:
import pyodbc as po
import jupyter_integrations_utility as jiu
@magics_class
class Hive(Pyodbc):
# Static Variables
# The name of the integration
# The class name (Start) should be changed to match the name_str, but with the first letter upper cased.
name_str = "hive"
instances = {}
# These are the ENV variables the integration will check when starting up. The integration_base prefix will be prepended in checking (that defaults to JUPYTER_)
# So the following two items will look for:
# JUPYTER_START_BASE_URL and put it into the opts dict as start_base_url
# JUPYTER_START_USER as put it in the opts dict as start_user
custom_evars = ["hive_conn_default"]
# These are the variables in the opts dict that allowed to be set by the user. These are specific to this custom integration and are joined
# with the base_allowed_set_opts from the integration base
# The three examples here would be "start_base_url, start_ignore_ssl_warn, and start_verbose_errors
# Make sure these are defined in myopts!
custom_allowed_set_opts = ["hive_conn_default"]
# These are the custom options for your integration
myopts = {}
myopts['hive_max_rows'] = [1000, 'Max number of rows to return, will potentially add this to queries']
myopts['hive_conn_default'] = ["default", 'Default instance name for connections']
# Class Init function - Obtain a reference to the get_ipython()
def __init__(self, shell, debug=False, *args, **kwargs):
super(Hive, self).__init__(shell, debug=debug)
self.debug = debug
#Add local variables to opts dict
for k in self.myopts.keys():
self.opts[k] = self.myopts[k]
self.load_env(self.custom_evars)
self.parse_instances()
# def customDisconnect - In pyodbc
# def customAuth - In pyodbc
# def validateQuery - In pyodbc
# def customQuery - In pyodbc
# def customHelp - In pyodbc
def retCustomDesc(self):
return "Jupyter integration for working with Apache Hive via PyODBC based data sources"
# This is the magic name.
@line_cell_magic
def hive(self, line, cell=None):
if cell is None:
line = line.replace("\r", "")
line_handled = self.handleLine(line)
if self.debug:
print("line: %s" % line)
print("cell: %s" % cell)
if not line_handled: # We based on this we can do custom things for integrations.
if line.lower() == "testintwin":
print("You've found the custom testint winning line magic!")
else:
print("I am sorry, I don't know what you want to do with your line magic, try just %" + self.name_str + " for help options")
else: # This is run is the cell is not none, thus it's a cell to process - For us, that means a query
self.handleCell(cell, line)
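# (Hedged usage sketch in a notebook; instance and table names are illustrative)
# %hive connect
# %%hive
# select * from my_db.my_table limit 10
# Successful results come back as a Pandas DataFrame and are kept in a prev_ variable
# by convention in these integrations.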
``` |
{
"source": "JohnOmernik/jupyter_integration_template",
"score": 2
} |
#### File: jupyter_integration_template/integration_core/integration_base.py
```python
import json
import sys
import os
import time
import pandas as pd
from getpass import getpass
from collections import OrderedDict
from IPython.core.magic import (Magics, magics_class, line_magic, cell_magic, line_cell_magic)
from IPython.core.display import HTML
# Your Specific integration imports go here, make sure they are in requirements! Examples left in for hive
import requests
import socket
from pyhive import hive as hivemod
# BeakerX integration is highly recommened, but at this time IS optional, so we TRY beakerx, and then fail well if its not there.
try:
from beakerx import *
from beakerx.object import beakerx
except:
pass
#import IPython.display
from IPython.display import display_html, display, Javascript, FileLink, FileLinks, Image
import ipywidgets as widgets
@magics_class
class Integration(Magics):
# Static Variables
ipy = None # IPython variable for updating things
session = None # Session if integration uses it
connected = False # Is the integration connected
passwd = "" # If the integration uses a password, it's temp stored here
last_query = ""
name_str = "integration"
debug = False # Enable debug mode
# Variables Dictionary
opts = {}
# Option Format: [ Value, Description]
# Pandas Variables
opts['pd_display_idx'] = [False, "Display the Pandas Index with output"]
opts['pd_replace_crlf'] = [True, "Replace extra crlfs in outputs with String representations of CRs and LFs"]
opts['pd_max_colwidth'] = [50, 'Max column width to display']
opts['pd_display.max_rows'] = [1000, 'Number of Max Rows']
opts['pd_display.max_columns'] = [None, 'Max Columns']
opts['pd_use_beaker'] = [False, 'Use the Beaker system for Pandas Display']
opts['pd_beaker_bool_workaround'] = [True, 'Look for Dataframes with bool columns, and make it object for display in BeakerX']
opts['verbose_errors'] = [False, 'Show the full error string on query failures (used in runQuery)']
pd.set_option('display.max_columns', opts['pd_display.max_columns'][0])
pd.set_option('display.max_rows', opts['pd_display.max_rows'][0])
pd.set_option('max_colwidth', opts['pd_max_colwidth'][0])
# Get Env items (User and/or Base URL)
try:
tuser = os.environ['JUPYTERHUB_' + name_str.upper() + '_USER']
except:
tuser = ''
try:
turl = os.environ['JUPYTERHUB_' + name_str.upper() + '_BASE_URL']
except:
turl = ""
# Hive specific variables as examples
opts[name_str + '_max_rows'] = [1000, 'Max number of rows to return, will potentially add this to queries']
opts[name_str + '_user'] = [tuser, "User to connect with - Can be set via ENV Var: JUPYTER_" + name_str.upper() + "_USER otherwise will prompt"]
opts[name_str + '_base_url'] = [turl, "URL to connect to server. Can be set via ENV Var: JUPYTER_" + name_str.upper() + "_BASE_URL"]
opts[name_str + '_base_url_host'] = ["", "Hostname of connection derived from base_url"]
opts[name_str + '_base_url_port'] = ["", "Port of connection derived from base_url"]
opts[name_str + '_base_url_scheme'] = ["", "Scheme of connection derived from base_url"]
# Class Init function - Obtain a reference to the get_ipython()
def __init__(self, shell, pd_use_beaker=False, *args, **kwargs):
super(Integration, self).__init__(shell)
self.ipy = get_ipython()
self.session = None
self.opts['pd_use_beaker'][0] = pd_use_beaker
if pd_use_beaker == True:
try:
beakerx.pandas_display_table()
except:
print("WARNING - BEAKER SUPPORT FAILED")
def connect(self, prompt=False):
if self.connected == False:
if prompt == True or self.opts[self.name_str + '_user'][0] == '':
print("User not specified in JUPYTER_%s_USER or user override requested" % self.name_str.upper())
tuser = input("Please type user name if desired: ")
self.opts[self.name_str + '_user'][0] = tuser
print("Connecting as user %s" % self.opts[self.name_str'_user'][0])
print("")
if prompt == True or self.opts[self.name_str + "_base_url'][0] == '':
print("%s Base URL not specified in JUPYTER_%s_BASE_URL or override requested" % (self.name_str.capitalize(), self.name_str.upper()))
turl = input("Please type in the full %s URL: " % self.name_str.capitalize())
self.opts[self.name_str + '_base_url'][0] = turl
print("Connecting to %s URL: %s" % (self.name_str.capitalize(), self.opts['_base_url'][0]))
print("")
myurl = self.opts[self.name_str + '_base_url'][0]
ts1 = myurl.split("://")
self.opts[self.name_str + '_base_url_scheme'][0] = ts1[0]
t1 = ts1[1]
ts2 = t1.split(":")
self.opts[self.name_str + '_base_url_host'][0] = ts2[0]
self.opts[self.name_str + '_base_url_port'][0] = ts2[1]
# Use the following if your data source requries a password
# print("Please enter the password you wish to connect with:")
# tpass = ""
# self.ipy.ex("from getpass import getpass\ntpass = getpass(prompt='Connection Password: ')")
# tpass = self.ipy.user_ns['tpass']
# self.passwd = <PASSWORD>
# self.ipy.user_ns['tpass'] = ""
result = self.auth()
if result == 0:
self.connected = True
print("%s - %s Connected!" % (self.name_str.capitalize(), self.opts[self.name_str + '_base_url'][0]))
else:
print("Connection Error - Perhaps Bad Usename/Password?")
else:
print(self.name_str.capitalize() + "is already connected - Please type %" + self.name_str + " for help on what you can you do")
if self.connected != True:
self.disconnect()
def disconnect(self):
if self.connected == True:
print("Disconnected %s Session from %s" % (self.name_str.capitalize(), self.opts[self.name_str + '_base_url'][0])
else:
print("%s Not Currently Connected - Resetting All Variables" % self.name_str.capitalize())
self.session = None
self.connected = False
##### Where we left off
def auth(self):
self.session = None
result = -1
try:
# To do, allow settings hive setting from ENV
# self.session = hivemod.Connection(host=self.opts['base_url_host'][0], port=self.opts['base_url_port'][0], username=self.opts['user'][0])
result = 0
except:
print("%s Connection Error!" % self.name_str.capitalize())
result = -2
return result
def validateQuery(self, query):
bRun = True
bReRun = False
if self.last_query == query:
# If the validation allows rerun, that we are here:
bReRun = True
# Ok, we know if we are rerun or not, so let's now set the last_query
self.last_query = query
# Example Validation
# Warn only - Don't change bRun
# This one is looking for a ; in the query. We let it run, but we warn the user
# Basically, we print a warning but don't change the bRun variable and the bReRun doesn't matter
if query.find(";") >= 0:
print("WARNING - Do not type a trailing semi colon on queries, your query will fail (like it probably did here)")
# Warn and don't submit after first attempt - Second attempt go ahead and run
# If the query doesn't have a day filter, then maybe we want to WARN the user and not run the query.
# However, if this is the second time in a row that the user has submitted the query, then they must want to run without day
# So if bReRun is True, we allow bRun to stay true. This lets the user submit the query after seeing the warning
if query.lower().find("day = ") < 0:
print("WARNING - Queries should have a day = component to ensure you don't create too many map tasks")
if bReRun == False:
print("First Submission - Not Sending to Server - Run again to submit as is")
bRun = False
else:
print("Query will be submitted ")
# Warn and do not allow submission
# There is no way for a user to submit this query
if query.lower().find("limit ") < 0:
print("ERROR - All queries must have a limit clause - Query will not submit without out")
bRun = False
return bRun
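# (Hedged illustration of the example rules above)
# "select * from t" -> blocked: no limit clause
# "select * from t limit 10" -> warned about missing day =, blocked on first run, submitted on re-run
# "select * from t where day = '2020-01-01' limit 10;" -> submitted, but warned about the trailing semicolon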
def runQuery(self, query):
mydf = None
status = "-"
starttime = int(time.time())
run_query = self.validateQuery(query)
if run_query:
if self.connected == True:
try:
mydf = pd.read_sql(query, self.session)
status = "Success"
except (TypeError):
status = "Success - No Results"
mydf = None
except Exception as e:
str_err = str(e)
if self.opts['verbose_errors'][0] == True:
status = "Failure - query_error: " + str_err
else:
msg_find = "errorMessage=\""
em_start = str_err.find(msg_find)
find_len = len(msg_find)
em_end = str_err[em_start + find_len:].find("\"")
str_out = str_err[em_start + find_len:em_start + em_end + find_len]
status = "Failure - query_error: " + str_out
else:
mydf = None
status = "%d Not Connected" % self.name_str.capitalize()
else:
status = "ValidationError"
mydf = None
endtime = int(time.time())
query_time = endtime - starttime
return mydf, query_time, status
# Display Help must be completely customized, please look at this Hive example
def displayCustomHelp(self):
print("jupyter_hive is a interface that allows you to use the magic function %hive to interact with an Hive installation.")
print("")
print("jupyter_hive has two main modes %hive and %%hive")
print("%hive is for interacting with a Hive installation, connecting, disconnecting, seeing status, etc")
print("%%hive is for running queries and obtaining results back from the Hive cluster")
print("")
print("%hive functions available")
print("###############################################################################################")
print("")
print("{: <30} {: <80}".format(*["%hive", "This help screen"]))
print("{: <30} {: <80}".format(*["%hive status", "Print the status of the Hive connection and variables used for output"]))
print("{: <30} {: <80}".format(*["%hive connect", "Initiate a connection to the Hive cluster, attempting to use the ENV variables for Hive URL and Hive Username"]))
print("{: <30} {: <80}".format(*["%hive connect alt", "Initiate a connection to the Hive cluster, but prompt for Username and URL regardless of ENV variables"]))
print("{: <30} {: <80}".format(*["%hive disconnect", "Disconnect an active Hive connection and reset connection variables"]))
print("{: <30} {: <80}".format(*["%hive set %variable% %value%", "Set the variable %variable% to the value %value%"]))
print("{: <30} {: <80}".format(*["%hive debug", "Sets an internal debug variable to True (False by default) to see more verbose info about connections"]))
print("")
print("Running queries with %%hive")
print("###############################################################################################")
print("")
print("When running queries with %%hive, %%hive will be on the first line of your cell, and the next line is the query you wish to run. Example:")
print("")
print("%%hive")
print("select * from `mydatabase`.`mytable`")
print("")
print("Some query notes:")
print("- If the number of results is less than pd_display.max_rows, then the results will be diplayed in your notebook")
print("- You can change pd_display.max_rows with %hive set pd_display.max_rows 2000")
print("- The results, regardless of display will be place in a Pandas Dataframe variable called prev_hive")
print("- prev_hive is overwritten every time a successful query is run. If you want to save results assign it to a new variable")
# This is the function that is actually called.
def displayHelp(self):
self.displayCustomHelp()
# This is the magic name. I left hive in for an example, this would equate to %hive
@line_cell_magic
def hive(self, line, cell=None):
# Handle all Line items %hive item1 %hive item2 etc
if cell is None:
line = line.replace("\r", "")
if line == "":
self.displayHelp()
elif line.lower() == "status":
self.retStatus()
elif line.lower() == "debug":
print("Toggling Debug from %s to %s" % (self.debug, not self.debug))
self.debug = not self.debug
elif line.lower() == "disconnect":
self.disconnect()
elif line.lower() == "connect alt":
self.connect(True)
elif line.lower() == "connect":
self.connect(False)
elif line.lower().find('set ') == 0:
self.setvar(line)
else:
print("I am sorry, I don't know what you want to do, try just %" + self.name_str + "for help options")
else: # This is run is the cell is not none, thus it's a cell to process - For us, that means a query
cell = cell.replace("\r", "")
if self.connected == True:
result_df, qtime, status = self.runQuery(cell)
if status.find("Failure") == 0:
print("Error: %s" % status)
elif status.find("Success - No Results") == 0:
print("No Results returned in %s seconds" % qtime)
else:
self.ipy.user_ns['prev_' + self.name_str] = result_df
mycnt = len(result_df)
print("%s Records in Approx %s seconds" % (mycnt,qtime))
print("")
if mycnt <= int(self.opts['pd_display.max_rows'][0]):
if self.debug:
print("Testing max_colwidth: %s" % pd.get_option('max_colwidth'))
if self.opts['pd_use_beaker'][0] == True:
if self.opts['pd_beaker_bool_workaround'][0]== True:
for x in result_df.columns:
if result_df.dtypes[x] == 'bool':
result_df[x] = result_df[x].astype(object)
display(TableDisplay(result_df))
else:
display(HTML(result_df.to_html(index=self.opts['pd_display_idx'][0])))
else:
print("Number of results (%s) greater than pd_display_max(%s)" % (mycnt, self.opts['pd_display.max_rows'][0]))
else:
print(self.name_str.capitalize() + " is not connected: Please see help at %" + self.name_str + ")
def retStatus(self):
print("Current State of %s Interface:" % self.name_str.capitalize())
print("")
print("{: <30} {: <50}".format(*["Connected:", str(self.connected)]))
print("{: <30} {: <50}".format(*["Debug Mode:", str(self.debug)]))
print("")
print("Display Properties:")
print("-----------------------------------")
for k, v in self.opts.items():
if k.find("pd_") == 0:
try:
t = int(v[1])
except:
t = v[1]
if v[0] is None:
o = "None"
else:
o = v[0]
myrow = [k, o, t]
print("{: <30} {: <50} {: <20}".format(*myrow))
myrow = []
print("")
print("%s Properties:" % self.name_str.capitalize())
print("-----------------------------------")
for k, v in self.opts.items():
if k.find(self.name_str + "_") == 0:
if v[0] is None:
o = "None"
else:
o = str(v[0])
myrow = [k, o, v[1]]
print("{: <30} {: <50} {: <20}".format(*myrow))
myrow = []
def setvar(self, line):
pd_set_vars = ['pd_display.max_columns', 'pd_display.max_rows', 'pd_max_colwidth', 'pd_use_beaker']
allowed_opts = pd_set_vars + ['pd_replace_crlf', 'pd_display_idx', self.name_str + '_base_url']
tline = line.replace('set ', '')
tkey = tline.split(' ')[0]
tval = tline.split(' ')[1]
if tval == "False":
tval = False
if tval == "True":
tval = True
if tkey in allowed_opts:
self.opts[tkey][0] = tval
if tkey in pd_set_vars:
try:
t = int(tval)
except:
t = tval
pd.set_option(tkey.replace('pd_', ''), t)
else:
print("You tried to set variable: %s - Not in Allowed options!" % tkey)
``` |
{
"source": "JohnOmernik/jupyter_mssql",
"score": 2
} |
#### File: jupyter_mssql/mssql_core/mssql_base.py
```python
import json
import sys
import os
import time
import pandas as pd
from collections import OrderedDict
import requests
from integration_core import Integration
from pyodbc_core import Pyodbc
from IPython.core.magic import (Magics, magics_class, line_magic, cell_magic, line_cell_magic)
from IPython.core.display import HTML
#import IPython.display
from IPython.display import display_html, display, Javascript, FileLink, FileLinks, Image
import ipywidgets as widgets
import jupyter_integrations_utility as jiu
# Put any additional imports specific to your integration here:
import pyodbc as po
@magics_class
class Mssql(Pyodbc):
# Static Variables
# The name of the integration
# The class name (Start) should be changed to match the name_str, but with the first letter upper cased.
name_str = "mssql"
instances = {}
# These are the ENV variables the integration will check when starting up. The integration_base prefix will be prepended in checking (that defaults to JUPYTER_)
# So the following two items will look for:
# JUPYTER_START_BASE_URL and put it into the opts dict as start_base_url
    # JUPYTER_START_USER and put it in the opts dict as start_user
custom_evars = ["mssql_conn_default"]
    # These are the variables in the opts dict that are allowed to be set by the user. These are specific to this custom integration and are joined
# with the base_allowed_set_opts from the integration base
# The three examples here would be "start_base_url, start_ignore_ssl_warn, and start_verbose_errors
# Make sure these are defined in myopts!
custom_allowed_set_opts = ["mssql_conn_default"]
# These are the custom options for your integration
myopts = {}
myopts['mssql_max_rows'] = [1000, 'Max number of rows to return, will potentially add this to queries']
myopts['mssql_conn_default'] = ["default", 'Default instance name for connections']
# Class Init function - Obtain a reference to the get_ipython()
def __init__(self, shell, debug=False, *args, **kwargs):
        super(Mssql, self).__init__(shell, debug=debug)
self.debug = debug
#Add local variables to opts dict
for k in self.myopts.keys():
self.opts[k] = self.myopts[k]
self.load_env(self.custom_evars)
self.parse_instances()
# Overriding Custom Query to handle thrift errors and auto matic resubmit
def customQuery(self, query, instance):
mydf = None
status = ""
resubmit = False
try:
self.session.execute(query)
mydf = self.as_pandas_DataFrame()
if mydf is not None:
status = "Success"
else:
status = "Success - No Results"
except Exception as e:
mydf = None
str_err = str(e)
if self.debug:
print("Error: %s" % str_err)
if str_err.find("Impala Thrift API") >= 0 and str_err.find("SSL_write: bad write retry") >= 0:
if resubmit == False:
# This is an old connection, let's just resubmit it (once)
print("SSL_write Thrift error detected - Likely Stale Connection - Attempting 1 retry")
try:
resubmit = True # First we make sure we only resubmit once
self.session.execute(query)
mydf = self.as_pandas_DataFrame()
if mydf is not None:
status = "Success"
else:
status = "Success - No Results"
except Exception as e1:
mydf = None
str_err1 = str(e1)
final_err = "First Run: %s\nSecond Run: %s" % (str_err, str_err1)
if self.debug:
print("Second Run Error: %s" % str_err1)
status = "Failure - query_error: " % final_err
else:
status = "Failure - query_error: " + str_err
return mydf, status
# def customDisconnect - In pyodbc
# def customAuth - In pyodbc
# def validateQuery - In pyodbc
# def customQuery - In pyodbc
# def customHelp - In pyodbc
def retCustomDesc(self):
return "Jupyter integration for working with MSSQL via PyODBC based data sources"
# This is the magic name.
@line_cell_magic
def mssql(self, line, cell=None):
if cell is None:
line = line.replace("\r", "")
line_handled = self.handleLine(line)
if self.debug:
print("line: %s" % line)
print("cell: %s" % cell)
            if not line_handled: # Based on this we can do custom things for integrations.
if line.lower() == "testintwin":
print("You've found the custom testint winning line magic!")
else:
print("I am sorry, I don't know what you want to do with your line magic, try just %" + self.name_str + " for help options")
        else: # This is run if the cell is not None, thus it's a cell to process - For us, that means a query
self.handleCell(cell, line)
``` |
{
"source": "JohnOmernik/jupyter_pyodbc",
"score": 2
} |
#### File: jupyter_pyodbc/pyodbc_core/pyodbc_base.py
```python
import json
import sys
import os
import time
import pandas as pd
from collections import OrderedDict
import requests
from integration_core import Integration
from IPython.core.magic import (Magics, magics_class, line_magic, cell_magic, line_cell_magic)
from IPython.core.display import HTML
#import IPython.display
from IPython.display import display_html, display, Javascript, FileLink, FileLinks, Image
import ipywidgets as widgets
import jupyter_integrations_utility as jiu
# Put any additional imports specific to your integration here:
import pyodbc as po
@magics_class # Not sure about this, should pyodbc work by itself? Or should we
class Pyodbc(Integration):
# Static Variables
# The name of the integration
# The class name (Start) should be changed to match the name_str, but with the first letter upper cased.
name_str = "pyodbc"
instances = {}
# These are the ENV variables the integration will check when starting up. The integration_base prefix will be prepended in checking (that defaults to JUPYTER_)
# So the following two items will look for:
# JUPYTER_START_BASE_URL and put it into the opts dict as start_base_url
    # JUPYTER_START_USER and put it in the opts dict as start_user
custom_evars = ["pyodbc_conn_default"]
    # These are the variables in the opts dict that are allowed to be set by the user. These are specific to this custom integration and are joined
# with the base_allowed_set_opts from the integration base
# The three examples here would be "start_base_url, start_ignore_ssl_warn, and start_verbose_errors
# Make sure these are defined in myopts!
custom_allowed_set_opts = ["pyodbc_conn_default"]
# These are the custom options for your integration
myopts = {}
myopts['pyodbc_max_rows'] = [1000, 'Max number of rows to return, will potentially add this to queries']
myopts['pyodbc_conn_default'] = ["default", 'Default instance name for connections']
# Class Init function - Obtain a reference to the get_ipython()
def __init__(self, shell, debug=False, *args, **kwargs):
super(Pyodbc, self).__init__(shell, debug=debug)
self.debug = debug
#Add local variables to opts dict
for k in self.myopts.keys():
self.opts[k] = self.myopts[k]
self.load_env(self.custom_evars)
self.parse_instances()
# We use a custom disconnect in pyodbc so we try to close the connection before nuking it
def customDisconnect(self, instance):
try:
self.instances[instance]['connection'].close()
except:
pass
self.instances[instance]['connection'] = None
self.instances[instance]['session'] = None
self.instances[instance]['connected'] = False
#self.instances[instance]['connect_pass'] = None # Should we clear the password when we disconnect? I am going to change this to no for now
def req_password(self, instance):
opts = None
retval = True
try:
opts = self.instances[instance]['options']
except:
print("Instance %s options not found" % instance)
try:
if opts['use_integrated_security'] == 1:
retval = False
except:
pass
return retval
def customAuth(self, instance):
result = -1
inst = None
int_sec = False
if instance not in self.instances.keys():
print("Instance %s not found in instances - Connection Failed" % instance)
result = -3
else:
inst = self.instances[instance]
if inst is not None:
try:
if inst['options']['use_integrated_security'] == 1:
int_sec = True
except:
pass
kar = [
["dsn", "DSN"], ["dbcname", "DBCNAME"], ["host", "Host"], ["port", "Port"], ["default_db", "Database"], ["authmech", "AuthMech"],
["usesasl", "UserSASL"], ["user", "UID"], ["enc_pass", "PWD"], ["usessl", "SSL"], ["allowselfsignedcert", "AllowSelfSignedServerCert"]
]
top_level = ["user", "host", "port", "enc_pass"]
var = []
conn_vars = []
for x in kar:
if x[0] in top_level:
if int_sec == True and x[0] in ["user", "enc_pass"]: # No need to put UID and PWD in connect string
pass
else:
try:
tval = inst[x[0]]
except:
tval = None
tkey = x[1]
if x[0] == "enc_pass":
tval = self.ret_dec_pass(tval)
inst['connect_pass'] = ""
else:
tval = self.checkvar(instance, x[0])
tkey = x[1]
if tval is not None:
conn_vars.append([tkey, tval])
conn_string = ""
for c in conn_vars:
conn_string += "%s=%s; " % (c[0], c[1])
conn_string = conn_string[0:-2]
#conn_string = "DSN=%s; Host=%s; Port=%s; Database=%s; AuthMech=%s; UseSASL=%s; UID=%s; PWD=%s; SSL=%s; AllowSelfSignedServerCert=%s" % (var[0], var[1], var[2], var[3], var[4], var[5], var[6], var[7], var[8], var[9])
try:
self.instances[instance]['connection'] = po.connect(conn_string, autocommit=True)
self.session = self.instances[instance]['connection'].cursor()
result = 0
except Exception as e:
str_err = str(e)
print("Unable to connect Error:\n%s" % str_err)
result = -2
# Here you can check if the authentication on connect is successful. If it's good, return 0, otherwise return something else and show an error
return result
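    # Illustrative result of the loop above (hypothetical instance values): the collected
    # key/value pairs produce an ODBC connection string along the lines of
    #   "DSN=mydsn; Host=db.example.com; Port=10000; Database=default; UID=me; PWD=secret"
    # which is then passed to pyodbc.connect(conn_string, autocommit=True).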
def validateQuery(self, query, instance):
bRun = True
bReRun = False
if self.instances[instance]['last_query'] == query:
# If the validation allows rerun, that we are here:
bReRun = True
# Ok, we know if we are rerun or not, so let's now set the last_query (and last use if needed)
self.instances[instance]['last_query'] = query
if query.strip().find("use ") == 0:
self.instances[instance]['last_use'] = query
# Example Validation
# Warn only - Don't change bRun
# This one is looking for a ; in the query. We let it run, but we warn the user
# Basically, we print a warning but don't change the bRun variable and the bReRun doesn't matter
if query.find(";") >= 0:
print("WARNING - Do not type a trailing semi colon on queries, your query will fail (like it probably did here)")
# Warn and don't submit after first attempt - Second attempt go ahead and run
        # If the query doesn't have a limit clause, then maybe we want to WARN the user and not run the query.
        # However, if this is the second time in a row that the user has submitted the query, then they must want to run without a limit
        # So if bReRun is True, we allow bRun to stay true. This allows the user to submit after warnings
if query.lower().find("limit ") < 0:
print("WARNING - Queries shoud have a limit so you don't bonkers your DOM")
# Warn and do not allow submission
# There is no way for a user to submit this query
# if query.lower().find('limit ") < 0:
# print("ERROR - All queries must have a limit clause - Query will not submit without out")
# bRun = False
return bRun
def customQuery(self, query, instance):
mydf = None
status = ""
try:
self.session.execute(query)
mydf = self.as_pandas_DataFrame()
if mydf is not None:
status = "Success"
else:
status = "Success - No Results"
except Exception as e:
mydf = None
str_err = str(e)
if self.debug:
print("Error: %s" % str(e))
status = "Failure - query_error: " + str_err
return mydf, status
# Display Help can be customized
def customOldHelp(self):
self.displayIntegrationHelp()
self.displayQueryHelp("select * from mydatabase.mytable")
def retCustomDesc(self):
return "Jupyter integration for working with the PyODBC based data sources"
def customHelp(self, curout):
n = self.name_str
mn = self.magic_name
m = "%" + mn
mq = "%" + m
table_header = "| Magic | Description |\n"
table_header += "| -------- | ----- |\n"
out = curout
qexamples = []
qexamples.append(["myinstance", "select * from mydatabase.mytable", "Run a sql query against myinstance"])
qexamples.append(["", "select * from mydatabase.mytable", "Run a sql query against the default instance"])
out += self.retQueryHelp(qexamples)
return out
def as_pandas_DataFrame(self):
cursor = self.session
try:
names = [metadata[0] for metadata in cursor.description]
ret = pd.DataFrame([dict(zip(names, row)) for row in cursor], columns=names)
except:
ret = None
return ret
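    # Example of the conversion above (hypothetical cursor contents): a cursor whose
    # description names the columns ('id', 'name') and which yields rows (1, 'a') and (2, 'b')
    # becomes a two-row DataFrame with columns ['id', 'name']; any failure (for example a
    # statement that produced no result set) returns None instead of raising.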
# This is the magic name.
@line_cell_magic
def pyodbc(self, line, cell=None):
if cell is None:
line = line.replace("\r", "")
line_handled = self.handleLine(line)
if self.debug:
print("line: %s" % line)
print("cell: %s" % cell)
            if not line_handled: # Based on this we can do custom things for integrations.
if line.lower() == "testintwin":
print("You've found the custom testint winning line magic!")
else:
print("I am sorry, I don't know what you want to do with your line magic, try just %" + self.name_str + " for help options")
        else: # This is run if the cell is not None, thus it's a cell to process - For us, that means a query
self.handleCell(cell, line)
``` |
{
"source": "JohnOmernik/jupyter_splunk",
"score": 2
} |
#### File: jupyter_splunk/splunk_core/splunk_base.py
```python
import json
import sys
import os
import time
import pandas as pd
from collections import OrderedDict
import re
from integration_core import Integration
import datetime
from IPython.core.magic import (Magics, magics_class, line_magic, cell_magic, line_cell_magic)
from IPython.core.display import HTML
# Your Specific integration imports go here, make sure they are in requirements!
from splunklib import client as splclient
import jupyter_integrations_utility as jiu
#import IPython.display
from IPython.display import display_html, display, Javascript, FileLink, FileLinks, Image
import ipywidgets as widgets
@magics_class
class Splunk(Integration):
# Static Variables
# The name of the integration
name_str = "splunk"
instances = {}
custom_evars = ['splunk_conn_default']
    # These are the variables in the opts dict that are allowed to be set by the user. These are specific to this custom integration and are joined
    # with the base_allowed_set_opts from the integration base
custom_allowed_set_opts = ["splunk_conn_default", "splunk_search_mode", "splunk_default_earliest_time", "splunk_default_latest_time", "splunk_parse_times"]
myopts = {}
myopts['splunk_max_rows'] = [1000, 'Max number of rows to return, will potentially add this to queries']
myopts['splunk_conn_default'] = ["default", "Default instance to connect with"]
myopts["splunk_default_earliest_time"] = ["-15m", "The default earliest time sent to the Splunk server"]
myopts["splunk_default_latest_time"] = ["now", "The default latest time sent to the Splunk server"]
myopts["splunk_parse_times"] = [1, "If this is 1, it will parse your query for earliest or latest and get the value. It will not alter the query, but update the default earliest/latest for subqueries"]
myopts["splunk_search_mode"] = ["normal", "The search mode sent to the splunk server"]
myopts['splunk_output_mode'] = ["csv", "The output mode sent to the splunk server, don't change this, we rely on it being csv"]
# Class Init function - Obtain a reference to the get_ipython()
def __init__(self, shell, debug=False, *args, **kwargs):
super(Splunk, self).__init__(shell, debug=debug)
self.debug = debug
#Add local variables to opts dict
for k in self.myopts.keys():
self.opts[k] = self.myopts[k]
self.load_env(self.custom_evars)
self.parse_instances()
def customAuth(self, instance):
result = -1
inst = None
if instance not in self.instances.keys():
result = -3
print("Instance %s not found in instances - Connection Failed" % instance)
else:
inst = self.instances[instance]
if inst is not None:
inst['session'] = None
mypass = ""
if inst['enc_pass'] is not None:
mypass = self.ret_dec_pass(inst['enc_pass'])
inst['connect_pass'] = ""
try:
inst['session'] = splclient.connect(host=inst['host'], port=inst['port'], username=inst['user'], password=mypass)
result = 0
except:
print("Unable to connect to Splunk instance %s at %s" % (instance, inst["conn_url"]))
result = -2
return result
def validateQuery(self, query, instance):
bRun = True
bReRun = False
if self.instances[instance]['last_query'] == query:
# If the validation allows rerun, that we are here:
bReRun = True
# Ok, we know if we are rerun or not, so let's now set the last_query
self.instances[instance]['last_query'] = query
# Example Validation
# Warn only - Don't change bRun
# Basically, we print a warning but don't change the bRun variable and the bReRun doesn't matter
if query.find("search") != 0:
print("This query doesn't start with search, if it fails, you may want to add that (it doesn't infer it like the Splunk UI)")
print("")
if query.find(" or ") >= 0 or query.find(" and ") >= 0 or query.find(" Or ") >= 0 or query.find(" And ") >= 0:
print("Your query contains or, and, Or, or And - Splunk doesn't treat these as operators, and your results may not be what you want")
print("")
if query.find("[") >= 0 and query.find("]") >= 0:
print("Based on your use of square brackets [], you may be running a search with a subquery")
if self.opts['splunk_parse_times'][0] == 1:
print("You are having me parse the queries and set defaults, so if all works, your earliest and latest are passed to the subquery. (If you passed them!)")
else:
print("It doesn't appear you are having me parse query times. Thus, the earliest and latest ONLY apply to outer most part of your query. Results will be inconsistent")
print("")
if query.find("earliest") < 0:
print("Your query didn't contain the string earliest, and is likely using the default setting of earliest: %s" % (self.opts[self.name_str + "_default_earliest_time"][0]))
print("")
if query.find("latest") < 0:
print("Your query didn't contain the string latest, and is likely using the default setting of latest: %s" % (self.opts[self.name_str + "_default_latest_time"][0]))
print("")
# Warn and do not allow submission
# There is no way for a user to submit this query
# if query.lower().find('limit ") < 0:
# print("ERROR - All queries must have a limit clause - Query will not submit without out")
# bRun = False
return bRun
def parseTimes(self, query):
e_ret = None
l_ret = None
e_match = re.search(r"earliest ?= ?[\"\']?([^\s\'\"]+)[\s\"\']", query)
if e_match:
e_ret = e_match.group(1)
l_match = re.search(r"latest ?= ?[\"\']?([^\s\'\"]+)[\s\"\']", query)
if l_match:
l_ret = l_match.group(1)
return e_ret, l_ret
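    # Example for parseTimes (illustrative query): for
    #   search index=main earliest="-24h" latest="now" | stats count
    # the two regexes return ("-24h", "now"); if either keyword is missing, the matching
    # value stays None and customQuery falls back to the instance default instead.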
def splunkTime(self, intime):
# Converts the normal shitty splunk time to the other format it requires in the API
m = re.search("\d{1,2}\/\d{1,2}\/\d{4}", intime)
if m:
tmp_dt = datetime.datetime.strptime(intime, "%m/%d/%Y:%H:%M:%S")
outtime = tmp_dt.strftime("%Y-%m-%dT%H:%M:%S")
else:
outtime = intime
return outtime
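    # Example conversion (illustrative input): "10/31/2021:13:45:00" matches the m/d/Y
    # pattern and is rewritten as "2021-10-31T13:45:00"; relative values such as "-15m"
    # or "now" contain no date and pass through unchanged.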
def customQuery(self, query, instance, reconnect=True):
e_val = None
l_val = None
if self.opts["splunk_parse_times"][0] == 1:
if self.debug:
print("Attempting to parse earliest and latest times")
e_val, l_val = self.parseTimes(query)
if self.debug:
print("Value of Earliest parsed from query: %s" % e_val)
print("Value of Latest parsed from query: %s" % l_val)
if e_val is None:
e_val = self.checkvar(instance, 'splunk_default_earliest_time')
if l_val is None:
l_val = self.checkvar(instance, "splunk_default_latest_time")
e_val = self.splunkTime(e_val)
l_val = self.splunkTime(l_val)
kwargs_export = { "earliest_time": e_val, "latest_time": l_val, "search_mode": self.checkvar(instance, "splunk_search_mode"), "output_mode": self.checkvar(instance, "splunk_output_mode")}
if self.debug:
print("kwargs: %s" % kwargs_export)
print("query: %s" % query)
mydf = None
status = ""
str_err = ""
try:
results = self.instances[instance]['session'].jobs.export(query, **kwargs_export)
if results is not None:
mydf = pd.read_csv(results)
str_err = "Success"
else:
mydf = None
str_err = "Success - No Results"
except Exception as e:
mydf = None
str_err = str(e)
if str_err.find("Success") >= 0:
pass
elif str_err.find("No columns to parse from file") >= 0:
status = "Success - No Results"
mydf = None
elif str_err.find("Session is not logged in") >= 0:
# Try to rerun query
if reconnect == True:
self.disconnect(instance)
self.connect(instance)
m, s = self.customQuery(query, instance, False)
mydf = m
status = s
else:
mydf = None
status = "Failure - Session not logged in and reconnect failed"
else:
status = "Failure - query_error: " + str_err
return mydf, status
# Display Help can be customized
def customOldHelp(self):
self.displayIntegrationHelp()
self.displayQueryHelp('search term="MYTERM"')
def retCustomDesc(self):
return "Jupyter integration for working with the Splunk datasource"
def customHelp(self, curout):
n = self.name_str
mn = self.magic_name
m = "%" + mn
mq = "%" + m
table_header = "| Magic | Description |\n"
table_header += "| -------- | ----- |\n"
out = curout
qexamples = []
qexamples.append(["myinstance", "search term='MYTERM'", "Run a SPL (Splunk) query against myinstance"])
qexamples.append(["", "search term='MYTERM'", "Run a SPL (Splunk) query against the default instance"])
out += self.retQueryHelp(qexamples)
return out
# This is the magic name.
@line_cell_magic
def splunk(self, line, cell=None):
if cell is None:
line = line.replace("\r", "")
line_handled = self.handleLine(line)
if self.debug:
print("line: %s" % line)
print("cell: %s" % cell)
            if not line_handled: # Based on this we can do custom things for integrations.
if line.lower() == "testintwin":
print("You've found the custom testint winning line magic!")
else:
print("I am sorry, I don't know what you want to do with your line magic, try just %" + self.name_str + " for help options")
        else: # This is run if the cell is not None, thus it's a cell to process - For us, that means a query
self.handleCell(cell, line)
``` |
{
"source": "JohnOmernik/jupyter_taxii",
"score": 2
} |
#### File: jupyter_taxii/taxii_core/taxii_base.py
```python
import json
import sys
import os
import time
import pandas as pd
from collections import OrderedDict
from integration_core import Integration
from IPython.core.magic import (Magics, magics_class, line_magic, cell_magic, line_cell_magic)
from IPython.core.display import HTML
import taxii2client.v20
import stix2
import jupyter_integrations_utility as jiu
#import IPython.display
from IPython.display import display_html, display, Javascript, FileLink, FileLinks, Image
import ipywidgets as widgets
@magics_class
class Taxii(Integration):
# Static Variables
# The name of the integration
name_str = "taxii"
instances = {}
custom_evars = ['taxii_conn_default', 'taxii_group_collections', 'taxii_verify_ssl', 'taxii_suppress_https_warnings', 'taxii_path_to_certs']
    # These are the variables in the opts dict that are allowed to be set by the user. These are specific to this custom integration and are joined
    # with the base_allowed_set_opts from the integration base
custom_allowed_set_opts = ["taxii_conn_default", "taxii_verify_ssl", "taxii_suppress_https_warnings", "taxii_path_to_certs", "taxii_group_collections"]
allowed_ops = ['=', '!=', 'in', '>', '<', '>=', '<=', 'contains']
myopts = {}
myopts['taxii_conn_default'] = ["default", "Default instance to connect with"]
myopts['taxii_verify_ssl'] = [True, "Verify SSL connection is valid"]
myopts['taxii_suppress_https_warnings'] = [0, "Hide warnings about SSL issues"]
myopts['taxii_path_to_certs'] = ["", "Path to custom SSL bundle for taxii connections"]
    myopts['taxii_group_collections'] = [0, "Group collections for query; if 0, we add fields: collection_name and collection_id - May take longer"]
# Class Init function - Obtain a reference to the get_ipython()
def __init__(self, shell, debug=False, *args, **kwargs):
super(Taxii, self).__init__(shell, debug=debug)
self.debug = debug
#Add local variables to opts dict
for k in self.myopts.keys():
self.opts[k] = self.myopts[k]
self.load_env(self.custom_evars)
self.parse_instances()
def customDisconnect(self, instance):
self.instances[instance]['session'] = None
self.instances[instance]['connected'] = False
self.instances[instance]['server'] = None
self.instances[instance]['api_root'] = None
self.instances[instance]['taxii_collections'] = None
def customAuth(self, instance):
result = -1
inst = None
breqAuth = False
# JUPYTER_TAXII_CONN_URL_DEFAULT="https://cti-taxii.mitre.org/taxii"
# %taxiiuser@https://cti-taxii.mitre.org:443?path=/taxii&useproxy=1&authreq=0
if int(self.opts['taxii_suppress_https_warnings'][0]) == 1 or self.opts['taxii_suppress_https_warnings'][0] == True:
import warnings
warnings.filterwarnings('ignore', "Unverified HTTPS request is being made")
if instance not in self.instances.keys():
result = -3
print("Instance %s not found in instances - Connection Failed" % instance)
else:
inst = self.instances[instance]
if inst is not None:
if inst['options'].get('useproxy', 0) == 1:
proxies = self.retProxy(instance)
inst['proxies'] = proxies
else:
inst['proxies'] = None
if 'authreq' in inst['options']:
if inst['options']['authreq'] == True or inst['options']['authreq'] == 1:
breqAuth = True
if int(self.opts['taxii_verify_ssl'][0]) == 0 or self.opts['taxii_verify_ssl'][0] == False:
myverify = False
else:
myverify = True
if self.debug:
print("myverify: %s" % myverify)
myurl = inst['scheme'] + "://" + inst['host'] + ":" + str(inst['port']) + inst['options'].get('path', '/')
inst['full_url'] = myurl
if self.debug:
print(inst['full_url'])
inst['session'] = None
if breqAuth:
print("Taxii Auth not yet handled")
else:
try:
inst['server'] = taxii2client.v20.Server(inst['full_url'], verify=myverify, proxies=inst['proxies'])
inst['api_root'] = inst['server'].api_roots[0] # Maybe do multiple?
inst['taxii_collections'] = []
for tc in inst['api_root'].collections:
# inst['taxii_collections'].append(stix2.TAXIICollectionSource(tc))
inst['taxii_collections'].append(tc)
if self.debug:
print("Added %s (ID: %s) to collections" % (tc.title, tc.id))
result = 0
except Exception as e:
print("Unable to connect to Taxii instance %s at %s" % (instance, inst["conn_url"]))
print("Exception: %s" % e)
result = -2
return result
######## Where I got
def validateQuery(self, query, instance):
bRun = True
bReRun = False
if self.instances[instance]['last_query'] == query:
# If the validation allows rerun, that we are here:
bReRun = True
# Ok, we know if we are rerun or not, so let's now set the last_query
self.instances[instance]['last_query'] = query
curquery = self.formatQuery(query)
for q in curquery:
if q[1] not in self.allowed_ops:
print("Query ['%s' '%s' '%s'] using an operator (%s) that is not supported (%s) - Query may fail or produce unwanted results" % (q[0], q[1], q[2], q[1], self.allowed_ops))
print("Query not submitted")
bRun = False
# Example Validation
# Warn only - Don't change bRun
# Basically, we print a warning but don't change the bRun variable and the bReRun doesn't matter
        # Warn and do not allow submission - There is no way for a user to submit this query
# if query.lower().find('limit ") < 0:
# print("ERROR - All queries must have a limit clause - Query will not submit without out")
# bRun = False
return bRun
def req_password(self, instance):
bAuth = self.instances[instance]['options'].get('authreq', 0)
if int(bAuth) == 0:
bAuth = False
if int(bAuth) == 1:
bAuth = True
return bAuth
def req_username(self, instance):
bAuth = self.instances[instance]['options'].get('authreq', 0)
if int(bAuth) == 0:
bAuth = False
elif int(bAuth) == 1:
bAuth = True
return bAuth
def formatQuery(self, query):
retfilter = []
qlines = query.strip().split("\n")
for ql in qlines:
qi = ql.split(" ")
qprop = qi[0].strip()
qop = qi[1].strip()
qval = " ".join(qi[2:]).strip()
if self.debug:
print("'%s' '%s' '%s'" % (qprop, qop, qval))
retfilter.append([qprop, qop, qval])
return retfilter
def retQueryFilter(self, qlist):
retval = []
for q in qlist:
retval.append(stix2.Filter(q[0], q[1], q[2]))
return retval
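    # Example for formatQuery/retQueryFilter (illustrative cell contents): the two lines
    #   type = indicator
    #   external_references.external_id = T1134.001
    # are split into [property, operator, value] triples and wrapped as stix2.Filter
    # objects, which the collection search then ANDs together.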
def customQuery(self, query, instance, reconnect=True):
mydf = None
status = ""
str_err = ""
out_res_df = pd.DataFrame()
inst = self.instances[instance]
qlist = self.formatQuery(query)
qfilter = self.retQueryFilter(qlist)
try:
if self.opts['taxii_group_collections'][0] == 1:
searcher = stix2.CompositeDataSource()
searcher.add_data_sources([stix2.TAXIICollectionSource(c) for c in inst['taxii_collections']])
tres = searcher.query(qfilter)
for r in tres:
try:
tdf = pd.json_normalize(json.loads(r.serialize()))
if len(tdf) > 0:
out_res_df = pd.concat([out_res_df, tdf], ignore_index=True)
                    except Exception as e:
if self.debug:
print("Error grouped: %s" % e)
else:
for c in inst['taxii_collections']:
c_title = c.title
c_id = c.id
searcher = stix2.CompositeDataSource()
searcher.add_data_sources([stix2.TAXIICollectionSource(c)])
tres = searcher.query(qfilter)
for r in tres:
try:
tdf = pd.json_normalize(json.loads(r.serialize()))
if len(tdf) > 0:
tdf['collection_name'] = c_title
tdf['collection_id'] = c_id
out_res_df = pd.concat([out_res_df, tdf], ignore_index=True)
except Exception as e:
if self.debug:
print("Error ungrouped: %s" % e)
if len(out_res_df) == 0:
mydf = None
str_err = "Success - No Results"
elif len(out_res_df) > 0:
mydf = out_res_df
str_err = "Success"
except Exception as e:
mydf = None
str_err = str(e)
if str_err.find("Success") >= 0:
pass
elif str_err.find("Session is not logged in") >= 0: # Need to fix this
# Try to rerun query
if reconnect == True:
self.disconnect(instance)
self.connect(instance)
m, s = self.customQuery(query, instance, False)
mydf = m
status = s
else:
mydf = None
status = "Failure - Session not logged in and reconnect failed"
else:
status = "Failure - query_error: " + str_err
return mydf, status
# Display Help can be customized
def customOldHelp(self):
self.displayIntegrationHelp()
self.displayQueryHelp('external_references.external_id = T1134.001')
def retCustomDesc(self):
return "Jupyter integration for working with Taxii Servers"
def customHelp(self, curout):
n = self.name_str
mn = self.magic_name
m = "%" + mn
mq = "%" + m
table_header = "| Magic | Description |\n"
table_header += "| -------- | ----- |\n"
out = curout
out += "## Taxii Queries/Syntax\n"
out += "----------------\n"
out += "A Taxii Filter is 'property' 'operator' value' as seen below in example\n"
out += "- You may use multiple lines of filters, they are ANDed together\n"
out += "- operator must be in %s\n" % self.allowed_ops
out += "\n\n"
qexamples = []
qexamples.append(["myinstance", "external_references.external_id = T1134.001", "Run a Taxii query on instance myinstance where the property is external_references.external_id, the operator is = and the value is T1134.001"])
qexamples.append(["", "external_references.external_id = T1134.001", "Run a Taxii query on the default where the property is external_references.external_id, the operator is = and the value is T1134.001"])
out += self.retQueryHelp(qexamples)
return out
def displayQueryHelp(self, q_example):
n = self.name_str
m = "%" + self.name_str
mq = "%" + m
print("")
print("Running queries with %s" % mq)
print("###############################################################################################")
print("")
print("When running queries with %s, %s will be on the first line of your cell, with an optional instance and the next line is the query you wish to run. Example:" % (mq, mq))
print("")
print(mq)
print(q_example)
print("")
print(mq + " myinstance")
print(q_example)
print("")
print("Some query notes:")
print("A Taxii Filter is 'property' 'operator' value' as seen above")
print("- You may use multiple lines of filters, they are ANDed together")
print("- operator must be in %s" % self.allowed_ops)
print("- If the number of results is less than display_max_rows, then the results will be diplayed in your notebook")
print("- You can change display_max_rows with %s set display_max_rows 2000" % m)
print("- The results, regardless of being displayed will be placed in a Pandas Dataframe variable called prev_%s_<instance>" % n)
print("- prev_%s_<instance> is overwritten every time a successful query is run. If you want to save results assign it to a new variable" % n)
# This is the magic name.
@line_cell_magic
def taxii(self, line, cell=None):
if cell is None:
line = line.replace("\r", "")
line_handled = self.handleLine(line)
if self.debug:
print("line: %s" % line)
print("cell: %s" % cell)
            if not line_handled: # Based on this we can do custom things for integrations.
if line.lower() == "testintwin":
print("You've found the custom testint winning line magic!")
else:
print("I am sorry, I don't know what you want to do with your line magic, try just %" + self.name_str + " for help options")
        else: # This is run if the cell is not None, thus it's a cell to process - For us, that means a query
self.handleCell(cell, line)
``` |
{
"source": "JohnOmernik/pimeup",
"score": 3
} |
#### File: pimeup/animatronics/JO_sound3.py
```python
import alsaaudio
import wave
import sys
import time
import struct
import math
channels = 2
rate = 44000
size = 1024
out_stream = alsaaudio.PCM(alsaaudio.PCM_PLAYBACK, alsaaudio.PCM_NORMAL, 'default')
out_stream.setformat(alsaaudio.PCM_FORMAT_S16_LE)
out_stream.setchannels(channels)
out_stream.setrate(rate)
out_stream.setperiodsize(size)
file = open("whizzer.wav", "rb")
SHORT_NORMALIZE = (1.0/32768.0)
CHUNK = 1024
swidth = 2
thres = 30
mouthpos = 0.0
# instantiate PyAudio (1)
def rms(frame):
count = len(frame)/swidth
format = "%dh"%(count)
shorts = struct.unpack( format, frame )
sum_squares = 0.0
for sample in shorts:
n = sample * SHORT_NORMALIZE
sum_squares += n*n
rms = math.pow(sum_squares/count,0.5);
return rms * 10000
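# Illustrative scale of the value returned by rms(): samples are normalized to -1.0..1.0,
# so silence yields roughly 0 and a frame near full scale approaches 10000 after the
# final scaling, which is why the mouth threshold (thres = 30) reacts to fairly quiet audio.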
data = file.read(size)
def moveMouth(sig):
global mouthpos
if mouthpos == 0.0:
if sig >= thres:
print("Opening Mouth")
mouthpos = 0.5
    elif mouthpos == 0.5:
if sig >= thres:
print("Closing Mouth")
mouthpos = 0.0
while data:
    out_stream.write(data)
    data = file.read(size)
    if data:  # guard against the final empty read, which would make rms() divide by zero
        moveMouth(rms(data))
```
#### File: animatronics/oldPWM/JO_Servo.py
```python
from Adafruit_PWM_Servo_Driver import PWM
import time
import sys
# ===========================================================================
# Example Code
# ===========================================================================
# Initialise the PWM device using the default address
pwm = PWM(0x40)
# Note if you'd like more debug output you can instead run:
#pwm = PWM(0x40, debug=True)
servoMin = 150 # Min pulse length out of 4096
servoMax = 600 # Max pulse length out of 4096
def setServoPulse(channel, pulse):
pulseLength = 1000000 # 1,000,000 us per second
pulseLength /= 60 # 60 Hz
print "%d us per period" % pulseLength
pulseLength /= 4096 # 12 bits of resolution
print "%d us per bit" % pulseLength
pulse *= 1000
pulse /= pulseLength
pwm.setPWM(channel, 0, pulse)
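# Worked example for setServoPulse (Python 2 integer division, as written): at 60 Hz the
# period is 1000000 / 60 = 16666 us, each of the 4096 counts is 16666 / 4096 = 4 us, so a
# requested pulse of 1.5 (ms) becomes 1.5 * 1000 / 4 = 375 counts handed to pwm.setPWM.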
pwm.setPWMFreq(60) # Set frequency to 60 Hz
#little low = 150
#
while True:
u = raw_input("Set pulse (e to exit): ")
if str(u) == "e":
sys.exit(0)
try:
u = int(u)
pwm.setPWM(3, 0, u)
except:
print("Not an int: %s - try again" % u)
f = """
while (True):
# Change speed of continuous servo on channel O
pwm.setPWM(0, 0, servoMin)
time.sleep(0.5)
pwm.setPWM(1, 0, servoMin)
time.sleep(0.5)
pwm.setPWM(2, 0, servoMin)
time.sleep(0.5)
pwm.setPWM(0, 0, servoMax)
time.sleep(0.5)
pwm.setPWM(1, 0, servoMax)
time.sleep(0.5)
pwm.setPWM(2, 0, servoMax)
time.sleep(0.5)
"""
```
#### File: pimeup/hand_servo/JO_pwr_test.py
```python
import Adafruit_PCA9685
import time
import random
import sys
# Initialise the PCA9685 using the default address (0x40).
pwm = Adafruit_PCA9685.PCA9685(0x40)
pwm.set_pwm_freq(60)
print("PWM Setup")
SRV_OPTIONS = []
SRV_OPTIONS.append({"SRV": 0, "DESC":"Thumb", "RANGE_MIN": 275, "RANGE_MAX": 575})
SRV_OPTIONS.append({"SRV": 1, "DESC":"Pointer", "RANGE_MIN": 300, "RANGE_MAX": 575})
SRV_OPTIONS.append({"SRV": 2, "DESC":"Middle", "RANGE_MIN": 325, "RANGE_MAX": 575})
SRV_OPTIONS.append({"SRV": 3, "DESC":"Ring", "RANGE_MIN": 275, "RANGE_MAX": 550})
SRV_OPTIONS.append({"SRV": 4, "DESC":"Pinky", "RANGE_MIN": 300, "RANGE_MAX": 575})
SRV_OPTIONS.append({"SRV": 5, "DESC":"WristFlex", "RANGE_MIN": 300, "RANGE_MAX": 600})
SRV_OPTIONS.append({"SRV": 6, "DESC":"WristTurn", "RANGE_MIN": 135, "RANGE_MAX": 650})
power_brightness = "/sys/devices/platform/leds/leds/led1/brightness"
def main():
sleeptime = 5
maxoff = 3
numoff = 0
runwell = True
maxtimes = 100
numtimes = 0
print("Beginning Loop in starting fist")
fistsalute()
handlist = [ fistsalute, queenswave, naughty, pointy, comehere, rockon ]
curidx = 0
while runwell:
try:
curtime = int(time.time())
curts = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(curtime))
with open(power_brightness) as read_state:
curpwr = int(read_state.read())
curpwr = True
numtimes += 1
#curidx = random.randint(0, len(handlist)-1)
print("Picking: %s" % curidx)
handlist[curidx]()
if curidx == len(handlist) - 1:
curidx = 0
else:
curidx += 1
if curpwr == 0:
numoff += 1
else:
numoff = 0
if numoff >= maxoff:
print("Power off for %s in a row @ %s - exiting" % (maxoff, curts))
runwell = False
elif numtimes >= maxtimes and maxtimes > 0:
print("Max times reached @ %s - Power: %s" % (curts, curpwr))
runwell = False
else:
print("Power Status @ %s: %s" % (curts, curpwr))
time.sleep(1)
fistsalute()
time.sleep(sleeptime)
except KeyboardInterrupt:
closeitdown()
closeitdown()
def closeitdown():
print("Closing it down")
pwm.set_pwm(0, 4096, 0)
pwm.set_pwm(1, 4096, 0)
pwm.set_pwm(2, 4096, 0)
pwm.set_pwm(3, 4096, 0)
pwm.set_pwm(4, 4096, 0)
pwm.set_pwm(5, 4096, 0)
pwm.set_pwm(6, 4096, 0)
sys.exit(0)
#SRV_OPTIONS.append({"SRV": 0, "DESC":"Thumb", "RANGE_MIN": 275, "RANGE_MAX": 575})
#SRV_OPTIONS.append({"SRV": 1, "DESC":"Pointer", "RANGE_MIN": 300, "RANGE_MAX": 575})
#SRV_OPTIONS.append({"SRV": 2, "DESC":"Middle", "RANGE_MIN": 325, "RANGE_MAX": 600})
#SRV_OPTIONS.append({"SRV": 3, "DESC":"Ring", "RANGE_MIN": 275, "RANGE_MAX": 550})
#SRV_OPTIONS.append({"SRV": 4, "DESC":"Pinky", "RANGE_MIN": 300, "RANGE_MAX": 575})
#SRV_OPTIONS.append({"SRV": 5, "DESC":"WristFlex", "RANGE_MIN": 250, "RANGE_MAX": 650})
#SRV_OPTIONS.append({"SRV": 6, "DESC":"WristTurn", "RANGE_MIN": 225, "RANGE_MAX": 625})
#thumb: 275 in 575 out
#pointer: 300 up 575 down
#middle: 325 up 575 down
#ring: 275 up 550 down
#pinky: 300 up 575 down
#wristflex: 300 forward 600 back
#wristturn: 135 straight front of hand - rotate toward thumb; 650 back of hand forward past straight on; 620 straight on with back of hand
def rockon():
print("Party on!")
makefist()
pwm.set_pwm(0, 0, 575)
pwm.set_pwm(1, 0, 300)
pwm.set_pwm(4, 0, 300)
time.sleep(4)
def pointy():
print("That's the guy...")
makefist()
pwm.set_pwm(6, 0, 135)
pwm.set_pwm(5, 0, 300)
pwm.set_pwm(1, 0, 300)
time.sleep(0.5)
pwm.set_pwm(5, 0, 350)
time.sleep(0.4)
pwm.set_pwm(5, 0, 300)
time.sleep(0.4)
pwm.set_pwm(5, 0, 350)
time.sleep(0.4)
pwm.set_pwm(5, 0, 300)
time.sleep(0.4)
pwm.set_pwm(5, 0, 350)
def comehere():
print("I beckon")
makefist()
pwm.set_pwm(6, 0, 620)
pwm.set_pwm(5, 0, 600)
time.sleep(0.5)
pwm.set_pwm(1, 0, 300)
time.sleep(0.5)
pwm.set_pwm(1, 0, 575)
time.sleep(0.5)
pwm.set_pwm(1, 0, 300)
time.sleep(0.5)
pwm.set_pwm(1, 0, 575)
time.sleep(0.5)
pwm.set_pwm(1, 0, 300)
time.sleep(0.5)
pwm.set_pwm(1, 0, 575)
def naughty():
print("Oh you filthy...")
makefist()
pwm.set_pwm(6, 0, 620)
pwm.set_pwm(2, 0, 325)
time.sleep(4)
def makefist():
print("Fists!")
pwm.set_pwm(0, 0, 275)
pwm.set_pwm(1, 0, 575)
pwm.set_pwm(2, 0, 600)
pwm.set_pwm(3, 0, 550)
pwm.set_pwm(4, 0, 575)
time.sleep(0.4)
def fistsalute():
makefist()
print("Power to the People!")
pwm.set_pwm(5, 0, 500)
pwm.set_pwm(6, 0, 135)
def queenswave():
print("Waving like the queen!")
# Set Fingers open with thumb in
pwm.set_pwm(0, 0, 275)
pwm.set_pwm(1, 0, 375)
pwm.set_pwm(2, 0, 300)
pwm.set_pwm(3, 0, 325)
pwm.set_pwm(4, 0, 375)
#set wrist up
time.sleep(0.2)
pwm.set_pwm(5, 0, 500)
time.sleep(0.2)
pwm.set_pwm(6, 0, 135)
time.sleep(1)
pwm.set_pwm(6, 0, 160)
time.sleep(1)
pwm.set_pwm(6, 0, 136)
time.sleep(1)
pwm.set_pwm(6, 0, 160)
time.sleep(1)
pwm.set_pwm(6, 0, 135)
time.sleep(1)
pwm.set_pwm(6, 0, 160)
time.sleep(1)
def setServoPulse(channel, pulse):
pulseLength = 1000000 # 1,000,000 us per second
pulseLength /= 60 # 60 Hz
print "%d us per period" % pulseLength
pulseLength /= 4096 # 12 bits of resolution
print "%d us per bit" % pulseLength
pulse *= 1000
pulse /= pulseLength
pwm.set_pwm(channel, 0, pulse)
if __name__ == "__main__":
main()
```
#### File: pimeup/hand_servo/JO_tester.py
```python
import Adafruit_PCA9685
import time
import random
import sys
import json
# Initialise the PCA9685 using the default address (0x40).
pwm = Adafruit_PCA9685.PCA9685(0x40)
pwm.set_pwm_freq(60)
SRV_OPTIONS = []
ACTIONS = {}
STATUS=""
thingfile = "/home/pi/pimeup/thingbox/thing.json"
thingactionfile = "/home/pi/pimeup/thingbox/thingactions.json"
def main():
global SRV_OPTIONS
global ACTIONS
global STATUS
SRV_OPTIONS = loadfile(thingfile)
ACTIONS = loadfile(thingactionfile)
cur_finger = -1
ACT_SHORT = []
upact = ""
downact = ""
for x in ACTIONS:
if x['KEY'] == "U":
upact = x['ACTION']
if x['KEY'] == "P":
downact = x['ACTION']
ACT_SHORT.append(x['KEY'])
# processAction(upact)
while True:
if cur_finger == -1:
print("Current Status: %s" % STATUS)
printServos()
printAction()
print("")
srv_sel = raw_input("Servo to move or action: ")
int_srv = -1
if srv_sel == "e":
print("Exiting!")
break
if srv_sel in ACT_SHORT:
processAction(srv_sel)
else:
try:
int_srv = int(srv_sel)
except:
print("Selected Servors must be an integer or action in this list:")
printServos()
printAction()
continue
for y in SRV_OPTIONS:
if int_srv == y['IDX']:
cur_finger = int_srv
break
if cur_finger == int_srv:
continue
else:
print("Servo provided (%s) not in the following List: Please try again")
printServos()
else:
for y in SRV_OPTIONS:
if cur_finger == y['IDX']:
mysrv = y
break
print("Currently working with Servo: %s - Press q to quit this" % cur_finger)
printServo(mysrv)
while True:
mv = raw_input("Enter Servo Value: ")
if mv == 'q':
cur_finger = -1
break
else:
try:
myval = int(mv)
except:
print("You must enter a integer")
continue
pwm.set_pwm(cur_finger, 0, myval)
processAction(downact)
time.sleep(2)
pwm.set_pwm(0, 4096, 0)
pwm.set_pwm(1, 4096, 0)
pwm.set_pwm(2, 4096, 0)
pwm.set_pwm(3, 4096, 0)
pwm.set_pwm(4, 4096, 0)
pwm.set_pwm(5, 4096, 0)
pwm.set_pwm(6, 4096, 0)
pwm.set_pwm(7, 4096, 0)
pwm.set_pwm(8, 4096, 0)
sys.exit(0)
def printServos():
print("")
print ("All Available Servos: ")
print("==============================")
for x in SRV_OPTIONS:
printServo(x)
print("")
def printServo(s):
print("Servo Number: %s - Desc: %s - Min Movement: %s - Max Movement: %s - Notes: %s" % (s['IDX'], s['DESC'], s['RANGE_MIN'], s['RANGE_MAX'], s['NOTES']))
def printAction():
print("")
print("Available Actions: ")
print("==============================")
for x in ACTIONS:
print("\t%s - %s - %s" % (x['KEY'], x['NAME'], x['DESC']))
print("")
def loadfile(f):
o = open(f, "rb")
tj = o.read()
o.close()
pj = ""
for line in tj.split("\n"):
if line.strip() == "" or line.strip().find("#") == 0:
pass
else:
pj += line.strip() + "\n"
print(pj)
return json.loads(pj)
def processAction(actKey):
global STATUS
act = {}
bfound = False
for x in ACTIONS:
if actKey == x['KEY']:
act = x
bfound = True
if bfound == True:
new_status = act['STATUS']
req_status = act['REQ_STATUS']
actStr = act['ACTION']
if req_status != "":
if STATUS.find(req_status) < 0:
print("Can't do it")
print("STATUS: %s" % STATUS)
print("req_status: %s" % req_status)
return
print("Running Action: %s" % act['NAME'])
for action in actStr.split(","):
tval = action.split(":")
act = tval[0]
val = tval[1]
if act == "P":
val = float(val)
time.sleep(val)
elif act == "A":
shutdown = False
try:
val = int(val)
if val == 0:
shutdown = True
except:
shutdown = False
if shutdown == True:
for x in range(len(SRV_OPTIONS) - 1):
pwm.set_pwm(x, 4096, 0)
else:
processAction(val)
else:
act = int(act)
val = int(val)
if val >= 0:
pwm.set_pwm(act, 0, val)
else:
pwm.set_pwm(act, 4096, 0)
if new_status != "":
STATUS = new_status
def setServoPulse(channel, pulse):
pulseLength = 1000000 # 1,000,000 us per second
pulseLength /= 60 # 60 Hz
print "%d us per period" % pulseLength
pulseLength /= 4096 # 12 bits of resolution
print "%d us per bit" % pulseLength
pulse *= 1000
pulse /= pulseLength
pwm.set_pwm(channel, 0, pulse)
if __name__ == "__main__":
main()
```
#### File: pimeup/relay/relay_basic.py
```python
import RPi.GPIO as GPIO
import time
#GPIO Mode (BOARD / BCM)
GPIO.setmode(GPIO.BCM)
#set GPIO Pins
GPIO_RELAY = 16
#set GPIO direction (IN / OUT)
GPIO.setup(GPIO_RELAY, GPIO.OUT)
def main ():
print("Setting to off")
GPIO.output(GPIO_RELAY, False)
print("Waiting 5 seconds")
time.sleep(5)
print("Setting to on")
GPIO.output(GPIO_RELAY, True)
print("Waiting 5 seconds")
time.sleep(5)
print("Setting to off")
GPIO.output(GPIO_RELAY, False)
if __name__ == '__main__':
main()
```
#### File: pimeup/thingbox/thingbox.py
```python
import Adafruit_PCA9685
import time
import random
import sys
import socket
import json
from socket import error as socket_error
import RPi.GPIO as GPIO
import cwiid
#GPIO Mode (BOARD / BCM)
GPIO.setmode(GPIO.BCM)
#set GPIO Pins
GPIO_MODE= 12
#set GPIO direction (IN / OUT)
GPIO.setup(GPIO_MODE, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
# Initialise the PCA9685 using the default address (0x40).
pwm = Adafruit_PCA9685.PCA9685(0x40)
pwm.set_pwm_freq(60)
SRV_OPTIONS = []
ACTIONS = {}
HOMENET = 0
NETWORK = 1
STATUS = ""
wiimode = 0
if GPIO.input(GPIO_MODE) == 1:
print("Wii mode is enabled! (Network and command line will not function)")
wiimode = 1
elif GPIO.input(GPIO_MODE) == 0:
print("Wii mode is not enabled")
wiimode = 0
try:
chknet = sys.argv[1]
print("Chknet: %s" % chknet)
if int(chknet) == 2: # No network command line interface
NETWORK = 0
wiimode = 0 # We reset this back
elif int(chknet) == 1 and wiimode == 0: # Use home net ip of 192.168.0.130
HOMENET = 1
except:
if wiimode == 0:
NETWORK = 1
else:
NETWORK = 0
thingfile = "/home/pi/pimeup/thingbox/thing.json"
thingactionfile = "/home/pi/pimeup/thingbox/thingactions.json"
STATUS_OPT = [ 'LIDUP', 'HANDUPLIDUP', 'HANDDOWNLIDUP', 'HANDDOWNLIDDOWN' ]
DEBUG = 1
BUT_DEBUG = 0
NET_DEBUG = 0
if HOMENET == 1:
UDP_IP = '192.168.0.130'
else:
UDP_IP = '192.168.1.110'
print("UDP IP is %s" % UDP_IP)
UDP_PORT = 30000
UDP_BUFFER_SIZE = 5
rpt_mode = 0
wiimote = None
connected = False
rumble = 0
b_val = False
status_chk = True
def main():
global SRV_OPTIONS
global ACTIONS
global STATUS
global wiimote
global rumble
global rpt_mode
global connected
global b_val
global status_chk
SRV_OPTIONS = loadfile(thingfile)
ACTIONS = loadfile(thingactionfile)
for x in SRV_OPTIONS:
print(x)
printActions()
cur_finger = -1
ACT_SHORT = []
upact = ""
downact = ""
STATUS="HANDDOWNLIDDOWN"
for x in ACTIONS:
if x['KEY'] == "U":
upact = x['ACTION']
if x['KEY'] == "P":
downact = x['ACTION']
ACT_SHORT.append(x['KEY'])
if NETWORK:
print("Listening on %s:%s" % (UDP_IP, UDP_PORT))
sock = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
sock.bind((UDP_IP, UDP_PORT))
if wiimode == 1:
# Setup Wii remote
print ("Press 1+2 to connect Wii")
while not connected:
try:
wiimote = cwiid.Wiimote()
print("Connected!")
connected = True
rumble ^= 1
wiimote.rumble = rumble
time.sleep(2)
rumble ^= 1
wiimote.rumble = rumble
except:
print("Trying Again, please press 1+2")
time.sleep(2)
# Now setup Wii Callback, Buttons, and Accelerometer
wiimote.mesg_callback = callback
# For Thing box mode we enable Button
rpt_mode ^= cwiid.RPT_BTN
# Enable the messages in callback
wiimote.enable(cwiid.FLAG_MESG_IFC);
wiimote.rpt_mode = rpt_mode
try:
while True:
if NETWORK:
data, addr = sock.recvfrom(UDP_BUFFER_SIZE)
elif wiimode == 0:
data = raw_input("Please Enter Raw Command: ")
else: # This is wii mode
time.sleep(0.5)
continue
if data:
if DEBUG or NET_DEBUG:
print("Recieved Data Update: %s" % data)
if data == "PINGM" or data == "PINGA":
pingtime = int(time.time())
response = data.replace("PING", "GOOD")
sock.sendto(response, addr)
if DEBUG or NET_DEBUG:
print("Got ping at %s, sent pong" % pingtime)
continue
if data.find(":") >= 0:
pass
else:
mdata = -10000
try:
mdata = int(data)
except:
mdata = -10000
if mdata > -10000:
data = "A:" + str(mdata)
elif data == "i":
for x in SRV_OPTIONS:
print(x)
printActions()
continue
elif len(data) == 1:
data = "A:" + data
else:
print("I think its bad: %s" % data)
continue
tdata = data.split(":")
cmdkey = tdata[0]
cmdval = tdata[1]
if str(cmdkey) == "A" and cmdval in ACT_SHORT:
processAction(cmdval)
elif str(cmdkey) == "C" and cmdval != "":
if int(cmdval) == 0:
status_chk = False
elif int(cmdval) == 1:
status_chk = True
elif str(cmdkey) == "S" and cmdval != "":
if cmdval in STATUS_OPT:
STATUS = cmdval
else:
print("Status needs to be in %s" % STATUS_OPT)
else:
try:
cmdkey = int(cmdkey)
cmdval = int(cmdval)
except:
print("cmdkey must be A or an integer")
continue
setfingerperc(cmdkey, cmdval)
except socket_error:
exitGracefully()
except KeyboardInterrupt:
exitGracefully()
def handle_buttons(buttons):
global b_val
global STATUS
global SENSORS
# The B (trigger) Button does cool things for us
# When pressed that allows the glove sensors to be read and sent
# It also changes what the other buttons do
if (buttons & cwiid.BTN_B):
b_val = True
else:
b_val = False
if (buttons & cwiid.BTN_UP):
if b_val == True:
processAction("O")
elif (buttons & cwiid.BTN_DOWN):
if b_val == True:
processAction("C")
elif (buttons & cwiid.BTN_LEFT):
processAction("W")
elif (buttons & cwiid.BTN_RIGHT):
if b_val == True:
processAction("B")
elif (buttons & cwiid.BTN_1):
if b_val == False:
processAction("M")
else:
processAction("S")
elif (buttons & cwiid.BTN_2):
if b_val == True:
processAction("F")
else:
processAction("P")
elif (buttons & cwiid.BTN_PLUS):
processAction("G")
elif (buttons & cwiid.BTN_MINUS):
# Locks the wrist up
processAction("L")
elif (buttons & cwiid.BTN_A):
# A will summon thing if B is not pressed, and put him a way if B is pressed
if b_val == False:
processAction("U")
elif b_val == True:
processAction("D")
elif (buttons & cwiid.BTN_HOME):
# Home Calms Servos
processAction("A")
def setfingerperc(cmdkey, cmdorigval, ignorestatus=False):
global SRV_OPTIONS
global STATUS
global status_chk
if STATUS.find("HANDUP") >= 0 or ignorestatus or not status_chk:
if SRV_OPTIONS[cmdkey]["INVERT"] == True:
cmdval = abs(cmdorigval - 100)
else:
cmdval = cmdorigval
setval = (cmdval * (SRV_OPTIONS[cmdkey]['RANGE_MAX'] - SRV_OPTIONS[cmdkey]['RANGE_MIN']) / 100) + SRV_OPTIONS[cmdkey]['RANGE_MIN']
if DEBUG or NET_DEBUG:
print("Setting Servo: %s (%s) to %s percent - (%s)" % (cmdkey, SRV_OPTIONS[cmdkey]['DESC'], cmdorigval, setval))
pwm.set_pwm(cmdkey, 0, setval)
else:
print("Will not preform commands due to STATUS: %s" % STATUS)
def callback(mesg_list, time):
global SENSORS
global b_val
global STATUS
for mesg in mesg_list:
if mesg[0] == cwiid.MESG_BTN:
handle_buttons(mesg[1])
if BUT_DEBUG or DEBUG:
print("Time: %s" % time)
print 'Button Report: %.4X' % mesg[1]
else:
print 'Unknown Report'
def printActions():
type_dict = {}
type_dict['0'] = "Maintaining the Thing"
type_dict['1'] = "Single Actions"
type_dict['2'] = "Neat Actions"
type_dict['3'] = "Addams Family Macros"
for t in range(4):
print("")
print("------------------------------------------------")
print("Type %s - %s" % (t, type_dict[str(t)]))
for x in ACTIONS:
if x['TYPE'] == t:
print("\t%s - %s\t\t%s" % (x['KEY'], x['NAME'], x['DESC']))
print("")
def processAction(actKey):
global STATUS
global status_chk
act = {}
bfound = False
for x in ACTIONS:
if actKey == x['KEY']:
act = x
bfound = True
if bfound == True:
new_status = act['STATUS']
req_status = act['REQ_STATUS']
actStr = act['ACTION']
if req_status != "":
if STATUS.find(req_status) < 0 and status_chk:
print("Can't do it")
print("STATUS: %s" % STATUS)
print("req_status: %s" % req_status)
return
print("Running Action: %s" % act['NAME'])
for action in actStr.split(","):
tval = action.split(":")
act = tval[0]
val = tval[1]
if act == "P":
val = float(val)
time.sleep(val)
elif act == "S":
STATUS = val
elif act == "A":
shutdown = False
try:
val = int(val)
if val == 0:
shutdown = True
except:
shutdown = False
if shutdown == True:
for x in range(len(SRV_OPTIONS)):
if x != 5 and x != 7:
pwm.set_pwm(x, 4096, 0)
else:
processAction(val)
else:
act = int(act)
val = int(val)
if val == -10:
pwm.set_pwm(act, 4096, 0)
else:
setfingerperc(act, val, True)
if new_status != "":
STATUS = new_status
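# Example ACTION string handled above (hypothetical thingactions.json entry):
#   "6:0,P:0.5,1:100,S:LIDUP"
# drives servo 6 to 0 percent, pauses half a second, drives servo 1 to 100 percent and sets
# STATUS to LIDUP; "A:0" relaxes every servo except 5 and 7, any other "A:<key>" chains into
# another action, and a servo value of -10 releases that single servo.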
def loadfile(f):
o = open(f, "rb")
tj = o.read()
o.close()
pj = ""
for line in tj.split("\n"):
if line.strip() == "" or line.strip().find("#") == 0:
pass
else:
pj += line.strip() + "\n"
# print(pj)
return json.loads(pj)
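# loadfile strips blank lines and lines starting with # before json.loads, so the JSON files
# can carry inline comments. A hypothetical thing.json entry, using only the keys read
# elsewhere in this script, might look like:
#   {"DESC": "Thumb", "RANGE_MIN": 275, "RANGE_MAX": 575, "INVERT": false}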
def exitGracefully():
print("")
print("ok Bye")
pwm.set_pwm(0, 4096, 0)
pwm.set_pwm(1, 4096, 0)
pwm.set_pwm(2, 4096, 0)
pwm.set_pwm(3, 4096, 0)
pwm.set_pwm(4, 4096, 0)
# pwm.set_pwm(5, 4096, 0)
pwm.set_pwm(6, 4096, 0)
# pwm.set_pwm(7, 4096, 0)
pwm.set_pwm(8, 4096, 0)
sys.exit(0)
def setServoPulse(channel, pulse):
pulseLength = 1000000 # 1,000,000 us per second
pulseLength /= 60 # 60 Hz
print "%d us per period" % pulseLength
pulseLength /= 4096 # 12 bits of resolution
print "%d us per bit" % pulseLength
pulse *= 1000
pulse /= pulseLength
pwm.set_pwm(channel, 0, pulse)
if __name__ == "__main__":
main()
```
#### File: pimeup/throne/throne.py
```python
import time
import random
import sys
import cwiid
import json
import gevent
from collections import OrderedDict
import cStringIO
import alsaaudio
import wave
import requests
import os
import struct
import math
from dotstar import Adafruit_DotStar
import socket
WHATAMI = os.path.basename(__file__).replace(".py", "")
WHOAMI = socket.gethostname()
m = alsaaudio.Mixer('PCM')
current_volume = m.getvolume() # Get the current Volume
print("Cur Vol: %s " % current_volume)
m.setvolume(100) # Set the volume to 100%.
current_volume = m.getvolume() # Get the current Volume
print("Cur Vol: %s " % current_volume)
mesg = False
rpt_mode = 0
wiimote = None
connected = False
rumble = 0
numpixels = 264 # Number of LEDs in strip
# Here's how to control the strip from any two GPIO pins:
datapin = 23
clockpin = 24
fire_colors = [ "#001100", "#005500", "#00FF00", "#33FFFF", "#FFFFFF" ]
outtimes = {}
mydelays = [0.001]
#, 0.02, 0.03, 0.1, 0.15]
heat = []
heat = []
heat.append(30)
COOLING = 15
num_colors = 100
my_colors = []
colors_dict = OrderedDict()
allcolors = []
fireplacestarttime = 0
soundstarttime = 0
curplay = 66
lasthb = 0
hbinterval = 30
fireplace = True
fireplacestart = False
soundstart = False
soundplaying = False
#Setting color to: 0xFF0000 # Green
#Setting color to: 0xCC00CC # Bright Teal
#Setting color to: 0x66CC00 # Orange
#Setting color to: 0x33FFFF # Magenta
#Setting color to: 0xFF00 # Red
#Setting color to: 0x330099 # Lightish Blue
#Setting color to: 0xFFFF00 # YEllow
#Setting color to: 0xFF # Bright Blue
#Setting color to: 0xFF9900 # YEllower Gren
#Setting color to: 0x33 # Dark BLue
strip = Adafruit_DotStar(numpixels, datapin, clockpin)
strip.setBrightness(255)
strip.begin() # Initialize pins for output
def main():
global strip
global allcolors
global firecolors
logevent("startup", "startup", "Just started and ready to run")
for x in range(len(fire_colors)):
if x == len(fire_colors) -1:
pass
else:
print("Adding gradient for %s (%s) to %s (%s) with %s colors" % (fire_colors[x], hex_to_RGB(fire_colors[x]), fire_colors[x+1], hex_to_RGB(fire_colors[x+1]), num_colors))
gtmp = linear_gradient(fire_colors[x], fire_colors[x+1], num_colors)
my_colors.append(gtmp['hex'])
colors_dict[fire_colors[x] + "_2_" + fire_colors[x+1]] = gtmp['hex']
for x in colors_dict:
for y in colors_dict[x]:
# print("Color: %s" % hex_to_RGB(y))
allcolors.append(y)
#Connect to address given on command-line, if present
print('Put Wiimote in discoverable mode now (press 1+2)...')
global wiimote
global rpt_mode
global connected
global rumble
print("Trying Connection")
print ("Press 1+2")
while not connected:
try:
wiimote = cwiid.Wiimote()
print("Connected!")
connected = True
rumble ^= 1
wiimote.rumble = rumble
time.sleep(2)
rumble ^= 1
wiimote.rumble = rumble
logevent("wii", "connect", "Wii remote just synced up")
except:
print("Trying Again, please press 1+2")
time.sleep(2)
wiimote.mesg_callback = callback
print("For LED we enable Button")
rpt_mode ^= cwiid.RPT_BTN
# Enable the messages in callback
wiimote.enable(cwiid.FLAG_MESG_IFC);
wiimote.rpt_mode = rpt_mode
gevent.joinall([
gevent.spawn(normal),
gevent.spawn(FirePlace),
gevent.spawn(playSound),
])
def logevent(etype, edata, edesc):
global WHOAMI
global WHATAMI
curtime = int(time.time())
curts = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(curtime))
outrec = OrderedDict()
outrec['ts'] = curts
outrec['host'] = WHOAMI
outrec['script'] = WHATAMI
outrec['event_type'] = etype
outrec['event_data'] = edata
outrec['event_desc'] = edesc
sendlog(outrec, False)
outrec = None
def normal():
global strip
global lasthb
global hbinterval
global soundstart
global curplay
global fireplacestart
global fireplacestarttime
global soundstarttime
global heat
global outtimes
global soundplaying
try:
while True:
curtime = int(time.time())
if curtime - lasthb > hbinterval:
logevent("heartbeat", wiimote.state['battery'], "wii HB")
lasthb = curtime
gevent.sleep(0.001)
except KeyboardInterrupt:
print("Exiting")
setAllLEDS(strip, [0x000000])
strip.setBrightness(0)
strip.show()
sys.exit()
def playSound():
global soundstart
global fireplacestart
global soundplaying
sounds = [0, 0, 0]
channels = 2
rate = 44100
size = 1024
out_stream = alsaaudio.PCM(alsaaudio.PCM_PLAYBACK, alsaaudio.PCM_NORMAL, 'default')
out_stream.setformat(alsaaudio.PCM_FORMAT_S16_LE)
out_stream.setchannels(channels)
out_stream.setrate(rate)
out_stream.setperiodsize(size)
soundreset = False
soundfiles = ['/home/pi/tool_mantra.wav']
memsound = {}
print("Loading Sound files to memory")
for sf in soundfiles:
f = open(sf, "rb")
sfdata = f.read()
f.close()
memsound[sf] = cStringIO.StringIO(sfdata)
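# Playback loop: when soundstart is set (by the Wii A button handler), rewind
# the in-memory WAV, mark soundplaying, flag the fireplace effect to restart,
# and stream the cached bytes to the ALSA PCM device in size-byte chunks.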
while True:
if soundstart == True:
if soundreset == False:
curfile = random.choice(soundfiles)
memsound[curfile].seek(0)
soundreset = True
soundstart = False
soundplaying = True
fireplacestart = True
data = memsound[curfile].read(size)
while data:
out_stream.write(data)
data = memsound[curfile].read(size)
gevent.sleep(0.001)
soundreset = False
soundplaying = False
else:
soundplaying = False
gevent.sleep(0.001)
def FirePlace():
global numpixels
global COOLING
global strip
global allcolors
global heat
global fireplacestart
global fireplace
# Every cycle there will be some random cooling
# Consider adding a degree of randomness to whether a pixel cools
try:
while True:
#If we see start then reset all to 255
if fireplacestart == True:
for i in range(numpixels):
heat[i] = 255
fireplacestart = False
if fireplace == True:
for i in range(numpixels):
if random.randint(0, 255) < COOLING:
tval = heat[i] - random.randint(0, ((COOLING * 10) / numpixels) + 2)
heat[i] = tval
gevent.sleep(random.choice(mydelays))
# This is supposed to be a diffusing effect I think
# k = numpixels -3
# while k > 2:
# if random.randint(0, 255) * 2 < COOLING:
# tval = (heat[k-1] + heat[ k- 2 ] + heat[ k- 2] ) / 3
# heat[k] = tval
# k = k - 1
# gevent.sleep(random.choice(mydelays))
# Now, actually set the pixels based on a scaled representation of all pixels
for j in range(numpixels):
if heat[j] > 255:
heat[j] = 255
if heat[j] < 0:
heat[j] = 0
newcolor = int((heat[j] * len(allcolors)) / 256)
strip.setPixelColor(j, int(allcolors[newcolor].replace("#", ''), 16))
gevent.sleep(random.choice(mydelays))
strip.show()
gevent.sleep(random.choice(mydelays))
else:
gevent.sleep(0.001)
except KeyboardInterrupt:
print("")
print("exiting and shutting down strip")
setAllLEDS(strip, [0x000000])
sys.exit(0)
def sendlog(log, debug):
logurl = "http://hauntcontrol:5050/hauntlogs"
try:
r = requests.post(logurl, json=log)
if debug:
print("Posted to %s status code %s" % (logurl, r.status_code))
print(json.dumps(log))
except:
if debug:
print("Post to %s failed timed out?" % logurl)
print(json.dumps(log))
def setAllLEDS(strip, colorlist):
for x in range(numpixels):
strip.setPixelColor(x, colorlist[0])
strip.show()
def rms(frame):
SHORT_NORMALIZE = (1.0/32768.0)
CHUNK = 1024
swidth = 2
count = len(frame)/swidth
format = "%dh"%(count)
shorts = struct.unpack( format, frame )
sum_squares = 0.0
for sample in shorts:
n = sample * SHORT_NORMALIZE
sum_squares += n*n
rms = math.pow(sum_squares/count,0.5);
return rms * 10000
def color_dict(gradient):
''' Takes in a list of RGB sub-lists and returns dictionary of
colors in RGB and hex form for use in a graphing function
defined later on '''
return {"hex":[RGB_to_hex(RGB) for RGB in gradient],
"r":[RGB[0] for RGB in gradient],
"g":[RGB[1] for RGB in gradient],
"b":[RGB[2] for RGB in gradient]}
def linear_gradient(start_hex, finish_hex="#FFFFFF", n=10):
''' returns a gradient list of (n) colors between
two hex colors. start_hex and finish_hex
should be the full six-digit color string,
including the number sign ("#FFFFFF") '''
# Starting and ending colors in RGB form
s = hex_to_RGB(start_hex)
f = hex_to_RGB(finish_hex)
# Initialize a list of the output colors with the starting color
RGB_list = [s]
# Calculate a color at each evenly spaced value of t from 1 to n
for t in range(1, n):
# Interpolate RGB vector for color at the current value of t
curr_vector = [ int(s[j] + (float(t)/(n-1))*(f[j]-s[j])) for j in range(3)]
# Add it to our list of output colors
RGB_list.append(curr_vector)
return color_dict(RGB_list)
def handle_buttons(buttons):
global heat
global strip
global soundstart
global soundplaying
if (buttons & cwiid.BTN_A):
print("soundplaying in A: %s" % soundplaying)
if soundplaying == False:
soundstart = True
logevent("index_change", "reset", "Reset the index to start loop again")
gevent.sleep(0.001)
def hex_to_RGB(hex):
''' "#FFFFFF" -> [255,255,255] '''
# Pass 16 to the integer function for change of base
return [int(hex[i:i+2], 16) for i in range(1,6,2)]
def RGB_to_hex(RGB):
''' [255,255,255] -> "#FFFFFF" '''
# Components need to be integers for hex to make sense
RGB = [int(x) for x in RGB]
return "#"+"".join(["0{0:x}".format(v) if v < 16 else
"{0:x}".format(v) for v in RGB])
def callback(mesg_list, time):
for mesg in mesg_list:
if mesg[0] == cwiid.MESG_BTN:
handle_buttons(mesg[1])
else:
print('Unknown Report')
if __name__ == "__main__":
main()
```
#### File: pimeup/torture2/torture2.py
```python
import time
import random
import sys
import alsaaudio
import json
import requests
import os
import socket
from collections import OrderedDict
WHOAMI = socket.gethostname()
WHATAMI = os.path.basename(__file__).replace(".py", "")
#m = alsaaudio.Mixer('PCM')
#current_volume = m.getvolume() # Get the current Volume
#print("Cur Vol: %s " % current_volume)
#m.setvolume(100) # Set the volume to 70%.
#current_volume = m.getvolume() # Get the current Volume
#print("Cur Vol: %s " % current_volume)
hbinterval = 30
lasthb = 0
def main():
logevent("startup", "startup", "Just started and ready to run")
global lasthb
global hbinterval
sounds = [0, 0, 0]
channels = 2
rate = 44100
size = 1024
out_stream = alsaaudio.PCM(alsaaudio.PCM_PLAYBACK, alsaaudio.PCM_NORMAL, 'default')
# out_stream.setformat(alsaaudio.PCM_FORMAT_S16_LE)
# out_stream.setchannels(channels)
# out_stream.setrate(rate)
# out_stream.setperiodsize(size)
soundfiles = ['/home/pi/torture_audio.wav']
while True:
curfile = random.choice(soundfiles)
curtime = int(time.time())
if curtime - lasthb > hbinterval:
logevent("heartbeat", "Working", "Cur Childhood sound file: %s" % curfile)
lasthb = curtime
curstream = open(curfile, "rb")
data = curstream.read(size)
while data:
out_stream.write(data)
data = curstream.read(size)
curstream.close()
sys.exit(0)
def logevent(etype, edata, edesc):
global WHOAMI
global WHATAMI
curtime = int(time.time())
curts = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(curtime))
outrec = OrderedDict()
outrec['ts'] = curts
outrec['host'] = WHOAMI
outrec['script'] = WHATAMI
outrec['event_type'] = etype
outrec['event_data'] = edata
outrec['event_desc'] = edesc
sendlog(outrec, False)
outrec = None
def sendlog(log, debug):
logurl = "http://hauntcontrol:5050/hauntlogs"
try:
r = requests.post(logurl, json=log)
if debug:
print("Posted to %s status code %s" % (logurl, r.status_code))
print(json.dumps(log))
except:
if debug:
print("Post to %s failed timed out?" % logurl)
print(json.dumps(log))
if __name__ == "__main__":
main()
```
#### File: pimeup/vault/vault.py
```python
import cwiid
import sys
import gevent
import time
import datetime
import atexit
import json
import requests
import os
from collections import OrderedDict
import random
from dotstar import Adafruit_DotStar
import socket
import cStringIO
import alsaaudio
import wave
import struct
import math
WHOAMI = socket.gethostname()
WHATAMI = os.path.basename(__file__).replace(".py", "")
mesg = False
rpt_mode = 0
wiimote = None
connected = False
rumble = 0
numpixels = 120 # Number of LEDs in strip
lasthb = 0
hbinterval = 30
defaultColor = 0xFFFFCC
defaultBright = 255
# Here's how to control the strip from any two GPIO pins:
datapin = 23
clockpin = 24
strip = Adafruit_DotStar(numpixels, datapin, clockpin)
strip.begin() # Initialize pins for output
strip.setBrightness(255) # Limit brightness to ~1/4 duty cycle
playsound = False
normallight = True
goldlight = False
fire_colors = [ "#CCCCCC", "#33CC00", "#66CC00"]
# "#FFFF00"]
num_colors = 100
my_colors = []
colors_dict = OrderedDict()
allcolors = []
pulse_colors = []
pulse_mod = 4
eventarray = []
eventarray.append({"playsound": False, "normallight": True, "goldlight": False})
eventarray.append({"playsound": True, "normallight": False, "goldlight": True})
eventidx = 0
#Setting color to: 0xFF0000 # Green
#Setting color to: 0xCC00CC # Bright Teal
#Setting color to: 0x66CC00 # Orange
#Setting color to: 0x33FFFF # Magenta
#Setting color to: 0xFF00 # Red
#Setting color to: 0x330099 # Lightish Blue
#Setting color to: 0xFFFF00 # YEllow
#Setting color to: 0xFF # Bright Blue
#Setting color to: 0xFF9900 # YEllower Gren
#Setting color to: 0x33 # Dark BLue
def main():
#Connect to address given on command-line, if present
logevent("startup", "startup", "Just started and ready to run")
print('Put Wiimote in discoverable mode now (press 1+2)...')
global wiimote
global rpt_mode
global connected
global strip
global rumble
global allcolors
global my_colors
global fire_colors
global pulse_colors
global pulse_mod
for x in range(len(fire_colors)):
if x == len(fire_colors) -1:
pass
else:
print("Adding gradient for %s (%s) to %s (%s) with %s colors" % (fire_colors[x], hex_to_RGB(fire_colors[x]), fire_colors[x+1], hex_to_RGB(fire_colors[x+1]), num_colors))
gtmp = linear_gradient(fire_colors[x], fire_colors[x+1], num_colors)
my_colors.append(gtmp['hex'])
colors_dict[fire_colors[x] + "_2_" + fire_colors[x+1]] = gtmp['hex']
for x in colors_dict:
for y in colors_dict[x]:
allcolors.append(y)
ccnt = 0
for x in reversed(allcolors):
ccnt += 1
pulse_colors.append(x)
if ccnt > num_colors / pulse_mod:
break
print("Pulse colors has %s colors" % len(pulse_colors))
print("Trying Connection")
print ("Press 1+2")
while not connected:
try:
wiimote = cwiid.Wiimote()
print("Connected!")
connected = True
rumble ^= 1
wiimote.rumble = rumble
time.sleep(2)
rumble ^= 1
wiimote.rumble = rumble
logevent("wii", "connect", "Wii remote just synced up")
except:
print("Trying Again, please press 1+2")
time.sleep(2)
wiimote.mesg_callback = callback
print("For LED we enable Button")
rpt_mode ^= cwiid.RPT_BTN
# Enable the messages in callback
wiimote.enable(cwiid.FLAG_MESG_IFC);
wiimote.rpt_mode = rpt_mode
gevent.joinall([
gevent.spawn(normal),
gevent.spawn(Lights),
gevent.spawn(PlaySound),
])
def logevent(etype, edata, edesc):
global WHOAMI
global WHATAMI
curtime = int(time.time())
curts = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(curtime))
outrec = OrderedDict()
outrec['ts'] = curts
outrec['host'] = WHOAMI
outrec['script'] = WHATAMI
outrec['event_type'] = etype
outrec['event_data'] = edata
outrec['event_desc'] = edesc
sendlog(outrec, False)
outrec = None
def Lights():
global strip
global normallight
global goldlight
global pulse_colors
global allcolors
goldon = False
lighton = False
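# Light states: with normallight the strip holds defaultColor at defaultBright;
# when goldlight is first raised it sweeps once through the full gradient, then
# pulses back and forth through pulse_colors until goldlight is cleared again.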
while True:
if normallight == True and lighton == False and goldlight == False:
if goldon == True:
setAllLEDS(strip, [0x000000])
goldon = False
gevent.sleep(0.001)
strip.setBrightness(defaultBright)
setAllLEDS(strip, [defaultColor])
lighton = True
gevent.sleep(0.001)
elif goldlight == True and lighton == True and goldon == False:
goldon = True
for x in allcolors:
setAllLEDS(strip, [int(x.replace("#", ''), 16)])
gevent.sleep(0.001)
lighton = False
elif goldlight == True and goldon == True and lighton == False:
for x in pulse_colors:
setAllLEDS(strip, [int(x.replace("#", ''), 16)])
if goldlight == False:
break
gevent.sleep(0.05)
for x in reversed(pulse_colors):
if goldlight == False:
break
setAllLEDS(strip, [int(x.replace("#", ''), 16)])
gevent.sleep(0.05)
gevent.sleep(0.01)
def PlaySound():
global strip
global playsound
print("start playsound")
sounds = [0, 0, 0]
channels = 2
rate = 44100
size = 1024
soundfiles = ['/home/pi/Madness_Vault_Whispers.wav']
out_stream = alsaaudio.PCM(alsaaudio.PCM_PLAYBACK, alsaaudio.PCM_NORMAL, 'default')
out_stream.setformat(alsaaudio.PCM_FORMAT_S16_LE)
out_stream.setchannels(channels)
out_stream.setrate(rate)
out_stream.setperiodsize(size)
memsound = {}
print("Loading Sound files to memory")
for sf in soundfiles:
f = open(sf, "rb")
sfdata = f.read()
f.close()
memsound[sf] = cStringIO.StringIO(sfdata)
soundreset = False
while True:
if playsound == True:
print("Sound")
if soundreset == False:
curfile = random.choice(soundfiles)
memsound[curfile].seek(0)
soundreset = True
data = memsound[curfile].read(size)
gevent.sleep(0.001)
while data and playsound == True:
out_stream.write(data)
data = memsound[curfile].read(size)
gevent.sleep(0.001)
playsound = False
soundreset = False
else:
gevent.sleep(0.001)
sys.exit(0)
def sendlog(log, debug):
logurl = "http://hauntcontrol:5050/hauntlogs"
try:
r = requests.post(logurl, json=log)
if debug:
print("Posted to %s status code %s" % (logurl, r.status_code))
print(json.dumps(log))
except:
if debug:
print("Post to %s failed timed out?" % logurl)
print(json.dumps(log))
def normal():
global strip
global wiimote
global lasthb
global hbinterval
try:
while True:
curtime = int(time.time())
if curtime - lasthb > hbinterval:
logevent("heartbeat", wiimote.state['battery'], "wii HB")
lasthb = curtime
gevent.sleep(0.001)
except KeyboardInterrupt:
print("Exiting")
setAllLEDS(strip, [0x000000])
strip.setBrightness(0)
strip.show()
wiimote.close()
sys.exit()
def handle_buttons(buttons):
global strip
global eventarray
global eventidx
global playsound
global normallight
global goldlight
changed = False
if (buttons & cwiid.BTN_A):
previdx = eventidx
if eventidx == 0:
eventidx += 1
changed = True
if (buttons & cwiid.BTN_1):
previdx = eventidx
if eventidx != 0:
eventidx = 0
changed = True
if changed == True:
curtime = int(time.time())
goldlight = eventarray[eventidx]["goldlight"]
normallight = eventarray[eventidx]["normallight"]
playsound = eventarray[eventidx]["playsound"]
logevent("index_change", eventarray[eventidx], "Event list index changed from %s to %s" % (previdx, eventidx))
def rms(frame):
SHORT_NORMALIZE = (1.0/32768.0)
CHUNK = 1024
swidth = 2
count = len(frame)/swidth
format = "%dh"%(count)
shorts = struct.unpack( format, frame )
sum_squares = 0.0
for sample in shorts:
n = sample * SHORT_NORMALIZE
sum_squares += n*n
rms = math.pow(sum_squares/count,0.5);
return rms * 10000
#BTN_1', 'BTN_2', 'BTN_A', 'BTN_B', 'BTN_DOWN', 'BTN_HOME', 'BTN_LEFT', 'BTN_MINUS', 'BTN_PLUS', 'BTN_RIGHT', 'BTN_UP',
def color_dict(gradient):
''' Takes in a list of RGB sub-lists and returns dictionary of
colors in RGB and hex form for use in a graphing function
defined later on '''
return {"hex":[RGB_to_hex(RGB) for RGB in gradient],
"r":[RGB[0] for RGB in gradient],
"g":[RGB[1] for RGB in gradient],
"b":[RGB[2] for RGB in gradient]}
def linear_gradient(start_hex, finish_hex="#FFFFFF", n=10):
''' returns a gradient list of (n) colors between
two hex colors. start_hex and finish_hex
should be the full six-digit color string,
including the number sign ("#FFFFFF") '''
# Starting and ending colors in RGB form
s = hex_to_RGB(start_hex)
f = hex_to_RGB(finish_hex)
# Initialize a list of the output colors with the starting color
RGB_list = [s]
# Calculate a color at each evenly spaced value of t from 1 to n
for t in range(1, n):
# Interpolate RGB vector for color at the current value of t
curr_vector = [ int(s[j] + (float(t)/(n-1))*(f[j]-s[j])) for j in range(3)]
# Add it to our list of output colors
RGB_list.append(curr_vector)
return color_dict(RGB_list)
def hex_to_RGB(hex):
''' "#FFFFFF" -> [255,255,255] '''
# Pass 16 to the integer function for change of base
return [int(hex[i:i+2], 16) for i in range(1,6,2)]
def RGB_to_hex(RGB):
''' [255,255,255] -> "#FFFFFF" '''
# Components need to be integers for hex to make sense
RGB = [int(x) for x in RGB]
return "#"+"".join(["0{0:x}".format(v) if v < 16 else
"{0:x}".format(v) for v in RGB])
def callback(mesg_list, time):
for mesg in mesg_list:
if mesg[0] == cwiid.MESG_BTN:
handle_buttons(mesg[1])
# print("Time: %s" % time)
# print 'Button Report: %.4X' % mesg[1]
else:
print('Unknown Report')
def setAllLEDS(strip, colorlist):
numcolors = len(colorlist)
for x in range(numpixels):
idx = x % numcolors
strip.setPixelColor(x, colorlist[idx])
strip.show()
if __name__ == "__main__":
main()
``` |
{
"source": "JohnOmernik/pyjsonetl",
"score": 2
} |
#### File: JohnOmernik/pyjsonetl/pyjson.py
```python
from confluent_kafka import Consumer, KafkaError
import json
import re
import time
import shutil
import gzip
import os
import sys
# Variables - Should be settable by arguments at some point
envvars = {}
# envvars['var'] = ['default', 'True/False Required', 'str/int']
#Kafka
envvars['zookeepers'] = ['', False, 'str']
envvars['kafka_id'] = ['', False, 'str']
envvars['bootstrap_brokers'] = ['', False, 'str']
envvars['offset_reset'] = ['earliest', False, 'str']
envvars['group_id'] = ['', True, 'str']
envvars['topic'] = ['', True, 'str']
envvars['loop_timeout'] = ["5.0", False, 'flt']
#Loop Control
envvars['rowmax'] = [50, False, 'int']
envvars['timemax'] = [60, False, 'int']
envvars['sizemax'] = [256000, False, 'int']
# JSON Options
envvars['json_gz_compress'] = [0, False, 'bool'] # Not supported yet
envvars['filemaxsize'] = [8000000, False, 'int']
envvars['uniq_env'] = ['HOSTNAME', False, 'str']
# Data Management
envvars['unknownpart'] = ['unknown', False, 'str']
envvars['partition_field'] = ['', True, 'str']
envvars['partmaxage'] = ['600', False, 'int']
envvars['remove_fields_on_fail'] = [0, False, 'int'] # If Json fails to import, should we try to remove_fields based on 'REMOVE_FIELDS'
envvars['remove_fields'] = ['', False, 'str'] # Comma Sep list of fields to try to remove if failure on JSON import
# Destination Options
envvars['table_base'] = ['', True, 'str']
envvars['tmp_part_dir'] = ['.tmp', False, 'str']
envvars['write_live'] = [0, False, 'int']
# Debug
envvars['debug'] = [0, False, 'int']
loadedenv = {}
def main():
global loadedenv
loadedenv = loadenv(envvars)
loadedenv['tmp_part'] = loadedenv['table_base'] + "/" + loadedenv['tmp_part_dir']
loadedenv['uniq_val'] = os.environ[loadedenv['uniq_env']]
if loadedenv['debug'] == 1:
print(json.dumps(loadedenv, sort_keys=True, indent=4, separators=(',', ': ')))
if not os.path.isdir(loadedenv['tmp_part']):
os.makedirs(loadedenv['tmp_part'])
# Get the Bootstrap brokers if it doesn't exist
if loadedenv['bootstrap_brokers'] == "":
if loadedenv['zookeepers'] == "":
print("Must specify either Bootstrap servers via BOOTSTRAP_BROKERS or Zookeepers via ZOOKEEPERS")
sys.exit(1)
mybs = bootstrap_from_zk(loadedenv['zookeepers'], loadedenv['kafka_id'])
else:
if loadedenv['bootstrap_brokers'] == 'mapr':
mybs = ''
if loadedenv['debug'] >= 1:
print (mybs)
# Create Consumer group to listen on the topic specified
c = Consumer({'bootstrap.servers': mybs, 'group.id': loadedenv['group_id'], 'default.topic.config': {'auto.offset.reset': loadedenv['offset_reset']}})
c.subscribe([loadedenv['topic']], on_assign=print_assignment)
# Initialize counters
rowcnt = 0
sizecnt = 0
lastwrite = int(time.time()) - 1
jsonar = []
part_ledger = {}
curfile = loadedenv['uniq_val'] + "_curfile.json"
# Listen for messages
running = True
while running:
curtime = int(time.time())
timedelta = curtime - lastwrite
try:
message = c.poll(timeout=loadedenv['loop_timeout'])
except KeyboardInterrupt:
print("\n\nExiting per User Request")
c.close()
sys.exit(0)
if message == None:
# No message was found but we still want to check our stuff
pass
elif not message.error():
rowcnt += 1
# This is a message let's add it to our queue
try:
# This may not be the best way to approach this.
val = message.value().decode('ascii', errors='ignore')
except:
print(message.value())
val = ""
# Only write if we have a message
if val != "":
#Keep Rough size count
sizecnt += len(val)
failedjson = 0
try:
jsonar.append(json.loads(val))
except:
failedjson = 1
if loadedenv['remove_fields_on_fail'] == 1:
print("JSON Error likely due to binary in request - per config remove_field_on_fail - we are removing the the following fields and trying again")
while failedjson == 1:
repval = message.value()
for f in loadedenv['remove_fields'].split(","):
print("Trying to remove: %s" % f)
repval = re.sub(b'"' + f.encode() + b'":".+?","', b'"' + f.encode() + b'":"","', repval)
try:
jsonar.append(json.loads(repval.decode("ascii", errors='ignore')))
failedjson = 0
break
except:
print("Still could not force into json even after dropping %s" % f)
if failedjson == 1:
if loadedenv['debug'] == 1:
print(repval.decode("ascii", errors='ignore'))
failedjson = 2
if loadedenv['debug'] >= 1 and failedjson >= 1:
print ("JSON Error - Debug - Attempting to print")
print("Raw form kafka:")
try:
print(message.value())
except:
print("Raw message failed to print")
print("Ascii Decoded (Sent to json.dumps):")
try:
print(val)
except:
print("Ascii dump message failed to print")
elif message.error().code() != KafkaError._PARTITION_EOF:
print("MyError: " + message.error())
running = False
break
# If our row count is over the max, our size is over the max, or time delta is over the max, write the group to the json.
if (rowcnt >= loadedenv['rowmax'] or timedelta >= loadedenv['timemax'] or sizecnt >= loadedenv['sizemax']) and len(jsonar) > 0:
parts = []
for x in jsonar:
try:
p = x[loadedenv['partition_field']]
except:
print("Error: Record without Partition field - Using default Partition of %s" % loadedenv['unknownpart'])
p = loadedenv['unknownpart']
if not p in parts:
parts.append(p)
if loadedenv['debug'] >= 1:
print("Write JSON Ar to %s at %s records - Size: %s - Seconds since last write: %s - Partitions in this batch: %s" % (curfile, rowcnt, sizecnt, timedelta, parts))
for part in parts:
partar = []
for x in jsonar:
try:
curpart = x[loadedenv['partition_field']]
except:
curpart = loadedenv['unknownpart']
if curpart == part:
partar.append(x)
if loadedenv['write_live'] == 1:
base_dir = loadedenv['table_base'] + "/" + part
else:
base_dir = loadedenv['table_base'] + "/" + loadedenv['tmp_part_dir'] + "/" + part
final_file = base_dir + "/" + curfile
if not os.path.isdir(base_dir):
try:
os.makedirs(base_dir)
except:
print("Partition Create failed, it may have been already created for %s" % (base_dir))
if loadedenv['debug'] >= 1:
print("----- Writing partition %s to %s" % (part, final_file))
fout = open(final_file, 'a')
for x in partar:
fout.write(json.dumps(x) + "\n")
fout.close()
cursize = os.path.getsize(final_file)
ledger = [curtime, cursize, final_file]
part_ledger[part] = ledger
partar = []
jsonar = []
rowcnt = 0
sizecnt = 0
lastwrite = curtime
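# Check the partition ledger: any temp file that has grown past FILEMAXSIZE or
# is older than PARTMAXAGE is (optionally gzip-compressed and) moved into its
# final partition directory under TABLE_BASE, then dropped from the ledger.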
removekeys = []
for x in part_ledger.keys():
l = part_ledger[x][0]
s = part_ledger[x][1]
f = part_ledger[x][2]
base_dir = loadedenv['table_base'] + '/' + x
if not os.path.isdir(base_dir):
try:
os.makedirs(base_dir)
except:
print("Partition Create failed, it may have been already created for %s" % (base_dir))
if s > loadedenv['filemaxsize'] or (curtime - l) > loadedenv['partmaxage']:
new_file_name = loadedenv['uniq_val'] + "_" + str(curtime) + ".json"
new_file = base_dir + "/" + new_file_name
if loadedenv['debug'] >= 1:
outreason = ""
if s > loadedenv['filemaxsize']:
outreason = "Max Size"
else:
outreason = "Max Age"
print("%s reached - Size: %s - Age: %s - Writing to %s" % (outreason, s, curtime - l, new_file))
if loadedenv['json_gz_compress'] == 1:
if loadedenv['debug'] >= 1:
print("Compressing json files")
with open(f, 'rb') as f_in:
with gzip.open(f + ".gz", 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(f)
f = f + ".gz"
new_file = new_file + ".gz"
shutil.move(f, new_file)
removekeys.append(x)
for y in removekeys:
del part_ledger[y]
c.close()
def print_assignment(consumer, partitions):
if loadedenv['debug'] >= 1:
print('Assignment of group to partitions %s' % partitions)
def loadenv(evars):
print("Loading Environment Variables")
lenv = {}
for e in evars:
try:
val = os.environ[e.upper()]
except:
if evars[e][1] == True:
print("ENV Variable %s is required and not provided - Exiting" % (e.upper()))
sys.exit(1)
else:
print("ENV Variable %s not found, but not required, using default of '%s'" % (e.upper(), evars[e][0]))
val = evars[e][0]
if evars[e][2] == 'int':
val = int(val)
if evars[e][2] == 'flt':
val = float(val)
if evars[e][2] == 'bool':
val=bool(val)
lenv[e] = val
return lenv
# Get our bootstrap string from zookeepers if provided
def bootstrap_from_zk(ZKs, kafka_id):
from kazoo.client import KazooClient
zk = KazooClient(hosts=ZKs,read_only=True)
zk.start()
brokers = zk.get_children('/%s/brokers/ids' % kafka_id)
BSs = ""
for x in brokers:
res = zk.get('/%s/brokers/ids/%s' % (kafka_id, x))
dj = json.loads(res[0].decode('utf-8'))
srv = "%s:%s" % (dj['host'], dj['port'])
if BSs == "":
BSs = srv
else:
BSs = BSs + "," + srv
zk.stop()
zk = None
return BSs
if __name__ == "__main__":
main()
``` |
{
"source": "JohnOmernik/raspfarm",
"score": 3
} |
#### File: JohnOmernik/raspfarm/farmqueue.py
```python
import sys
import os
import time
try:
import farmradio
except:
print("farmradio not imported")
try:
import farmradio_usb
except:
print("farmradio_usb not imported")
import random
import socket
import hashlib
import threading
import gevent
from collections import OrderedDict
def main():
print("Queue Testing")
class FarmQueue():
fr = None
send_queue = OrderedDict()
recv_queue = OrderedDict()
radio_conf = {}
resend_delay = None
myname = ""
timeout = None
debug = False
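# send_queue entries are keyed by the md5 of the wire message and hold
# {'to', 'msg', 'last_send', 'first_send', 'require_ack', 'ack'}; recv_queue
# entries are keyed the same way and hold {'from', 'ts_recv', 'ts_proc', 'ts_lastack', 'msg'}.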
def __init__(self, debug=False, timeout=1.0, resend_delay=5, send_prune_window=60, recv_prune_window=60, radio_conf={"radio_freq_mhz": 915.5, "radio_tx_pwr": 20, "radio_serial_port": "spi", "radio_mode": "lora", "radio_spread_factor": 7, "radio_crc": False, "radio_cr": 5, "radio_bw": 125, "radio_wdt": 0}):
self.radio_conf = radio_conf
self.recv_prune_window = recv_prune_window # Number of seconds to leave a processed message after the last ack in recv queue
self.send_prune_window = send_prune_window # Number of seconds since first send (without an ack) to give up on message
self.debug = debug
self.timeout = timeout
self.resend_delay = resend_delay
self.myname = socket.gethostname().lower()
if self.radio_conf['radio_serial_port'] == "spi":
self.fr = farmradio.FarmRadio(debug=self.debug, timeout=self.timeout, radio_conf=self.radio_conf)
else:
self.fr = farmradio_usb.FarmRadio(debug=self.debug, timeout=self.timeout, radio_conf=self.radio_conf)
def getmsg(self):
queue = OrderedDict(self.recv_queue)
for msg in queue.keys():
curtime = int(time.time())
if self.recv_queue[msg]['ts_proc'] == 0:
self.recv_queue[msg]['ts_proc'] = curtime
gevent.sleep(1)
return self.recv_queue[msg]['msg']
elif self.recv_queue[msg]['ts_proc'] > 0:
if curtime - self.recv_queue[msg]['ts_lastack'] >= self.recv_prune_window:
del self.recv_queue[msg]
gevent.sleep(1)
return None
def sendmsgs(self):
while True:
if self.debug:
print("_______________________________________________________Send Queue: %s" % len(self.send_queue.keys()))
queue = OrderedDict(self.send_queue)
for msghash in queue.keys():
if self.debug:
print("processing: %s" % msghash)
curtime = int(time.time())
if (self.send_queue[msghash]['ack'] == False and self.send_queue[msghash]['require_ack'] == True) or self.send_queue[msghash]['last_send'] == 0:
if curtime - self.send_queue[msghash]['last_send'] > self.resend_delay:
if self.send_queue[msghash]['first_send'] == 0:
self.send_queue[msghash]['first_send'] = curtime
self.fr.send_raw(self.send_queue[msghash]['msg'])
self.send_queue[msghash]['last_send'] = curtime
print("Sending %s - %s - Ack Required: %s" % (msghash, self.send_queue[msghash]['msg'], self.send_queue[msghash]['require_ack']))
if curtime - self.send_queue[msghash]['first_send'] >= self.send_prune_window:
print(">>>>> !!!!!!! >>>>> !!!!! - Message %s was first sent %s and it's now %s, longer then the send_prune_window(%s): Removing" % (msghash, self.send_queue[msghash]['first_send'], curtime, self.send_prune_window))
del self.send_queue[msghash]
elif self.send_queue[msghash]['require_ack'] == False:
del self.send_queue[msghash]
elif self.send_queue[msghash]['ack'] == True:
del self.send_queue[msghash]
gevent.sleep(0.5)
gevent.sleep(0.5)
def recvmsgs(self):
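# Receive loop: wire messages are '~'-delimited as ts~to~from~require_ack~payload.
# A payload of the form 'ack:<md5 of the original message>' marks the matching
# send_queue entry as acknowledged; normal messages are keyed by their md5 and
# acked back to the sender when require_ack is 1.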
while True:
if self.debug:
print("_______________________________________________________Recv Queue: %s" % len(self.recv_queue.keys()))
curtime = int(time.time())
msg, snr = self.fr.recv_raw()
if msg != "" and msg is not None:
msgar = msg.split("~")
if len(msgar) == 5:
try:
msgts = msgar[0]
msgto = msgar[1].lower()
msgfrom = msgar[2].lower()
msgack = int(msgar[3])
msgstr = msgar[4]
except:
print("----- Message did not split into 5 parts: %s" % msg)
msgto = None
if msgto.lower() == self.myname.lower(): # If the dest address is the same as me, then accept the messages
if self.debug:
print("##### Got a FQ message at %s signal: %s" % (msg, snr))
if msgstr.find("ack:") >= 0: # Check to see if this is an acked message
msghash = msgstr.split(":")[1]
if self.debug:
print("@@@@@@ Recv ACK for message %s" % msghash)
if msghash in self.send_queue:
self.send_queue[msghash]['ack'] = True
else:
print("@!@!@ Message ack sent, but we don't have this message in queue")
# this is a Message ack
else: # Normal message
msghash = hashlib.md5(msg.encode("UTF-8")).hexdigest()
if msghash in self.recv_queue: # We've already gotten this, so let's not re process it, but we will resend ack if needed
if msgack == 1:
self.sendack(msgfrom, msghash)
self.recv_queue[msghash]['ts_lastack'] = curtime
else:
self.recv_queue[msghash] = {'from': msgfrom, 'ts_recv': curtime, 'ts_proc': 0, 'ts_lastack': 0, 'msg': msg}
if msgack == 1:
self.sendack(msgfrom, msghash)
self.recv_queue[msghash]['ts_lastack'] = curtime
else:
pass
#print("!!!>> Message not for me: %s vs. %s" % (msgto.lower(), self.myname.lower()))
else:
print("Odd message: %s" % msg)
gevent.sleep(0.5)
#msg: ts~recp~sender~require_ack (0 or 1), msg
# Ack a message we have received
def sendack(self, msgto, msghash):
if self.debug:
print("@@@@@ Sending msgack to %s for %s" % (msgto,msghash))
mymsg = "ack:%s" % (msghash)
self.sendmsg(msgto, mymsg, False)
def sendmsg(self, msgto, base_msg, require_ack):
curtime = int(time.time())
if require_ack == True:
msgack = 1
else:
msgack = 0
strmsg = "%s~%s~%s~%s~%s" % (curtime, msgto, self.myname, msgack, base_msg)
msghash = hashlib.md5(strmsg.encode("UTF-8")).hexdigest()
if self.debug:
print("##### Putting msg %s in send_queue: %s" % (msghash, strmsg))
self.send_queue[msghash] = {'to': msgto, 'msg': strmsg, 'last_send': 0, 'first_send': 0, 'require_ack': require_ack, "ack": False}
if __name__ == "__main__":
main()
```
#### File: JohnOmernik/raspfarm/farmradio_usb.py
```python
import time
import sys
import socket
import serial
import binascii
import random
import io
class FarmRadio():
radio_freq_mhz = None
radio_tx_pwr = None
radio_serial_port = None
radio_mode = None
radio_spread_factor = None
radio_crc = None
radio_cr = None
radio_bw = None
radio_wdt = None
timeout = None
myname = None
spi = None
rfm9x = None
ser = None
prev_packet = None
radio_conf={}
debug = None
def __init__(self, debug=False, timeout=2.0, radio_conf={"radio_freq_mhz": 915.5, "radio_tx_pwr": 20, "radio_serial_port": "/dev/ttyUSB0", "radio_mode": "lora", "radio_spread_factor": 7, "radio_crc": False, "radio_cr": 5, "radio_bw": 125, "radio_wdt": 0}):
self.debug = debug
self.timeout = timeout
self.radio_conf = radio_conf
self.radio_freq_mhz = radio_conf['radio_freq_mhz']
self.radio_tx_pwr = radio_conf['radio_tx_pwr']
self.radio_serial_port = radio_conf['radio_serial_port']
self.radio_mode = radio_conf['radio_mode']
self.radio_spread_factor = str(radio_conf['radio_spread_factor'])
self.radio_crc = bool(radio_conf['radio_crc'])
self.radio_wdt = radio_conf['radio_wdt']
if self.radio_crc:
str_radio_crc = "on"
else:
str_radio_crc = "off"
self.radio_cr = radio_conf['radio_cr']
str_radio_cr = "4/%s" % self.radio_cr
self.radio_bw = radio_conf['radio_bw']
# Configure LoRa Radio
print("Init - Radio")
print("------------")
print("Frequency: %s" % self.radio_freq_mhz)
print("TX Power: %s" % self.radio_tx_pwr)
print("Port: %s " % self.radio_serial_port)
print("Packet Timeout: %s" % timeout)
print("")
self.ser = serial.Serial(self.radio_serial_port, '57600', timeout=self.timeout)
self.myname = socket.gethostname().lower()
print(self.send_cmd('mac pause', 1))
time.sleep(0.1)
print(self.send_cmd('radio set mod %s' % self.radio_mode, 1))
print(self.send_cmd('radio set sync 12', 1))
print(self.send_cmd('radio set freq %s' % int((self.radio_freq_mhz * 1000000)), 1))
print(self.send_cmd('radio set pwr %s' % self.radio_tx_pwr, 1))
print(self.send_cmd('radio set sf sf%s' % self.radio_spread_factor, 1))
print(self.send_cmd('radio set crc %s' % str_radio_crc,1))
#print(self.send_cmd('radio set iqi off',1))
print(self.send_cmd('radio set cr %s' % str_radio_cr,1))
print(self.send_cmd('radio set wdt %s' % self.radio_wdt,1))
#print(self.send_cmd('radio set sync 12',1))
print(self.send_cmd('radio set bw %s' % self.radio_bw,1))
print("Radio Init Complete")
def send_cmd(self, cmd, echo=0):
btx = False
if echo == 1:
print(cmd)
if cmd.find("radio tx") >= 0:
btx = True
self.ser.write(('%s\r\n' % cmd).encode('UTF-8'))
time.sleep(0.3)
retval = self.ser.readline().decode('UTF-8').replace("\r\n", "")
if btx == False:
pass
else:
if retval == "ok":
retval = self.ser.readline().decode('UTF-8').replace("\r\n", "")
elif retval == "busy":
retrycnt = 0
while retval != "ok" and retrycnt < 10:
retrycnt += 1
self.ser.write(('%s\r\n' % cmd).encode('UTF-8'))
time.sleep(0.2)
retval = self.ser.readline().decode('UTF-8').replace("\r\n", "")
return retval
def recv_raw(self):
packet = None
snr = None
self.send_cmd('radio rx 0')
packet = self.ser.readline()
snr = self.send_cmd("radio get snr")
packet_text = ""
data = packet.decode('UTF-8').strip()
if data == "radio_err":
packet_text = None
elif data.find("radio_rx ") == 0:
mydata = data.replace("radio_rx ", "").strip()
mydata = mydata[8:]
try:
tpacket = binascii.unhexlify(mydata)
packet_text = str(tpacket, "utf-8")
except:
print("error: %s" % mydata)
tpacket = b"decode_error"
packet_text = str(tpacket, "utf-8")
self.prev_packet = packet_text
else:
print("Unknown: %s" % data)
return packet_text, snr
def send_raw(self, msg):
if len(msg) > 250:
print("Message to large: Not sent")
return -1
mymsg = msg.encode("UTF-8")
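# Prepend a fixed 8-hex-character header ("FFFF0000") to the hex-encoded
# payload; recv_raw() strips these first 8 characters again before decoding.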
sendmsg = "radio tx " + "FFFF0000" + mymsg.hex()
result = self.send_cmd(sendmsg)
print(result)
return 0
def main():
print ("Radio Testing")
if __name__ == "__main__":
main()
```
#### File: JohnOmernik/raspfarm/queue_radio_test.py
```python
import sys
import os
import time
import farmqueue
import random
import gevent
import json
import socket
conf = {}
fq = None
myname = ""
senders = []
def main():
global fq
global myname
global conf
global senders
f = open("farmradio.cfg", 'r')
rawconf = f.read().strip()
conf = json.loads(rawconf)
f.close()
myname = socket.gethostname().lower()
print("Radio Type is %s" % conf['radiotype'])
print("Server is %s" % conf['servername'])
if myname.lower() == conf['servername'].lower():
conf['server'] = True
print("Looks like we are the server!")
else:
conf['server'] = False
if conf['debug'] == 1:
print("Debug is true")
conf['debug'] = True
else:
conf['debug'] = False
fq = farmqueue.FarmQueue(debug=conf['debug'], timeout=conf['timeout'], resend_delay=conf['resend_delay'], radio_conf=conf)
print("Hello - Testing Radio Sending and Queing")
gevent.joinall([
gevent.spawn(procmsg),
gevent.spawn(fq.recvmsgs),
gevent.spawn(fq.sendmsgs)
]
)
def procmsg():
global fq
global conf
global myname
global senders
while True:
try:
# if conf['debug']:
# print("top of main loop")
msg = fq.getmsg()
if msg is not None:
try:
armsg = msg.split("~")
sender = armsg[2].lower()
print("<<<<< Message In: %s" % msg)
if sender not in senders:
senders.append(sender)
except:
print("<!<!< - Odd Message: %s" % msg)
else:
pass
#print("***** - No Data")
gevent.sleep(1)
except KeyboardInterrupt:
print("!!!!!!!!!!!!!!! Keyboard Exit")
sys.exit(0)
gevent.sleep(1)
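# Roughly one loop iteration in three, send a test message: workers always
# message the server (requiring an ack); the server picks a random sender it
# has already heard from and messages it back.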
if random.randint(1,6) <= 2:
if conf['server'] == False:
require_ack = True
print(">>>>> Sending message to server, require ack: %s" % require_ack)
fq.sendmsg(conf['servername'], "A worker message to the server from %s to %s" % (myname, conf['servername']), require_ack)
else:
if len(senders) > 0:
thissender = random.choice(senders)
require_ack = True
print(">>>>> Sending message from server to %s" % thissender)
fq.sendmsg(thissender, "A message from the server", require_ack)
else:
pass
#print("***** No Senders yet - Server not sending message")
gevent.sleep(0.5)
gevent.sleep(0.5)
if __name__ == "__main__":
main()
```
#### File: JohnOmernik/raspfarm/radio_test_usb.py
```python
import sys
import os
import time
import farmradio_usb
import random
def main():
fr = farmradio_usb.FarmRadio()
print("Hello - Testing Radio Sending")
while True:
try:
raw_data, snr = fr.recv_raw()
if raw_data is not None:
print("Data: %s" % ( raw_data))
time.sleep(0.5)
else:
print("No Data")
except KeyboardInterrupt:
print("Keyboard Exit")
sys.exit(0)
if random.randint(1,6) == 1:
curmsg = int(time.time())
print("Random Send: %s" % curmsg)
fr.send_raw("Hey, how's it going - %s" % curmsg)
time.sleep(0.5)
time.sleep(0.1)
if __name__ == "__main__":
main()
``` |
{
"source": "JohnOmernik/solarpi",
"score": 3
} |
#### File: JohnOmernik/solarpi/sample_data.py
```python
import json
import datetime
import smbus
import socket
import time
from collections import OrderedDict
from pysolar import solar
import pytz
import os.path
import sys
MYLAT = 1000.0
MYLNG = 1000.0
STRTZ = ""
ENV_FILE = "env.list"
if not os.path.isfile(ENV_FILE):
print("ENV_FILE at %s not found - exiting")
sys.exit(1)
e = open(ENV_FILE, "r")
lines = e.read()
e.close()
for line in lines.split("\n"):
myline = line.strip()
if myline.find("#") == 0:
pass
elif myline != "":
arline = myline.split("=")
if arline[0] == "MYLAT":
MYLAT = float(arline[1])
if arline[0] == "MYLNG":
MYLNG = float(arline[1])
if arline[0] == "STRTZ":
STRTZ = arline[1]
if MYLAT == 1000.0 or MYLNG == 1000.0 or STRTZ == "":
print("ENV Values not found please check your env.list file to ensure valid values exist for MYLAT, MYLNG, and STRTZ")
sys.exit(1)
print("==================")
print("Starting with values:")
print("MYLAT: %s" % MYLAT)
print("MYLNG: %s" % MYLNG)
print("STRTZ: %s" % STRTZ)
print("=================")
print("")
# Get I2C bus
busloc = 0x68 # Default for the MPU-6000 - Shouldn't need to change this.
bus = smbus.SMBus(1)
myhostname = socket.gethostname()
def main ():
global bus
global busloc
initsensor(bus, busloc)
timezone = pytz.timezone(STRTZ)
# Open File
curtime = datetime.datetime.now()
curday = curtime.strftime("%Y-%m-%d")
mystrtime = curtime.strftime("%Y-%m-%d %H:%M:%S")
fileday = curday
myfile = "./solardata_%s_%s.json" % (myhostname, fileday)
fh = open(myfile, "a")
failcnt = 0
while True:
# Setup time vars
curtime = datetime.datetime.now()
curday = curtime.strftime("%Y-%m-%d")
mystrtime = curtime.strftime("%Y-%m-%d %H:%M:%S")
epochtime = int(time.time())
mydate = timezone.localize(curtime)
# Get Readings
curalt, curaz = get_alt_az(mydate)
try:
xa, ya, za = getreading(bus, "accel", busloc)
xg, yg, zg = getreading(bus, "gyro", busloc)
except:
failcnt += 1
if failcnt >= 10:
print("Failure count is at or over 10, trying to reinit the sensor")
initsensor(bus, busloc)
time.sleep(5)
continue
failcnt = 0
# Check to see if day changed so we can change the file
if curday != fileday:
fh.close()
fileday = curday
myfile = "./solardata_%s_%s.json" % (myhostname, fileday)
fh = open(myfile, "a")
myrec = OrderedDict()
myrec["ts"] = mystrtime
myrec["epochts"] = epochtime
myrec["array"] = myhostname
myrec["accel_x"] = xa
myrec["accel_y"] = ya
myrec["accel_z"] = za
myrec["gyro_x"] = xg
myrec["gyro_y"] = yg
myrec["gyro_z"] = zg
myrec["alt"] = curalt
myrec["az"] = curaz
fh.write(json.dumps(myrec) + "\n")
fh.flush()
time.sleep(5)
def initsensor(bus, busloc):
# Initialize things:
# Select gyroscope configuration register, 0x1B(27)
# 0x18(24) Full scale range = 2000 dps
bus.write_byte_data(busloc, 0x1B, 0x18)
# MPU-6000 address, 0x68(104)
# Select accelerometer configuration register, 0x1C(28)
# 0x18(24) Full scale range = +/-16g
bus.write_byte_data(busloc, 0x1C, 0x18)
# MPU-6000 address, 0x68(104)
# Select power management register1, 0x6B(107)
# 0x01(01) PLL with xGyro reference
bus.write_byte_data(busloc, 0x6B, 0x01)
#
time.sleep(0.8)
def get_alt_az(dt):
alt = solar.get_altitude(MYLAT, MYLNG, dt)
az = solar.get_azimuth(MYLAT, MYLNG, dt)
return alt, az
def getreading(bus, src, busloc):
# src is accel or gyro
if src == "accel":
srcval = 0x3B
elif src == "gyro":
srcval = 0x43
else:
srcval = 0x00
print("Invalid src")
return (0,0,0)
data = bus.read_i2c_block_data(busloc, srcval, 6)
x = convertreading(data[0], data[1])
y = convertreading(data[2], data[3])
z = convertreading(data[4], data[5])
return x, y, z
def convertreading(val1, val2):
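# Combine the high byte (val1) and low byte (val2) from the I2C read into a
# signed 16-bit two's-complement value.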
retval = val1 * 256 + val2
if retval > 32767:
retval -= 65536
return retval
if __name__ == '__main__':
main()
```
#### File: JohnOmernik/solarpi/solarpi.py
```python
import json
import datetime
import smbus
import socket
import math
import time
from collections import OrderedDict
from pysolar import solar
import pytz
import os.path
import sys
from dual_g2_hpmd_rpi import motors, MAX_SPEED
#480 is Positive 100% voltage
#-480 is Negative 100% voltage
#240 is Positive 50% voltage
#-240 is Negative 50% voltage
#0 is Stop
MYLAT = 1000.0
MYLNG = 1000.0
EAST_POS=0.0
WEST_POS=0.0
EAST_ANGLE=0.0
WEST_ANGLE=0.0
axis_azi = 0.0
axis_tilt = 0.0
MOVE_INTERVAL=600
NIGHT_POS=0.0
STRTZ = ""
ENV_FILE = "env.list"
if not os.path.isfile(ENV_FILE):
print("ENV_FILE at %s not found - exiting")
sys.exit(1)
e = open(ENV_FILE, "r")
lines = e.read()
e.close()
for line in lines.split("\n"):
myline = line.strip()
if myline.find("#") == 0:
pass
elif myline != "":
arline = myline.split("=")
if arline[0] == "MYLAT":
MYLAT = float(arline[1])
if arline[0] == "MYLNG":
MYLNG = float(arline[1])
if arline[0] == "STRTZ":
STRTZ = arline[1]
if arline[0] == "WEST_ANGLE":
WEST_ANGLE = float(arline[1])
if arline[0] == "EAST_ANGLE":
EAST_ANGLE = float(arline[1])
if arline[0] == "WEST_POS":
WEST_POS = float(arline[1])
if arline[0] == "EAST_POS":
EAST_POS = float(arline[1])
if arline[0] == "AXIS_AZI":
axis_azi = float(arline[1])
if arline[0] == "AXIS_TILT":
axis_tilt = float(arline[1])
if arline[0] == "MOVE_INTERVAL":
MOVE_INTERVAL = int(arline[1])
INVERT_SENSOR = True # We installed our sensor apparently "upside down" therefore we need to invert the reading to align with the solar function
ECONV = EAST_POS / EAST_ANGLE
WCONV = WEST_POS / WEST_ANGLE
if MYLAT == 1000.0 or MYLNG == 1000.0 or STRTZ == "" or EAST_ANGLE == 0.0 or WEST_ANGLE == 0.0 or WEST_POS == 0.0 or EAST_POS == 0.0 or axis_azi == 0.0 or axis_tilt == 0.0:
print("ENV Values not found please check your env.list file to ensure valid values exist for EAST and WEST_POS, EAST and WEST_ANGLE, AXIS_AZI, AXIS_TILE, MYLAT, MYLNG, and STRTZ")
sys.exit(1)
print("==================")
print("Starting with values:")
print("MYLAT: %s" % MYLAT)
print("MYLNG: %s" % MYLNG)
print("STRTZ: %s" % STRTZ)
print("AXIS_AZI: %s" % axis_azi)
print("AXIS_TILT: %s" % axis_tilt)
print("EAST_ANGLE: %s" % EAST_ANGLE)
print("WEST_ANGLE: %s" % WEST_ANGLE)
print("EAST_POS: %s" % EAST_POS)
print("WEST_POS: %s" % WEST_POS)
print("ECONV: %s" % ECONV)
print("WCONV: %s" % WCONV)
print("MOVE_INTERVAL: %s" % MOVE_INTERVAL)
print("INVERT_SENSOR: %s" % INVERT_SENSOR)
print("=================")
print("")
# Get I2C bus
busloc = 0x68 # Default for the MPU-6000 - Shouldn't need to change this.
bus = smbus.SMBus(1)
myhostname = socket.gethostname()
def main():
global bus
global busloc
global axis_tilt
global axis_azi
initsensor(bus, busloc)
timezone = pytz.timezone(STRTZ)
motors.enable()
motors.setSpeeds(0, 0)
RUNNING = True
last_set_val = 0
last_set_time = 0
while RUNNING:
curtime = datetime.datetime.now()
curday = curtime.strftime("%Y-%m-%d")
mystrtime = curtime.strftime("%Y-%m-%d %H:%M:%S")
epochtime = int(time.time())
mydate = timezone.localize(curtime)
curalt, curaz = get_alt_az(mydate)
cur_r = mydeg(get_pos())
track_err = False
if curalt > 0:
# We only check if there is a track error if the sun is up, no point in correcting all night long
if math.fabs(math.fabs(cur_r) - math.fabs(last_set_val)) > 2.0:
print("%s - Track error, going to set track_err to true: cur_r: %s - last_set_val: %s" % (mystrtime, cur_r, last_set_val))
track_err = True
sun_r = getR(curalt, curaz, axis_tilt, axis_azi)
if INVERT_SENSOR:
sun_r = -sun_r
print("%s - Sun is up! - Sun Alt: %s - Sun Azi: %s - Cur Rot: %s - Potential Sun Rot: %s" % (mystrtime, curalt, curaz, cur_r, sun_r))
NEW_SET_VAL = None
if sun_r <= EAST_ANGLE and sun_r >= WEST_ANGLE:
print("%s - Potential new val: %s - cur: %s" % (mystrtime, sun_r, cur_r))
NEW_SET_VAL = sun_r
elif sun_r > EAST_ANGLE and (last_set_val != EAST_ANGLE or track_err == True):
print("%s - Sun Rot (%s) is Beyond East(%s), and array needs to move there" % (mystrtime, sun_r, EAST_ANGLE))
NEW_SET_VAL = EAST_ANGLE
elif sun_r < WEST_ANGLE and (last_set_val != WEST_ANGLE or track_err == True):
print("%s - Sun Rot (%s) is Beyond West(%s), and array needs to move there" % (mystrtime, sun_r, WEST_ANGLE))
NEW_SET_VAL = WEST_ANGLE
if epochtime - last_set_time >= MOVE_INTERVAL and NEW_SET_VAL is not None:
print("%s Setting New val: %s from %s" % (mystrtime, NEW_SET_VAL, cur_r))
last_set_time = epochtime
last_set_val = NEW_SET_VAL
goto_angle(NEW_SET_VAL)
else:
if last_set_val != NIGHT_POS:
print("%s - Sun is down setting to %s for the night" % (mystrtime, NIGHT_POS))
goto_angle(NIGHT_POS)
last_set_val = NIGHT_POS
last_set_time = epochtime
time.sleep(60)
def getR(sun_alt, sun_azi, axis_tilt, axis_azi):
# Return in Degrees
sun_zen = 90 - sun_alt
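# Ideal rotation angle for a tilted single-axis tracker:
# tan(R) = sin(zen)*sin(azi - axis_azi) /
#          (sin(zen)*cos(azi - axis_azi)*sin(axis_tilt) + cos(zen)*cos(axis_tilt)),
# with the +/-180 degree correction below selecting the correct quadrant.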
x_1 = (math.sin(math.radians(sun_zen)) * math.sin(math.radians(sun_azi) - math.radians(axis_azi)))
x_2 = (math.sin(math.radians(sun_zen)) * math.cos(math.radians(sun_azi) - math.radians(axis_azi)) * math.sin(math.radians(axis_tilt)))
x_3 = (math.cos(math.radians(sun_zen)) * math.cos(math.radians(axis_tilt)))
x_4 = x_2 + x_3
X = x_1 / x_4
if X == 0.0 or (X > 0 and (sun_azi - axis_azi) > 0) or (X < 0 and (sun_azi - axis_azi) < 0):
mypsi = math.radians(0.0)
elif X < 0 and (sun_azi - axis_azi) > 0:
mypsi = math.radians(180.0)
elif X > 0 and (sun_azi - axis_azi) < 0:
mypsi = math.radians(-180.0)
else:
print("awe crap")
mypsi = 0
R = math.atan(X) + mypsi
return math.degrees(R)
def goto_angle(setangle):
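# Move the array to the accelerometer position for setangle: pick the east or
# west calibration factor by sign, run motor 1 in the required direction, and
# poll get_pos() until the target is passed or the retry counter runs out.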
global ECONV
global WCONV
global motors
CONV = 0.0
if setangle < 0:
CONV = WCONV
elif setangle > 0:
CONV = ECONV
TARGET_POS = CONV * setangle
# Get Current Location
curcnt = 0
cursum = 0.0
failcnt = 0
for x in range(10):
try:
xa, ya, za = getreading(bus, "accel", busloc)
curcnt += 1
cursum += xa
except:
failcnt += 1
if failcnt > 20:
break
print("Reading Fail!!")
else:
continue
CURRENT_POS = get_pos()
print("The current location is %s and you want to go to %s (%s in angle form)" % (CURRENT_POS, TARGET_POS, setangle))
finished = False
if CURRENT_POS > TARGET_POS:
# We want to move west
motor_dir = -480
elif CURRENT_POS < TARGET_POS:
motor_dir = 480
else:
motor_dir = 0
finished = True
print("No change!")
motors.motor1.setSpeed(motor_dir)
tcnt = 0
while finished == False:
tcnt += 1
NEW_POS = get_pos()
if motor_dir < 0:
if NEW_POS <= TARGET_POS:
motors.motor1.setSpeed(0)
finished = True
elif motor_dir > 0:
if NEW_POS >= TARGET_POS:
motors.motor1.setSpeed(0)
finished = True
elif tcnt >= 1200:
print("It has taken over 5 minutes of waiting and we didn't get to where you want, we are giving up at at %s" % NEW_POS)
finished = True
time.sleep(0.5)
print("Finished setting position")
#motors.motor1.setSpeed(-480)
def mydeg(pos):
retval = 0
if pos > 0:
retval = pos / ECONV
elif pos < 0:
retval = pos / WCONV
return retval
def get_pos():
global bus
global busloc
curcnt = 0
cursum = 0.0
failcnt = 0
for x in range(5):
try:
xa, ya, za = getreading(bus, "accel", busloc)
curcnt += 1
cursum += xa
except:
failcnt += 1
if failcnt > 20:
break
print("Reading Fail!!")
else:
continue
return cursum / curcnt
def initsensor(bus, busloc):
# Initialize things:
# Select gyroscope configuration register, 0x1B(27)
# 0x18(24) Full scale range = 2000 dps
bus.write_byte_data(busloc, 0x1B, 0x18)
# MPU-6000 address, 0x68(104)
# Select accelerometer configuration register, 0x1C(28)
# 0x18(24) Full scale range = +/-16g
bus.write_byte_data(busloc, 0x1C, 0x18)
# MPU-6000 address, 0x68(104)
# Select power management register1, 0x6B(107)
# 0x01(01) PLL with xGyro reference
bus.write_byte_data(busloc, 0x6B, 0x01)
#
time.sleep(0.8)
def getreading(bus, src, busloc):
# src is accel or gyro
if src == "accel":
srcval = 0x3B
elif src == "gyro":
srcval = 0x43
else:
srcval = 0x00
print("Invalid src")
return (0,0,0)
data = bus.read_i2c_block_data(busloc, srcval, 6)
x = convertreading(data[0], data[1])
y = convertreading(data[2], data[3])
z = convertreading(data[4], data[5])
return x, y, z
def convertreading(val1, val2):
retval = val1 * 256 + val2
if retval > 32767:
retval -= 65536
return retval
def get_alt_az(dt):
alt = solar.get_altitude(MYLAT, MYLNG, dt)
az = solar.get_azimuth(MYLAT, MYLNG, dt)
return alt, az
if __name__ == '__main__':
main()
```
#### File: JohnOmernik/solarpi/test_pysolar.py
```python
from pysolar import solar
import time
import datetime
import pytz
import os.path
import sys
MYLAT = 1000.0
MYLNG = 1000.0
STRTZ = ""
ENV_FILE = "env.list"
if not os.path.isfile(ENV_FILE):
print("ENV_FILE at %s not found - exiting")
sys.exit(1)
e = open(ENV_FILE, "r")
lines = e.read()
e.close()
for line in lines.split("\n"):
myline = line.strip()
if myline.find("#") == 0:
pass
elif myline != "":
arline = myline.split("=")
if arline[0] == "MYLAT":
MYLAT = float(arline[1])
if arline[0] == "MYLNG":
MYLNG = float(arline[1])
if arline[0] == "STRTZ":
STRTZ = arline[1]
if MYLAT == 1000.0 or MYLNG == 1000.0 or STRTZ == "":
print("ENV Values not found please check your env.list file to ensure valid values exist for MYLAT, MYLNG, and STRTZ")
sys.exit(1)
print("==================")
print("Starting with values:")
print("MYLAT: %s" % MYLAT)
print("MYLNG: %s" % MYLNG)
print("STRTZ: %s" % STRTZ)
print("=================")
print("")
def main():
timezone = pytz.timezone(STRTZ)
#date = datetime.datetime(2018, 10, 22, 13, 20, 10, 130320)
while True:
date = datetime.datetime.now()
mydate = timezone.localize(date)
mystrtime = mydate.strftime("%Y-%m-%d %H:%M:%S")
curalt, curaz = get_alt_az(mydate)
print("%s - Alt: %s - Az: %s" % (mystrtime, curalt, curaz))
time.sleep(10)
def get_alt_az(dt):
alt = solar.get_altitude(MYLAT, MYLNG, dt)
az = solar.get_azimuth(MYLAT, MYLNG, dt)
return alt, az
if __name__ == '__main__':
main()
``` |
{
"source": "JohnOmernik/tempiture",
"score": 2
} |
#### File: JohnOmernik/tempiture/gas.py
```python
import time
import sys
import json
# Initialise the PCA9685 using the default address (0x40).
#from adafruit_servokit import ServoKit
#from adafuit_motor import servo
from adafruit_motor import servo
import os
import board
import busio
import adafruit_pca9685
# Begin ENV Load
try:
gas_debug = bool(int(os.environ["APP_GAS_DEBUG"]))
except:
print("APP_GAS_DEBUG either not provided or not 0 or 1, defaulting to 0 (false)")
gas_debug = False
try:
tmp_servo_hat_clock = os.environ["APP_SERVO_HAT_CLOCK"]
servo_hat_clock = eval(tmp_servo_hat_clock)
except:
print("APP_SERVO_HAT_CLOCK not provided, using board.SCL")
servo_hat_clock = eval("board.SCL")
try:
tmp_servo_hat_data = os.environ["APP_SERVO_HAT_DATA"]
servo_hat_data = eval(tmp_servo_hat_data)
except:
print("APP_SERVO_HAT_CLOCK not provided, using board.SDA")
servo_hat_data = eval("board.SDA")
try:
servo_hat_freq = int(os.environ["APP_SERVO_HAT_FREQ"])
except:
print("APP_SERVO_HAT_FREQ not probided or not valid integer - Defaulting to 200")
servo_hat_freq = 200
try:
servo_hat_chan = int(os.environ["APP_SERVO_HAT_CHAN"])
except:
print("APP_SERVO_HAT_CHAN not provided or not a valid integer - Defaulting to 0")
servo_hat_chan = 0
try:
servo_min = int(os.environ["APP_SERVO_MIN"])
except:
print("APP_SERVO_MIN not prvided using somewhat sane default of 600")
servo_min = 600
try:
servo_max = int(os.environ["APP_SERVO_MAX"])
except:
print("APP_SERVO_MIN not prvided using somewhat sane default of 1305")
servo_max = 1305
#### END ENV Load
i2c = busio.I2C(servo_hat_clock, servo_hat_data)
hat = adafruit_pca9685.PCA9685(i2c)
hat.frequency = servo_hat_freq
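# The gas valve servo hangs off PCA9685 channel 0; min_pulse/max_pulse (in
# microseconds) bound its travel, and the interactive loop below prompts for a
# percentage and writes that value straight to GAS_SERVO.angle.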
GAS_SERVO = servo.Servo(hat.channels[0], min_pulse=servo_min, max_pulse=servo_max)
#kit = ServoKit(channels=16)
#GAS_SERVO.set_pulse_width_range(servo_min, servo_max)
#GAS_SERVO.actuation_range = 100
def main():
cur_perc = 0
initial_min = servo_min
initial_max = servo_max
cur_min = servo_min
cur_max = servo_max
GAS_SERVO.set_pulse_width_range(cur_min, cur_max)
while True:
print("Current min_pulse: %s - Current max_pulse: %s" % (cur_min, cur_max))
tval = input("Set percentage, u to set max pulse, d to set min pulse, or q to quit (Currently: %s Percent): " % cur_perc)
if tval == "q":
print("Quiting, have a nice day!")
GAS_SERVO.angle = None
time.sleep(1)
sys.exit(0)
elif tval == "u" or tval == "d":
if tval == "u":
sval = "max"
else:
sval = "min"
pval = input("Set %s pulse value: " % sval)
try:
pval = int(pval)
except:
print("Value must be integer")
continue
if tval == "u":
cur_max = pval
else:
cur_min = pval
GAS_SERVO.set_pulse_width_range(cur_min, cur_max)
else:
try:
myval = int(tval)
except:
print("Non int value sent")
continue
cur_perc = myval
GAS_SERVO.angle = myval
if myval <= 2 or myval >= 98:
time.sleep(4)
GAS_SERVO.angle = None
if __name__ == "__main__":
main()
``` |
{
"source": "johnomics/tapestry",
"score": 2
} |
#### File: tapestry/tapestry/contig.py
```python
import os, re, warnings
from statistics import mean
from collections import Counter, defaultdict
from intervaltree import Interval, IntervalTree
from .alignments import Alignments
# Define process_contig at top level rather than in class so it works with multiprocessing
def process_contig(contig):
contig.process()
return contig
def get_ploidy(contig, ploidy_depths=None):
contig.ploidys = contig.get_ploidys(ploidy_depths)
contig.ploidy_pc = contig.get_ploidy_pc()
return contig
class Contig:
def __init__(self, cid, rec, telomeres, filenames):
self.id = cid
self.name = rec.id
self.rec = rec
self.telomeres = telomeres
self.filenames = filenames
def report(self, assembly_gc):
report = f"{self.name}"
report += f"\t{len(self)}"
report += f"\t{self.gc:.1f}"
report += f"\t{self.median_read_depth:.1f}"
report += f"\t{self.tel_start}"
report += f"\t{self.tel_end}"
report += f"\t{self.mean_start_overhang}"
report += f"\t{self.mean_end_overhang}"
report += f"\t{self.unique_bases}"
report += f"\t{self.unique_pc:.0f}"
report += "\t" + ','.join([f"{p}:{self.ploidy_pc[p]:.2f}" for p in sorted(self.ploidy_pc)])
return report
def json(self):
return {
'id': self.id,
'group': 'None',
'name' : self.name,
'length': len(self),
'gc': f"{self.gc:.2f}",
'median_read_depth': int(self.median_read_depth),
'tel_start': self.tel_start,
'tel_end': self.tel_end
}
def __len__(self):
return len(self.rec.seq)
def __lt__(self, other):
return len(self) < len(other)
def contig_alignments_json(self):
plot_row_ends = []
alignments = []
for a in sorted(self.contig_alignments, key=lambda a: a.begin):
contig, contig_start, contig_end = a.data
if contig == self.name and (a.begin <= contig_end and a.end >= contig_start):
continue
alignments.append((a.begin, a.end, self.contig_ids[contig], contig_start, contig_end))
return alignments
def process(self):
# Alignments added here for multithreading
self.alignments = Alignments(self.filenames['alignments'], self.windowsize)
self.gc = self.get_gc()
self.read_depths = self.alignments.depths('read', self.name)
self.median_read_depth = self.median_depth(self.read_depths)
self.contig_alignments, self.contig_coverage = self.get_contig_alignments()
self.mean_start_overhang, self.mean_end_overhang = self.get_read_overhangs()
self.region_depths = self.get_region_depths()
self.unique_bases = self.get_unique_bases()
self.unique_pc = self.get_unique_pc()
self.tel_start, self.tel_end = self.num_telomeres()
self.read_alignments = self.plot_read_alignments()
# Alignments work is done; they cannot be pickled, so clean up before return
del(self.alignments)
def completeness(self):
completeness = ''
if self.tel_start > 0 and self.mean_start_overhang is not None and self.mean_start_overhang < 250:
completeness += 'L'
if self.tel_end > 0 and self.mean_end_overhang is not None and self.mean_end_overhang < 250:
completeness += 'R'
if completeness == 'LR':
completeness = 'C'
return completeness if completeness else '-'
def get_gc(self):
# Calculate directly because Biopython GC does not account for gaps
basefreqs = Counter(self.rec.seq)
for base in "ACGT":
if base not in basefreqs:
basefreqs[base] = 0
        gc_bases = sum(basefreqs[b] for b in "GC")
        acgt_bases = sum(basefreqs[b] for b in "ACGT")
        gc = 0
        if acgt_bases > 0:
            gc = (gc_bases / acgt_bases) * 100
return gc
def median_depth(self, depths):
return depths['depth'].median() if depths is not None else 0
def get_read_overhangs(self):
aligned_length = min(20000, len(self)*0.9)
start_overhangs = self.alignments.get_start_overhangs(self.name, 1, min(2000, len(self)), aligned_length)
end_overhangs = self.alignments.get_end_overhangs(self.name, max(len(self)-2000, 1), len(self), aligned_length)
mean_start_overhang = int(mean(start_overhangs)) if start_overhangs else None
mean_end_overhang = int(mean(end_overhangs)) if end_overhangs else None
return mean_start_overhang, mean_end_overhang
def num_telomeres(self):
start_matches = end_matches = 0
if self.telomeres:
for t in self.telomeres:
for s in t, t.reverse_complement():
start_matches += len(list(s.instances.search(self.rec[:1000].seq)))
end_matches += len(list(s.instances.search(self.rec[-1000:].seq)))
return start_matches, end_matches
def get_contig_alignments(self):
alignments = IntervalTree()
alignments_by_contig = defaultdict(IntervalTree)
alignments[1:len(self)] = (self.name, 1, len(self))
for self_start, self_end, contig, contig_start, contig_end in self.alignments.contig_alignments(self.name):
alignments[self_start:self_end+1] = (contig, contig_start, contig_end)
alignments_by_contig[contig][self_start:self_end+1] = 1
coverage = defaultdict(int)
for contig in alignments_by_contig:
alignments_by_contig[contig].merge_overlaps()
coverage[contig] = sum([i.end-i.begin for i in alignments_by_contig[contig]])
return alignments, coverage
def get_region_depths(self):
alignments = self.contig_alignments
regions = alignments.copy()
regions.split_overlaps()
region_depths = IntervalTree()
for region in regions:
region_depths[region.begin:region.end] = len(alignments[region.begin:region.end])
return sorted(region_depths)
def get_unique_bases(self):
unique_bases = len(self)
for region in self.region_depths:
if region.data > 1:
unique_bases -= region.end - region.begin # No need to -1 because end is beyond upper limit
return unique_bases
def get_unique_pc(self):
return self.unique_bases/len(self) * 100
def get_ploidys(self, ploidy_depths):
empty_ploidys = [0] * len(self.read_depths)
if sum(ploidy_depths) == 0:
return empty_ploidys
ploidys = []
for d in self.read_depths['depth']:
window_ploidy = 0
min_ploidy_diff = None
for p, pd in enumerate(ploidy_depths):
ploidy_diff = abs(d-pd)
if min_ploidy_diff is None or ploidy_diff < min_ploidy_diff:
window_ploidy = p
min_ploidy_diff = ploidy_diff
ploidys.append(window_ploidy)
return ploidys
def get_ploidy_pc(self):
ploidy_pc = defaultdict(float)
for p in self.ploidys:
ploidy_pc[p] += 1/len(self.ploidys)
return ploidy_pc
def get_neighbour_details(self, neighbour_contig):
neighbour_id = -1
neighbour_type = 'L' # Loose
if neighbour_contig is not None:
neighbour_id = self.contig_ids[neighbour_contig]
if neighbour_contig == self.name:
neighbour_type = 'S' # Self
else:
neighbour_type = 'C' # Connection
return neighbour_id, neighbour_type
def plot_read_alignments(self):
read_alignments = []
if not self.readoutput or self.alignments.read_alignments(self.name) is None:
return read_alignments
plot_row_ends = []
for i, a in self.alignments.read_alignments(self.name).iterrows():
# Only use neighbour distances if contig is the same;
# if contigs are different, want to show full clips even if the clip aligns elsewhere
start_distance, end_distance = a.left_clip, a.right_clip
if a.pre_contig == self.name:
start_distance = max(a.pre_distance, 0) # Ignore overlapping alignments with negative distances
if a.post_contig == self.name:
end_distance = max(a.post_distance, 0)
start_position = a.ref_start - start_distance
end_position = a.ref_end + end_distance
assigned_row = None
for r, row in enumerate(plot_row_ends):
if row < a.ref_start:
assigned_row = r
plot_row_ends[r] = a.ref_end
break
if assigned_row is None:
assigned_row = len(plot_row_ends)
plot_row_ends.append(end_position)
pre_contig_id, pre_type = self.get_neighbour_details(a.pre_contig)
post_contig_id, post_type = self.get_neighbour_details(a.post_contig)
# int conversion required because Pandas uses numpy int64, which json doesn't understand
read_alignments.append([int(x) for x in
[start_position, # read start including left clip or pre distance
a.ref_start, # contig alignment start
a.ref_end, # contig alignment end
end_position, # read end including right clip or post distance
a.mq, # mapping quality
assigned_row+1, # y position on plot
pre_contig_id,
post_contig_id
]])
read_alignments[-1].append(pre_type)
read_alignments[-1].append(post_type)
return read_alignments
``` |
{
"source": "johnomotani/optionsfactory",
"score": 3
} |
#### File: optionsfactory/optionsfactory/optionsfactory.py
```python
from copy import copy, deepcopy
from ._utils import _checked, _options_table_string
from .withmeta import WithMeta
class OptionsFactory:
"""Factory to create Options instances"""
def __init__(self, *args, **kwargs):
"""Define the members of Options instances that this factory will create
Parameters
----------
        *args : dicts of {key: [WithMeta, value or expression]}
These dicts are combined with the kwargs to create the default values for
this object. Intended to allow collecting defaults from contained objects.
For example, if we have a class A, with members from classes B and C which
each have an OptionsFactory, we could have something like:
class A:
options_factory = OptionsFactory(
B.options_factory.defaults,
C.options_factory.defaults,
extra_option1 = 1,
extra_option2 = 2,
)
It is an error for any keys in *args to be repeated or be the same as any in
**kwargs.
**kwargs : key=[WithMeta, value or expression]
Keys are the names of the members of the Options that the factory will
create.
If a value is passed, that is used as the default for this key. If an
expression is passed, it should take one argument, and can access values of
other Options members from that argument. WithMeta allows values or
expressions to be passed with extra metadata. For example,
factory = OptionsFactory(
a=1,
                    b=lambda options: options.c + options.a,
                    c=lambda options: 3*options["a"],
                    d=WithMeta(
                        4, doc="option d", value_type=int, allowed=[4, 5, 6]
                    ),
                    e=WithMeta(
                        lambda options: options.a + options.b + options.c + options.d,
                        doc="option e",
                        value_type=float,
                        check_all=lambda x: x > 0.0
)
)
"""
self.__defaults = {}
for key, value in kwargs.items():
if isinstance(value, OptionsFactory):
self.__defaults[key] = value
else:
self.__defaults[key] = WithMeta(value)
# Add defaults from *args
for a in args:
if not isinstance(a, OptionsFactory):
raise ValueError(
f"Positional arguments to OptionsFactory.__init__() must be "
f"OptionsFactory instances, was passed a {type(a)}"
)
for key, value in a.defaults.items():
if key in self.__defaults:
if value != self.__defaults[key]:
raise ValueError(
f"{key} has been passed more than once with different "
f"values"
)
if isinstance(value, OptionsFactory):
self.__defaults[key] = value
else:
self.__defaults[key] = WithMeta(value)
@property
def defaults(self):
"""Get the default values defined for this OptionsFactory"""
return deepcopy(self.__defaults)
@property
def doc(self):
"""Get the documentation for the options defined for this OptionsFactory"""
return {key: value.doc for key, value in self.__defaults.items()}
def add(self, **kwargs):
"""Create a more specific version of the factory with extra options. For example,
may be useful for a subclass like
class Parent:
options_factory = OptionsFactory(...)
class Child:
options_factory = Parent.options_factory.add(
an_extra_option="used only by Child"
)
Parameters
----------
**kwargs : key=[WithMeta, value or expression]
The new options to add, these override the ones in the parent factory if key
already exists, but keep the doc, allowed and checks if the option is just a
new value/expression (not a new WithMeta)
"""
new_default_values = deepcopy(self.__defaults)
for key, value in kwargs.items():
if key in new_default_values and isinstance(
new_default_values[key], OptionsFactory
):
if isinstance(value, OptionsFactory):
raise ValueError(
f"Updating the section {key} in OptionsFactory, but was passed "
f"an OptionsFactory. This is forbidden as options from the new "
f"OptionsFactory might unexpectedly overwrite metadata in the "
f"existing section. Pass a dict instead to update {key}."
)
new_default_values[key] = new_default_values[key].add(**value)
elif isinstance(value, OptionsFactory):
if key in new_default_values:
raise ValueError(
f"Passing an OptionsFactory to OptionsFactory.add() creates a "
f"new section, but the option {key} already exists"
)
new_default_values[key] = value
elif isinstance(value, WithMeta):
new_default_values[key] = value
elif key in new_default_values:
# just update the default value or expression
new_default_values[key].value = value
else:
new_default_values[key] = WithMeta(value)
# Use type(self) so that OptionsFactory returns an OptionsFactory but
# MutableOptionsFactory returns a MutableOptionsFactory
return type(self)(**new_default_values)
def __contains__(self, key):
return key in self.__defaults
def create(self, values=None):
"""Create an Options instance
The members of the created Options are defined by this
OptionsFactory instance. Any values passed in the values argument are used,
and the rest are set from defaults, which can be expressions depending on other
members.
Parameters
----------
values : dict or Options, optional
Non-default values to be used
"""
return self.__create_immutable(values)
def create_from_yaml(self, file_like):
"""Create an Options instance from an input YAML file
Parameters
----------
file_like : file handle or similar to read from
File to read from
"""
return self.create(self._load_yaml(file_like))
def _load_yaml(self, file_like):
import yaml
return yaml.safe_load(file_like)
def __create_mutable(self, values=None, parent=None):
if values is None:
values = {}
# do not modify passed-in values
values = deepcopy(dict(values))
# ignore values for keys not in the list of keys defined in the factory
for key in list(values):
if key not in self.__defaults:
del values[key]
# Return new MutableOptions instance
return OptionsFactory.MutableOptions(
values, self.__defaults, self.doc, parent=parent
)
def __create_immutable(self, values=None):
# Create MutableOptions instance: use to check the values and evaluate defaults
mutable_options = self.__create_mutable(values)
# Return new Options instance
return OptionsFactory.Options(mutable_options)
class MutableOptions:
"""Provide access to a pre-defined set of options, with default values that may
depend on the values of other options
"""
def __init__(self, data, defaults, doc, parent=None):
self.__defaults = {
key: value if not isinstance(value, OptionsFactory) else None
for key, value in defaults.items()
}
self.__cache = {}
self.__parent = parent
# don't modify input data
data = copy(data)
self.__data = {}
# Add subsections first
for subsection in self.get_subsections():
if subsection in data:
subsection_data = data[subsection]
del data[subsection]
else:
subsection_data = {}
self.__data[subsection] = defaults[
subsection
]._OptionsFactory__create_mutable(subsection_data, parent=self)
# Add values in this section second - now 'data' contains only values, not
# subsections
for key, value in data.items():
self.__data[key] = _checked(value, meta=self.__defaults[key], name=key)
self.__doc = doc
@property
def doc(self):
return self.__doc
@property
def parent(self):
return self.__parent
def as_table(self):
"""Return a string with a formatted table of the settings"""
return _options_table_string(self)
def to_dict(self, with_defaults=True):
"""Convert the MutableOptions object to a dict
Parameters
----------
with_defaults : bool, default True
Include the default values in the returned dict?
"""
if with_defaults:
return {
key: value
if not isinstance(value, OptionsFactory.MutableOptions)
else value.to_dict(with_defaults)
for key, value in self.items()
}
else:
return {
key: value
if not isinstance(value, OptionsFactory.MutableOptions)
else value.to_dict(with_defaults)
for key, value in self.items()
# Use 'is not True' so we include subsections, for which
# self.is_default(key) returns a dict
if self.is_default(key) is not True
}
def to_yaml(self, file_like, with_defaults=False):
"""Save the options to a YAML file
Save only the non-default options unless with_defaults=True is passed
Parameters
----------
file_like : file handle or similar
File to write to
with_defaults : bool, default False
Save all the options, including default values
"""
import yaml
return yaml.dump(self.to_dict(with_defaults), file_like)
def get_subsections(self):
"""
Iterator over the subsections in this MutableOptions
"""
for key, value in self.__defaults.items():
# None marks subsections in self.__defaults - all other values are
# WithMeta objects
if value is None:
yield key
def __clear_cache(self, is_child=False):
if self.__parent is None or is_child:
# Once we have found the root MutableOptions object, follow the tree,
# clearing the cache of each MutableOptions section or subsection.
self.__cache = {}
for subsection in self.get_subsections():
self[subsection].__clear_cache(True)
else:
# Go up until we find the root MutableOptions object, which has
# (self.parent is None)
self.__parent.__clear_cache()
def __getitem__(self, key):
if key not in self.__defaults:
raise KeyError(f"This Options does not contain {key}")
# If key is already in __cache, then it has a definite value
if key in self.__cache:
return self.__cache[key]
# Check if option was in user-set values
try:
value = self.__data[key]
except KeyError:
pass
else:
self.__cache[key] = value
return value
# When setting default values, detect circular definitions
if not hasattr(self, "_MutableOptions__key_chain"):
chain_start = True
self.__key_chain = [key]
else:
if key in self.__key_chain:
# Found a circular definition
# Tidy up object state
key_chain = self.__key_chain
del self.__key_chain
# Tell the user where the circular definition was
index = key_chain.index(key)
raise ValueError(
f"Circular definition of default values. At least one of "
f"{key_chain[index:]} must have a definite value"
)
chain_start = False
self.__key_chain.append(key)
self.__cache[key] = self.__defaults[key].evaluate_expression(self, name=key)
if chain_start:
# Tidy up temporary member variable
del self.__key_chain
return self.__cache[key]
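        # Illustrative example of the circular-definition detection above (not from the
        # original source): with defaults a=lambda opts: opts.b and b=lambda opts: opts.a,
        # reading either key raises ValueError naming the offending chain ['a', 'b'].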
def __setitem__(self, key, value):
if key not in self.__defaults:
raise KeyError(
f"Tried to set {key}={value} but {key} is not one of the defined "
f"options"
)
# Default values may change, so reset the cache
self.__clear_cache()
self.__data[key] = _checked(value, meta=self.__defaults[key], name=key)
def __delitem__(self, key):
if key not in self.__defaults:
raise KeyError(
f"Tried to unset {key} but {key} is not one of the defined options"
)
if key in self.__data:
# Default values may change, so reset the cache
self.__cache = {}
del self.__data[key]
# Otherwise 'key' is a valid option but was not set, so nothing changes
def __getattr__(self, key):
if key == "_MutableOptions__defaults":
return super(OptionsFactory.MutableOptions, self).__getattr__(key)
if key in self.__defaults:
return self.__getitem__(key)
raise AttributeError(f"This MutableOptions has no attribute {key}.")
def __setattr__(self, key, value):
if hasattr(self, "_MutableOptions__defaults") and key in self.__defaults:
return self.__setitem__(key, value)
super(OptionsFactory.MutableOptions, self).__setattr__(key, value)
def __delattr__(self, key):
if key in self.__defaults:
return self.__delitem__(key)
super(OptionsFactory.MutableOptions, self).__delattr__(key)
def is_default(self, key):
if key not in self.__defaults:
raise KeyError(f"{key} is not in this Options")
value = self[key]
if isinstance(value, OptionsFactory.MutableOptions):
return {k: value.is_default(k) for k in value}
return key not in self.__data
def __contains__(self, key):
return key in self.__defaults
def __len__(self):
return len(self.__defaults)
def __iter__(self):
return iter(self.keys())
def keys(self):
return self.__defaults.keys()
def values(self):
for key in self:
yield self[key]
def items(self):
return zip(self.keys(), self.values())
def __str__(self):
string = "{"
for key in self.__defaults:
value = self[key]
string += f"{key}: {value}"
# Using 'is True' here means we only append ' (default)' to options, not
# sections: if 'key' is a section then self.is_default(key) will return
# a dict
if self.is_default(key) is True:
string += " (default)"
string += ", "
if len(string) > 1:
# remove trailing ", "
string = string[:-2]
string += "}"
return string
class Options:
"""Provide access to a pre-defined set of options, with values fixed when the
instance is created
"""
__frozen = False
def __init__(self, mutable_options):
self.__data = {}
for key, value in mutable_options.items():
if isinstance(value, OptionsFactory.MutableOptions):
self.__data[key] = OptionsFactory.Options(mutable_options[key])
else:
self.__data[key] = deepcopy(value)
self.__doc = deepcopy(mutable_options.doc)
# make a dict of the explicitly-set (non-default) values
self.__is_default = {
key: mutable_options.is_default(key) for key in mutable_options
}
# Set self.__frozen to True to prevent attributes being changed
self.__frozen = True
@property
def doc(self):
return deepcopy(self.__doc)
def as_table(self):
"""Return a string with a formatted table of the settings"""
return _options_table_string(self)
def to_dict(self, with_defaults=True):
"""Convert the MutableOptions object to a dict
Parameters
----------
with_defaults : bool, default True
Include the default values in the returned dict?
"""
if with_defaults:
return {
key: value
if not isinstance(value, OptionsFactory.Options)
else value.to_dict(with_defaults)
for key, value in self.items()
}
else:
return {
key: value
if not isinstance(value, OptionsFactory.Options)
else value.to_dict(with_defaults)
for key, value in self.items()
# Use 'is not True' so we include subsections, for which
# self.is_default(key) returns a dict
if self.is_default(key) is not True
}
def to_yaml(self, file_like, with_defaults=False):
"""Save the options to a YAML file
Save only the non-default options unless with_defaults=True is passed
Parameters
----------
file_like : file handle or similar
File to write to
with_defaults : bool, default False
Save all the options, including default values
"""
import yaml
return yaml.dump(self.to_dict(with_defaults), file_like)
def get_subsections(self):
"""
Iterator over the subsections in this Options
"""
for key, value in self.__data.items():
if isinstance(value, OptionsFactory.Options):
yield key
def __getitem__(self, key):
try:
return deepcopy(self.__data.__getitem__(key))
except KeyError:
raise KeyError(f"This Options does not contain {key}")
def __setitem__(self, key, value):
raise TypeError("Options does not allow assigning to keys")
def __getattr__(self, key):
if key == "_Options__data":
# need to treat __data specially, as we use it for the next test
return super(OptionsFactory.Options, self).__getattr__(key)
if key in self.__data:
return self.__getitem__(key)
try:
return super(OptionsFactory.Options, self).__getattr__(key)
except AttributeError:
raise AttributeError(f"This Options has no attribute {key}.")
def __setattr__(self, key, value):
if self.__frozen:
raise TypeError("Options does not allow assigning to attributes")
super(OptionsFactory.Options, self).__setattr__(key, value)
def __getstate__(self):
# Need to define this so that pickling with dill works
return vars(self)
def __setstate__(self, state):
# Need to define this so that pickling with dill works
vars(self).update(state)
def is_default(self, key):
try:
return self.__is_default[key]
except KeyError:
raise KeyError(f"{key} is not in this Options")
def __contains__(self, key):
return key in self.__data
def __len__(self):
return len(self.__data)
def __iter__(self):
return iter(self.keys())
def keys(self):
return self.__data.keys()
def values(self):
for v in self.__data.values():
yield deepcopy(v)
def items(self):
return zip(self.keys(), self.values())
def __str__(self):
string = "{"
for key in self.__data:
string += f"{key}: {self[key]}"
# Using 'is True' here means we only append ' (default)' to options, not
# sections: if 'key' is a section then self.is_default(key) will return
# a dict
if self.is_default(key) is True:
string += " (default)"
string += ", "
if len(string) > 1:
# remove trailing ", "
string = string[:-2]
string += "}"
return string
class MutableOptionsFactory(OptionsFactory):
"""Factory to create MutableOptions or Options instances"""
def create(self, values=None):
"""Create a MutableOptions instance
The members of the created MutableOptions are defined by this
MutableOptionsFactory instance. Any values passed in the values argument are
used, and the rest are set from defaults, which can be expressions depending on
other members.
Parameters
----------
values : dict or Options, optional
Non-default values to be used
"""
return self._OptionsFactory__create_mutable(values)
def create_immutable(self, values=None):
"""Create an Options instance (which is immutable)
The members of the created Options are defined by this
MutableOptionsFactory instance. Any values passed in the values argument are
used, and the rest are set from defaults, which can be expressions depending on
other members.
Parameters
----------
values : dict or Options, optional
Non-default values to be used
"""
return self._OptionsFactory__create_immutable(values)
def create_from_yaml(self, file_like):
"""Create a MutableOptions instance from an input YAML file
Parameters
----------
file_like : file handle or similar to read from
File to read from
"""
return self.create(self._load_yaml(file_like))
def create_immutable_from_yaml(self, file_like):
"""Create an Options instance (which is immutable) from an input YAML file
Parameters
----------
file_like : file handle or similar to read from
File to read from
"""
return self.create_immutable(self._load_yaml(file_like))
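if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the original module): build a
    # factory with a plain default, a WithMeta default and an expression default, then
    # create an immutable Options instance with one explicitly-set value.
    factory = OptionsFactory(
        a=1,
        b=WithMeta(2.0, doc="option b", value_type=float),
        c=lambda options: options.a + options.b,
    )
    options = factory.create({"a": 3})
    print(options.c)  # 5.0 - evaluated from the expression default
    print(options.is_default("a"))  # False - "a" was set explicitly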
```
#### File: optionsfactory/optionsfactory/_utils.py
```python
def _checked(value, *, meta=None, name=None):
if (
(meta is not None)
and (meta.value_type is not None)
and (not isinstance(value, meta.value_type))
):
raise TypeError(
f"{value} is not of type {meta.value_type}"
f"{'' if name is None else ' for key=' + str(name)}"
)
if meta.allowed is not None:
if value not in meta.allowed:
raise ValueError(
f"{value} is not in the allowed values {meta.allowed}"
f"{'' if name is None else ' for key=' + str(name)}"
)
if meta.check_all is not None:
for check in meta.check_all:
if not check(value):
raise ValueError(
f"The value {value}"
f"{'' if name is None else ' of key=' + str(name)} is not "
f"compatible with check_all"
)
if meta.check_any is not None:
success = False
for check in meta.check_any:
if check(value):
success = True
if not success:
raise ValueError(
f"The value {value}"
f"{'' if name is None else ' of key=' + str(name)} is not "
f"compatible with check_any"
)
return value
def _options_table_string(options):
"""Return a string containing a table of options set"""
formatstring = "{:<50}| {:<27}\n"
defaultformat = "{:<15} (default) "
# Header
result = (
"\nOptions\n=======\n" + formatstring.format("Name", "Value") + "=" * 80 + "\n"
)
def _options_table_subsection(options, subsection_name):
result = ""
# subsection header
if subsection_name is not None:
result += (
"-" * 80 + "\n" + "{:<80}\n".format(subsection_name) + "-" * 80 + "\n"
)
# Row for each value that is not a subsection
for name, value in sorted(options.items()):
if name in options.get_subsections():
continue
valuestring = str(value)
if options.is_default(name):
valuestring = defaultformat.format(valuestring)
result += formatstring.format(name, valuestring)
for name in options.get_subsections():
result += _options_table_subsection(
options[name],
f"{str(subsection_name) + ':' if subsection_name is not None else ''}"
f"{name}",
)
return result
result += _options_table_subsection(options, None)
return result
```
#### File: optionsfactory/optionsfactory/withmeta.py
```python
from collections.abc import Sequence
from ._utils import _checked
class WithMeta:
"""Type for passing metadata with options value or expression into OptionsFactory"""
def __init__(
self,
value,
*,
doc=None,
value_type=None,
allowed=None,
check_all=None,
check_any=None,
):
"""
Parameters
----------
value : expression, value, str, or WithMeta
- If a callable expression is passed, evaluate expression(options) for the
default value
- If a value is passed, used as default value for this option
- If (i) a str is passed and (ii) it is not in the allowed values for this
option, and (iii) it is the name of another option, then set the default
for this option as the value of the other option
- If a WithMeta object is passed, check no other arguments were set and
copy all attributes from value
doc : str, optional
Docstring for this option
value_type : type, optional
Type that this option should have
allowed : value or sequence of values, optional
When the option is set, it must have one of these values.
            Cannot be set if 'check_all' or 'check_any' is given.
check_all : expression or sequence of expressions, optional
When a value is set for this option, all the expressions must return True
when called with that value.
Cannot be set if 'allowed' is given, but can be combined with check_any.
check_any : expression or sequence of expressions, optional
When a value is set for this option, at least one of the expressions must
return True when called with that value.
Cannot be set if 'allowed' is given, but can be combined with check_all.
"""
if isinstance(value, WithMeta):
if (
(doc is not None)
or (value_type is not None)
or (allowed is not None)
or (check_all is not None)
or (check_any is not None)
):
raise ValueError(
f"doc={doc}, value_type={value_type}, allowed={allowed}, "
f"check_all={check_all}, and check_any={check_any} should all be "
f"None when value is a WithMeta"
)
self.value = value.value
self.doc = value.doc
self.value_type = value.value_type
self.allowed = value.allowed
self.check_all = value.check_all
self.check_any = value.check_any
return
self.value = value
self.doc = doc
if isinstance(value_type, Sequence):
value_type = tuple(value_type)
self.value_type = value_type
if (allowed is not None) and (check_all is not None or check_any is not None):
if check_any is None:
raise ValueError("Cannot set both 'allowed' and 'check_all'")
elif check_all is None:
raise ValueError("Cannot set both 'allowed' and 'check_any'")
else:
raise ValueError(
"Cannot set both 'allowed' and 'check_all' or 'check_any'"
)
if allowed is not None:
if (not isinstance(allowed, Sequence)) or isinstance(allowed, str):
# make allowed values a sequence
allowed = (allowed,)
self.allowed = tuple(allowed)
else:
self.allowed = None
if check_all is not None:
if (not isinstance(check_all, Sequence)) or isinstance(check_all, str):
# make check_all expressions a sequence
check_all = (check_all,)
self.check_all = tuple(check_all)
for check in self.check_all:
if not callable(check):
raise ValueError(
f"{check} is not callable, but was passed as a check to "
f"check_all"
)
else:
self.check_all = None
if check_any is not None:
if (not isinstance(check_any, Sequence)) or isinstance(check_any, str):
# make check_any expressions a sequence
check_any = (check_any,)
self.check_any = tuple(check_any)
for check in self.check_any:
if not callable(check):
raise ValueError(
f"{check} is not callable, but was passed as a check to "
f"check_any"
)
else:
self.check_any = None
def __eq__(self, other):
if not isinstance(other, WithMeta):
return False
return (
self.value == other.value
and self.doc == other.doc
and self.allowed == other.allowed
and self.check_all == other.check_all
and self.check_any == other.check_any
)
def __str__(self):
return self.__repr__()
def __repr__(self):
return (
f"WithMeta({self.value}, doc={self.doc}, value_type={self.value_type}), "
f"allowed={self.allowed}, check_all={self.check_all}, "
f"check_any={self.check_any})"
)
def evaluate_expression(self, options, *, name=None):
# Value may be expression or value. Try evaluating as an expression using
# options first
default_maybe_expression = self.value
try:
default_value = default_maybe_expression(options)
except TypeError:
# Try evaluating as name of another option, if default_maybe_expression is a
# str and could not be the value of the option
if (
# Can only be a name if self.value is a str
isinstance(default_maybe_expression, str)
# Check value_type is set and does not include str, otherwise a string
# might be the value of the option
and (self.value_type is not None)
and (self.value_type is not str)
and not (
isinstance(self.value_type, Sequence) and str in self.value_type
)
):
try:
default_value = options[default_maybe_expression]
except KeyError:
raise KeyError(
f"The default {default_maybe_expression}"
f"{' for '+str(name) if name is not None else ''} is not in "
f"the options"
)
else:
default_value = default_maybe_expression
return _checked(default_value, meta=self, name=name)
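if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the original module): a plain
    # default value just has to pass the checks, while a callable default is evaluated
    # with the options object (here a plain dict stands in for Options).
    plain = WithMeta(1, doc="an int option", value_type=int, check_all=lambda x: x >= 0)
    print(plain.evaluate_expression({}))  # 1
    expr = WithMeta(lambda options: options["a"] + 1, value_type=int)
    print(expr.evaluate_expression({"a": 2}))  # 3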
``` |
{
"source": "johnomotani/xarray",
"score": 2
} |
#### File: xarray/tests/test_plugins.py
```python
from unittest import mock
import pkg_resources
import pytest
from xarray.backends import common, plugins
class DummyBackendEntrypointArgs(common.BackendEntrypoint):
def open_dataset(filename_or_obj, *args):
pass
class DummyBackendEntrypointKwargs(common.BackendEntrypoint):
def open_dataset(filename_or_obj, **kwargs):
pass
class DummyBackendEntrypoint1(common.BackendEntrypoint):
def open_dataset(self, filename_or_obj, *, decoder):
pass
class DummyBackendEntrypoint2(common.BackendEntrypoint):
def open_dataset(self, filename_or_obj, *, decoder):
pass
@pytest.fixture
def dummy_duplicated_entrypoints():
specs = [
"engine1 = xarray.tests.test_plugins:backend_1",
"engine1 = xarray.tests.test_plugins:backend_2",
"engine2 = xarray.tests.test_plugins:backend_1",
"engine2 = xarray.tests.test_plugins:backend_2",
]
eps = [pkg_resources.EntryPoint.parse(spec) for spec in specs]
return eps
@pytest.mark.filterwarnings("ignore:Found")
def test_remove_duplicates(dummy_duplicated_entrypoints):
with pytest.warns(RuntimeWarning):
entrypoints = plugins.remove_duplicates(dummy_duplicated_entrypoints)
assert len(entrypoints) == 2
def test_broken_plugin():
broken_backend = pkg_resources.EntryPoint.parse(
"broken_backend = xarray.tests.test_plugins:backend_1"
)
with pytest.warns(RuntimeWarning) as record:
_ = plugins.build_engines([broken_backend])
assert len(record) == 1
message = str(record[0].message)
assert "Engine 'broken_backend'" in message
def test_remove_duplicates_warnings(dummy_duplicated_entrypoints):
with pytest.warns(RuntimeWarning) as record:
_ = plugins.remove_duplicates(dummy_duplicated_entrypoints)
assert len(record) == 2
message0 = str(record[0].message)
message1 = str(record[1].message)
assert "entrypoints" in message0
assert "entrypoints" in message1
@mock.patch("pkg_resources.EntryPoint.load", mock.MagicMock(return_value=None))
def test_backends_dict_from_pkg():
specs = [
"engine1 = xarray.tests.test_plugins:backend_1",
"engine2 = xarray.tests.test_plugins:backend_2",
]
entrypoints = [pkg_resources.EntryPoint.parse(spec) for spec in specs]
engines = plugins.backends_dict_from_pkg(entrypoints)
assert len(engines) == 2
assert engines.keys() == set(("engine1", "engine2"))
def test_set_missing_parameters():
backend_1 = DummyBackendEntrypoint1
backend_2 = DummyBackendEntrypoint2
backend_2.open_dataset_parameters = ("filename_or_obj",)
engines = {"engine_1": backend_1, "engine_2": backend_2}
plugins.set_missing_parameters(engines)
assert len(engines) == 2
assert backend_1.open_dataset_parameters == ("filename_or_obj", "decoder")
assert backend_2.open_dataset_parameters == ("filename_or_obj",)
backend = DummyBackendEntrypointKwargs()
backend.open_dataset_parameters = ("filename_or_obj", "decoder")
plugins.set_missing_parameters({"engine": backend})
assert backend.open_dataset_parameters == ("filename_or_obj", "decoder")
backend = DummyBackendEntrypointArgs()
backend.open_dataset_parameters = ("filename_or_obj", "decoder")
plugins.set_missing_parameters({"engine": backend})
assert backend.open_dataset_parameters == ("filename_or_obj", "decoder")
def test_set_missing_parameters_raise_error():
backend = DummyBackendEntrypointKwargs()
with pytest.raises(TypeError):
plugins.set_missing_parameters({"engine": backend})
backend = DummyBackendEntrypointArgs()
with pytest.raises(TypeError):
plugins.set_missing_parameters({"engine": backend})
@mock.patch(
"pkg_resources.EntryPoint.load",
mock.MagicMock(return_value=DummyBackendEntrypoint1),
)
def test_build_engines():
dummy_pkg_entrypoint = pkg_resources.EntryPoint.parse(
"cfgrib = xarray.tests.test_plugins:backend_1"
)
backend_entrypoints = plugins.build_engines([dummy_pkg_entrypoint])
assert isinstance(backend_entrypoints["cfgrib"], DummyBackendEntrypoint1)
assert backend_entrypoints["cfgrib"].open_dataset_parameters == (
"filename_or_obj",
"decoder",
)
@mock.patch(
"pkg_resources.EntryPoint.load",
mock.MagicMock(return_value=DummyBackendEntrypoint1),
)
def test_build_engines_sorted():
dummy_pkg_entrypoints = [
pkg_resources.EntryPoint.parse(
"dummy2 = xarray.tests.test_plugins:backend_1",
),
pkg_resources.EntryPoint.parse(
"dummy1 = xarray.tests.test_plugins:backend_1",
),
]
backend_entrypoints = plugins.build_engines(dummy_pkg_entrypoints)
backend_entrypoints = list(backend_entrypoints)
indices = []
for be in plugins.STANDARD_BACKENDS_ORDER:
try:
index = backend_entrypoints.index(be)
backend_entrypoints.pop(index)
indices.append(index)
except ValueError:
pass
assert set(indices) < {0, -1}
assert list(backend_entrypoints) == sorted(backend_entrypoints)
``` |
{
"source": "johnomotani/xBOUT-0.1-backup",
"score": 2
} |
#### File: xbout/tests/test_load.py
```python
from pathlib import Path
import re
import pytest
import numpy as np
from xarray import DataArray, Dataset, concat
from xarray.tests.test_dataset import create_test_data
import xarray.testing as xrt
from natsort import natsorted
from xbout.load import (_check_filetype, _expand_wildcards, _expand_filepaths,
_arrange_for_concatenation, _trim, _infer_contains_boundaries,
open_boutdataset, _BOUT_PER_PROC_VARIABLES)
from xbout.utils import _separate_metadata
def test_check_extensions(tmpdir):
files_dir = tmpdir.mkdir("data")
example_nc_file = files_dir.join('example.nc')
example_nc_file.write("content_nc")
filetype = _check_filetype(Path(str(example_nc_file)))
assert filetype == 'netcdf4'
example_hdf5_file = files_dir.join('example.h5netcdf')
example_hdf5_file.write("content_hdf5")
filetype = _check_filetype(Path(str(example_hdf5_file)))
assert filetype == 'h5netcdf'
example_invalid_file = files_dir.join('example.txt')
example_hdf5_file.write("content_txt")
with pytest.raises(IOError):
filetype = _check_filetype(Path(str(example_invalid_file)))
class TestPathHandling:
def test_glob_expansion_single(self, tmpdir):
files_dir = tmpdir.mkdir("data")
example_file = files_dir.join('example.0.nc')
example_file.write("content")
path = Path(str(example_file))
filepaths = _expand_wildcards(path)
assert filepaths[0] == Path(str(example_file))
path = Path(str(files_dir.join('example.*.nc')))
filepaths = _expand_wildcards(path)
assert filepaths[0] == Path(str(example_file))
@pytest.mark.parametrize("ii, jj", [(1, 1), (1, 4), (3, 1), (5, 3), (12, 1),
(1, 12), (121, 2), (3, 111)])
def test_glob_expansion_both(self, tmpdir, ii, jj):
files_dir = tmpdir.mkdir("data")
filepaths = []
for i in range(ii):
example_run_dir = files_dir.mkdir('run' + str(i))
for j in range(jj):
example_file = example_run_dir.join('example.' + str(j) + '.nc')
example_file.write("content")
filepaths.append(Path(str(example_file)))
expected_filepaths = natsorted(filepaths,
key=lambda filepath: str(filepath))
path = Path(str(files_dir.join('run*/example.*.nc')))
actual_filepaths = _expand_wildcards(path)
assert actual_filepaths == expected_filepaths
def test_no_files(self, tmpdir):
files_dir = tmpdir.mkdir("data")
with pytest.raises(IOError):
path = Path(str(files_dir.join('run*/example.*.nc')))
actual_filepaths = _expand_filepaths(path)
@pytest.fixture()
def create_filepaths():
return _create_filepaths
def _create_filepaths(nxpe=1, nype=1, nt=1):
filepaths = []
for t in range(nt):
for i in range(nype):
for j in range(nxpe):
file_num = (j + nxpe * i)
path = './run{}'.format(str(t)) \
+ '/BOUT.dmp.{}.nc'.format(str(file_num))
filepaths.append(path)
return filepaths
class TestArrange:
def test_arrange_single(self, create_filepaths):
paths = create_filepaths(nxpe=1, nype=1, nt=1)
expected_path_grid = [[['./run0/BOUT.dmp.0.nc']]]
actual_path_grid, actual_concat_dims = _arrange_for_concatenation(paths, nxpe=1, nype=1)
assert expected_path_grid == actual_path_grid
assert actual_concat_dims == [None, None, None]
def test_arrange_along_x(self, create_filepaths):
paths = create_filepaths(nxpe=3, nype=1, nt=1)
expected_path_grid = [[['./run0/BOUT.dmp.0.nc',
'./run0/BOUT.dmp.1.nc',
'./run0/BOUT.dmp.2.nc']]]
actual_path_grid, actual_concat_dims = _arrange_for_concatenation(paths, nxpe=3, nype=1)
assert expected_path_grid == actual_path_grid
assert actual_concat_dims == [None, None, 'x']
def test_arrange_along_y(self, create_filepaths):
paths = create_filepaths(nxpe=1, nype=3, nt=1)
expected_path_grid = [[['./run0/BOUT.dmp.0.nc'],
['./run0/BOUT.dmp.1.nc'],
['./run0/BOUT.dmp.2.nc']]]
actual_path_grid, actual_concat_dims = _arrange_for_concatenation(
paths, nxpe=1, nype=3)
assert expected_path_grid == actual_path_grid
assert actual_concat_dims == [None, 'y', None]
def test_arrange_along_t(self, create_filepaths):
paths = create_filepaths(nxpe=1, nype=1, nt=3)
expected_path_grid = [[['./run0/BOUT.dmp.0.nc']],
[['./run1/BOUT.dmp.0.nc']],
[['./run2/BOUT.dmp.0.nc']]]
actual_path_grid, actual_concat_dims = _arrange_for_concatenation(
paths, nxpe=1, nype=1)
assert expected_path_grid == actual_path_grid
assert actual_concat_dims == ['t', None, None]
def test_arrange_along_xy(self, create_filepaths):
paths = create_filepaths(nxpe=3, nype=2, nt=1)
expected_path_grid = [[['./run0/BOUT.dmp.0.nc', './run0/BOUT.dmp.1.nc', './run0/BOUT.dmp.2.nc'],
['./run0/BOUT.dmp.3.nc', './run0/BOUT.dmp.4.nc', './run0/BOUT.dmp.5.nc']]]
actual_path_grid, actual_concat_dims = _arrange_for_concatenation(
paths, nxpe=3, nype=2)
assert expected_path_grid == actual_path_grid
assert actual_concat_dims == [None, 'y', 'x']
def test_arrange_along_xt(self, create_filepaths):
paths = create_filepaths(nxpe=3, nype=1, nt=2)
expected_path_grid = [[['./run0/BOUT.dmp.0.nc', './run0/BOUT.dmp.1.nc', './run0/BOUT.dmp.2.nc']],
[['./run1/BOUT.dmp.0.nc', './run1/BOUT.dmp.1.nc', './run1/BOUT.dmp.2.nc']]]
actual_path_grid, actual_concat_dims = _arrange_for_concatenation(
paths, nxpe=3, nype=1)
assert expected_path_grid == actual_path_grid
assert actual_concat_dims == ['t', None, 'x']
def test_arrange_along_xyt(self, create_filepaths):
paths = create_filepaths(nxpe=3, nype=2, nt=2)
expected_path_grid = [[['./run0/BOUT.dmp.0.nc', './run0/BOUT.dmp.1.nc', './run0/BOUT.dmp.2.nc'],
['./run0/BOUT.dmp.3.nc', './run0/BOUT.dmp.4.nc', './run0/BOUT.dmp.5.nc']],
[['./run1/BOUT.dmp.0.nc', './run1/BOUT.dmp.1.nc', './run1/BOUT.dmp.2.nc'],
['./run1/BOUT.dmp.3.nc', './run1/BOUT.dmp.4.nc', './run1/BOUT.dmp.5.nc']]]
actual_path_grid, actual_concat_dims = _arrange_for_concatenation(paths, nxpe=3, nype=2)
assert expected_path_grid == actual_path_grid
assert actual_concat_dims == ['t', 'y', 'x']
@pytest.fixture()
def bout_xyt_example_files(tmpdir_factory):
return _bout_xyt_example_files
def _bout_xyt_example_files(tmpdir_factory, prefix='BOUT.dmp', lengths=(6, 2, 4, 7),
nxpe=4, nype=2, nt=1, guards={}, syn_data_type='random',
grid=None, squashed=False):
"""
Mocks up a set of BOUT-like netCDF files, and return the temporary test directory containing them.
Deletes the temporary directory once that test is done.
"""
save_dir = tmpdir_factory.mktemp("data")
if squashed:
# create a single data-file, but alter the 'nxpe' and 'nype' variables, as if the
# file had been created by combining a set of BOUT.dmp.*.nc files
ds_list, file_list = create_bout_ds_list(prefix=prefix, lengths=lengths, nxpe=1,
nype=1, nt=nt, guards=guards,
syn_data_type=syn_data_type)
ds_list[0]['nxpe'] = nxpe
ds_list[0]['nype'] = nype
else:
ds_list, file_list = create_bout_ds_list(prefix=prefix, lengths=lengths,
nxpe=nxpe, nype=nype, nt=nt,
guards=guards,
syn_data_type=syn_data_type)
for ds, file_name in zip(ds_list, file_list):
ds.to_netcdf(str(save_dir.join(str(file_name))))
if grid is not None:
xsize = lengths[1]*nxpe
ysize = lengths[2]*nype
grid_ds = create_bout_grid_ds(xsize=xsize, ysize=ysize, guards=guards)
grid_ds.to_netcdf(str(save_dir.join(grid + ".nc")))
# Return a glob-like path to all files created, which has all file numbers replaced
    # with a single asterisk
path = str(save_dir.join(str(file_list[-1])))
count = 1
if nt > 1:
count += 1
# We have to reverse the path before limiting the number of numbers replaced so that the
# tests don't get confused by pytest's persistent temporary directories (which are also designated
# by different numbers)
glob_pattern = (re.sub(r'\d+', '*', path[::-1], count=count))[::-1]
return glob_pattern
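# Illustrative example of the glob construction above (the path is hypothetical):
# for path = "/tmp/pytest-3/data0/BOUT.dmp.7.nc" and count=1, reversing the string,
# substituting the first run of digits with '*' and reversing back yields
# "/tmp/pytest-3/data0/BOUT.dmp.*.nc", leaving the directory numbers untouched.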
def create_bout_ds_list(prefix, lengths=(6, 2, 4, 7), nxpe=4, nype=2, nt=1, guards={},
syn_data_type='random'):
"""
Mocks up a set of BOUT-like datasets.
Structured as though they were produced by a x-y parallelised run with multiple restarts.
"""
file_list = []
ds_list = []
for i in range(nxpe):
for j in range(nype):
num = (i + nxpe * j)
filename = prefix + "." + str(num) + ".nc"
file_list.append(filename)
# Include guard cells
upper_bndry_cells = {dim: guards.get(dim) for dim in guards.keys()}
lower_bndry_cells = {dim: guards.get(dim) for dim in guards.keys()}
ds = create_bout_ds(syn_data_type=syn_data_type, num=num, lengths=lengths, nxpe=nxpe, nype=nype,
xproc=i, yproc=j, guards=guards)
ds_list.append(ds)
# Sort this in order of num to remove any BOUT-specific structure
ds_list_sorted = [ds for filename, ds in sorted(zip(file_list, ds_list))]
file_list_sorted = [filename for filename, ds in sorted(zip(file_list, ds_list))]
return ds_list_sorted, file_list_sorted
def create_bout_ds(syn_data_type='random', lengths=(6, 2, 4, 7), num=0, nxpe=1, nype=1,
xproc=0, yproc=0, guards={}):
# Set the shape of the data in this dataset
t_length, x_length, y_length, z_length = lengths
mxg = guards.get('x', 0)
myg = guards.get('y', 0)
x_length += 2*mxg
y_length += 2*myg
shape = (t_length, x_length, y_length, z_length)
# calculate global nx, ny and nz
nx = nxpe*lengths[1] + 2*mxg
ny = nype*lengths[2]
nz = 1*lengths[3]
# Fill with some kind of synthetic data
    if syn_data_type == 'random':
# Each dataset contains unique random noise
np.random.seed(seed=num)
data = np.random.randn(*shape)
    elif syn_data_type == 'linear':
# Variables increase linearly across entire domain
data = DataArray(-np.ones(shape), dims=('t', 'x', 'y', 'z'))
t_array = DataArray((nx - 2*mxg)*ny*nz*np.arange(t_length, dtype=float),
dims='t')
x_array = DataArray(ny*nz*(xproc*lengths[1] + mxg
+ np.arange(lengths[1], dtype=float)),
dims='x')
y_array = DataArray(nz*(yproc*lengths[2] + myg
+ np.arange(lengths[2], dtype=float)),
dims='y')
z_array = DataArray(np.arange(z_length, dtype=float), dims='z')
data[:, mxg:x_length-mxg, myg:y_length-myg, :] = (
t_array + x_array + y_array + z_array
)
    elif syn_data_type == 'stepped':
# Each dataset contains a different number depending on the filename
data = np.ones(shape) * num
elif isinstance(syn_data_type, int):
data = np.ones(shape)* syn_data_type
else:
raise ValueError('Not a recognised choice of type of synthetic bout data.')
T = DataArray(data, dims=['t', 'x', 'y', 'z'])
n = DataArray(data, dims=['t', 'x', 'y', 'z'])
ds = Dataset({'n': n, 'T': T})
# BOUT_VERSION needed so that we know that number of points in z is MZ, not MZ-1 (as
    # it was in BOUT++ before v4.0)
ds['BOUT_VERSION'] = 4.3
# Include grid data
ds['NXPE'] = nxpe
ds['NYPE'] = nype
ds['NZPE'] = 1
ds['PE_XIND'] = xproc
ds['PE_YIND'] = yproc
ds['MYPE'] = num
ds['MXG'] = mxg
ds['MYG'] = myg
ds['nx'] = nx
ds['ny'] = ny
ds['nz'] = nz
ds['MZ'] = 1*lengths[3]
ds['MXSUB'] = lengths[1]
ds['MYSUB'] = lengths[2]
ds['MZSUB'] = lengths[3]
ds['ixseps1'] = nx
ds['ixseps2'] = nx
ds['jyseps1_1'] = 0
ds['jyseps1_2'] = ny
ds['jyseps2_1'] = ny//2 - 1
ds['jyseps2_2'] = ny//2 - 1
ds['ny_inner'] = ny//2
one = DataArray(np.ones((x_length, y_length)), dims=['x', 'y'])
zero = DataArray(np.zeros((x_length, y_length)), dims=['x', 'y'])
ds['zperiod'] = 1
ds['ZMIN'] = 0.
ds['ZMAX'] = 2.*np.pi
ds['g11'] = one
ds['g22'] = one
ds['g33'] = one
ds['g12'] = zero
ds['g13'] = zero
ds['g23'] = zero
ds['g_11'] = one
ds['g_22'] = one
ds['g_33'] = one
ds['g_12'] = zero
ds['g_13'] = zero
ds['g_23'] = zero
ds['G1'] = zero
ds['G2'] = zero
ds['G3'] = zero
ds['J'] = one
ds['Bxy'] = one
ds['zShift'] = zero
ds['dx'] = 0.5*one
ds['dy'] = 2.*one
ds['dz'] = 0.7
ds['iteration'] = t_length
ds['t_array'] = DataArray(np.arange(t_length, dtype=float)*10., dims='t')
return ds
def create_bout_grid_ds(xsize=2, ysize=4, guards={}):
# Set the shape of the data in this dataset
mxg = guards.get('x', 0)
myg = guards.get('y', 0)
xsize += 2*mxg
ysize += 2*myg
shape = (xsize, ysize)
data = DataArray(np.ones(shape), dims=['x', 'y'])
ds = Dataset({'psixy': data, 'Rxy': data, 'Zxy': data, 'hthe': data})
return ds
# Note, MYPE, PE_XIND and PE_YIND not included, since they are different for each
# processor and so are dropped when loading datasets.
METADATA_VARS = ['BOUT_VERSION', 'NXPE', 'NYPE', 'NZPE', 'MXG', 'MYG', 'nx', 'ny', 'nz',
'MZ', 'MXSUB', 'MYSUB', 'MZSUB', 'ixseps1', 'ixseps2', 'jyseps1_1',
'jyseps1_2', 'jyseps2_1', 'jyseps2_2', 'ny_inner', 'zperiod', 'ZMIN',
'ZMAX', 'dz', 'iteration']
class TestStripMetadata():
def test_strip_metadata(self):
original = create_bout_ds()
assert original['NXPE'] == 1
ds, metadata = _separate_metadata(original)
assert original.drop(METADATA_VARS + _BOUT_PER_PROC_VARIABLES,
errors='ignore').equals(ds)
assert metadata['NXPE'] == 1
# TODO also test loading multiple files which have guard cells
class TestOpen:
def test_single_file(self, tmpdir_factory, bout_xyt_example_files):
path = bout_xyt_example_files(tmpdir_factory, nxpe=1, nype=1, nt=1)
actual = open_boutdataset(datapath=path, keep_xboundaries=False)
expected = create_bout_ds()
xrt.assert_equal(actual.load(),
expected.drop(METADATA_VARS + _BOUT_PER_PROC_VARIABLES,
errors='ignore'))
def test_squashed_file(self, tmpdir_factory, bout_xyt_example_files):
path = bout_xyt_example_files(tmpdir_factory, nxpe=4, nype=3, nt=1,
squashed=True)
actual = open_boutdataset(datapath=path, keep_xboundaries=False)
expected = create_bout_ds()
xrt.assert_equal(actual.load(),
expected.drop(METADATA_VARS + _BOUT_PER_PROC_VARIABLES,
errors='ignore'))
def test_combine_along_x(self, tmpdir_factory, bout_xyt_example_files):
path = bout_xyt_example_files(tmpdir_factory, nxpe=4, nype=1, nt=1,
syn_data_type='stepped')
actual = open_boutdataset(datapath=path, keep_xboundaries=False)
bout_ds = create_bout_ds
expected = concat([bout_ds(0), bout_ds(1), bout_ds(2), bout_ds(3)], dim='x',
data_vars='minimal')
xrt.assert_equal(actual.load(),
expected.drop(METADATA_VARS + _BOUT_PER_PROC_VARIABLES,
errors='ignore'))
def test_combine_along_y(self, tmpdir_factory, bout_xyt_example_files):
path = bout_xyt_example_files(tmpdir_factory, nxpe=1, nype=3, nt=1,
syn_data_type='stepped')
actual = open_boutdataset(datapath=path, keep_xboundaries=False)
bout_ds = create_bout_ds
expected = concat([bout_ds(0), bout_ds(1), bout_ds(2)], dim='y',
data_vars='minimal')
xrt.assert_equal(actual.load(),
expected.drop(METADATA_VARS + _BOUT_PER_PROC_VARIABLES,
errors='ignore'))
@pytest.mark.skip
def test_combine_along_t(self):
...
def test_combine_along_xy(self, tmpdir_factory, bout_xyt_example_files):
path = bout_xyt_example_files(tmpdir_factory, nxpe=4, nype=3, nt=1,
syn_data_type='stepped')
actual = open_boutdataset(datapath=path, keep_xboundaries=False)
bout_ds = create_bout_ds
line1 = concat([bout_ds(0), bout_ds(1), bout_ds(2), bout_ds(3)], dim='x',
data_vars='minimal')
line2 = concat([bout_ds(4), bout_ds(5), bout_ds(6), bout_ds(7)], dim='x',
data_vars='minimal')
line3 = concat([bout_ds(8), bout_ds(9), bout_ds(10), bout_ds(11)], dim='x',
data_vars='minimal')
expected = concat([line1, line2, line3], dim='y',
data_vars='minimal')
xrt.assert_equal(actual.load(),
expected.drop(METADATA_VARS + _BOUT_PER_PROC_VARIABLES,
errors='ignore'))
def test_toroidal(self, tmpdir_factory, bout_xyt_example_files):
path = bout_xyt_example_files(tmpdir_factory, nxpe=3, nype=3, nt=1,
syn_data_type='stepped', grid='grid')
actual = open_boutdataset(datapath=path, geometry='toroidal',
gridfilepath=Path(path).parent.joinpath('grid.nc'))
# check dataset can be saved
save_dir = tmpdir_factory.mktemp('data')
actual.bout.save(str(save_dir.join('boutdata.nc')))
def test_salpha(self, tmpdir_factory, bout_xyt_example_files):
path = bout_xyt_example_files(tmpdir_factory, nxpe=3, nype=3, nt=1,
syn_data_type='stepped', grid='grid')
actual = open_boutdataset(datapath=path, geometry='s-alpha',
gridfilepath=Path(path).parent.joinpath('grid.nc'))
# check dataset can be saved
save_dir = tmpdir_factory.mktemp('data')
actual.bout.save(str(save_dir.join('boutdata.nc')))
@pytest.mark.skip
def test_combine_along_tx(self):
...
_test_processor_layouts_list = [
# No parallelization
(0, 0, 1, 1, {'x': True, 'y': True},
{'x': True, 'y': True}),
# 1d parallelization along x:
# Left
(0, 0, 3, 1, {'x': True, 'y': True},
{'x': False, 'y': True}),
# Middle
(1, 0, 3, 1, {'x': False, 'y': True},
{'x': False, 'y': True}),
# Right
(2, 0, 3, 1, {'x': False, 'y': True},
{'x': True, 'y': True}),
# 1d parallelization along y:
# Bottom
(0, 0, 1, 3, {'x': True, 'y': True},
{'x': True, 'y': False}),
# Middle
(0, 1, 1, 3, {'x': True, 'y': False},
{'x': True, 'y': False}),
# Top
(0, 2, 1, 3, {'x': True, 'y': False},
{'x': True, 'y': True}),
# 2d parallelization:
# Bottom left corner
(0, 0, 3, 4, {'x': True, 'y': True},
{'x': False, 'y': False}),
# Bottom right corner
(2, 0, 3, 4, {'x': False, 'y': True},
{'x': True, 'y': False}),
# Top left corner
(0, 3, 3, 4, {'x': True, 'y': False},
{'x': False, 'y': True}),
# Top right corner
(2, 3, 3, 4, {'x': False, 'y': False},
{'x': True, 'y': True}),
# Centre
(1, 2, 3, 4, {'x': False, 'y': False},
{'x': False, 'y': False}),
# Left side
(0, 2, 3, 4, {'x': True, 'y': False},
{'x': False, 'y': False}),
# Right side
(2, 2, 3, 4, {'x': False, 'y': False},
{'x': True, 'y': False}),
# Bottom side
(1, 0, 3, 4, {'x': False, 'y': True},
{'x': False, 'y': False}),
# Top side
(1, 3, 3, 4, {'x': False, 'y': False},
{'x': False, 'y': True})
]
_test_processor_layouts_doublenull_list = [
# 1d parallelization along y:
# Bottom
(0, 0, 1, 4, {'x': True, 'y': True},
{'x': True, 'y': False}),
# Lower Middle
(0, 1, 1, 4, {'x': True, 'y': False},
{'x': True, 'y': True}),
# Upper Middle
(0, 2, 1, 4, {'x': True, 'y': True},
{'x': True, 'y': False}),
# Top
(0, 3, 1, 4, {'x': True, 'y': False},
{'x': True, 'y': True}),
# 2d parallelization:
# Bottom left corner
(0, 0, 3, 4, {'x': True, 'y': True},
{'x': False, 'y': False}),
(1, 0, 3, 4, {'x': False, 'y': True},
{'x': False, 'y': False}),
# Bottom right corner
(2, 0, 3, 4, {'x': False, 'y': True},
{'x': True, 'y': False}),
(0, 1, 3, 4, {'x': True, 'y': False},
{'x': False, 'y': True}),
(1, 1, 3, 4, {'x': False, 'y': False},
{'x': False, 'y': True}),
(2, 1, 3, 4, {'x': False, 'y': False},
{'x': True, 'y': True}),
(0, 2, 3, 4, {'x': True, 'y': True},
{'x': False, 'y': False}),
(1, 2, 3, 4, {'x': False, 'y': True},
{'x': False, 'y': False}),
(2, 2, 3, 4, {'x': False, 'y': True},
{'x': True, 'y': False}),
# Top left corner
(0, 3, 3, 4, {'x': True, 'y': False},
{'x': False, 'y': True}),
(1, 3, 3, 4, {'x': False, 'y': False},
{'x': False, 'y': True}),
# Top right corner
(2, 3, 3, 4, {'x': False, 'y': False},
{'x': True, 'y': True})
]
class TestTrim:
def test_no_trim(self):
ds = create_test_data(0)
# Manually add filename - encoding normally added by xr.open_dataset
ds.encoding['source'] = 'folder0/BOUT.dmp.0.nc'
actual = _trim(ds, guards={}, keep_boundaries={}, nxpe=1,
nype=1)
xrt.assert_equal(actual, ds)
def test_trim_guards(self):
ds = create_test_data(0)
# Manually add filename - encoding normally added by xr.open_dataset
ds.encoding['source'] = 'folder0/BOUT.dmp.0.nc'
actual = _trim(ds, guards={'time': 2}, keep_boundaries={},
nxpe=1, nype=1)
selection = {'time': slice(2, -2)}
expected = ds.isel(**selection)
xrt.assert_equal(expected, actual)
@pytest.mark.parametrize(
"xproc, yproc, nxpe, nype, lower_boundaries, upper_boundaries",
_test_processor_layouts_list)
def test_infer_boundaries_2d_parallelization(
self, xproc, yproc, nxpe, nype, lower_boundaries, upper_boundaries):
"""
Numbering scheme for nxpe=3, nype=4
y 9 10 11
^ 6 7 8
| 3 4 5
| 0 1 2
-----> x
"""
ds = create_test_data(0)
ds['jyseps2_1'] = 0
ds['jyseps1_2'] = 0
ds['PE_XIND'] = xproc
ds['PE_YIND'] = yproc
actual_lower_boundaries, actual_upper_boundaries = _infer_contains_boundaries(
ds, nxpe, nype)
assert actual_lower_boundaries == lower_boundaries
assert actual_upper_boundaries == upper_boundaries
@pytest.mark.parametrize(
"xproc, yproc, nxpe, nype, lower_boundaries, upper_boundaries",
_test_processor_layouts_doublenull_list)
def test_infer_boundaries_2d_parallelization_doublenull(
self, xproc, yproc, nxpe, nype, lower_boundaries, upper_boundaries):
"""
Numbering scheme for nxpe=3, nype=4
y 9 10 11
^ 6 7 8
| 3 4 5
| 0 1 2
-----> x
"""
ds = create_test_data(0)
ds['jyseps2_1'] = 3
ds['jyseps1_2'] = 11
ds['ny_inner'] = 8
ds['MYSUB'] = 4
ds['PE_XIND'] = xproc
ds['PE_YIND'] = yproc
actual_lower_boundaries, actual_upper_boundaries = _infer_contains_boundaries(
ds, nxpe, nype)
assert actual_lower_boundaries == lower_boundaries
assert actual_upper_boundaries == upper_boundaries
@pytest.mark.parametrize("xproc, yproc, nxpe, nype, lower_boundaries, upper_boundaries",
_test_processor_layouts_list)
def test_infer_boundaries_2d_parallelization_by_filenum(
self, xproc, yproc, nxpe, nype, lower_boundaries, upper_boundaries):
"""
Numbering scheme for nxpe=3, nype=4
y 9 10 11
^ 6 7 8
| 3 4 5
| 0 1 2
-----> x
"""
filenum = yproc*nxpe + xproc
ds = create_test_data(0)
ds['jyseps2_1'] = 0
ds['jyseps1_2'] = 0
ds.encoding['source'] = "folder0/BOUT.dmp." + str(filenum) + ".nc"
actual_lower_boundaries, actual_upper_boundaries = _infer_contains_boundaries(
ds, nxpe, nype)
assert actual_lower_boundaries == lower_boundaries
assert actual_upper_boundaries == upper_boundaries
@pytest.mark.parametrize("xproc, yproc, nxpe, nype, lower_boundaries, upper_boundaries",
_test_processor_layouts_doublenull_list)
def test_infer_boundaries_2d_parallelization_doublenull_by_filenum(
self, xproc, yproc, nxpe, nype, lower_boundaries, upper_boundaries):
"""
Numbering scheme for nxpe=3, nype=4
y 9 10 11
^ 6 7 8
| 3 4 5
| 0 1 2
-----> x
"""
filenum = yproc*nxpe + xproc
ds = create_test_data(0)
ds['jyseps2_1'] = 3
ds['jyseps1_2'] = 11
ds['ny_inner'] = 8
ds['MYSUB'] = 4
ds.encoding['source'] = "folder0/BOUT.dmp." + str(filenum) + ".nc"
actual_lower_boundaries, actual_upper_boundaries = _infer_contains_boundaries(
ds, nxpe, nype)
assert actual_lower_boundaries == lower_boundaries
assert actual_upper_boundaries == upper_boundaries
def test_keep_xboundaries(self):
ds = create_test_data(0)
ds = ds.rename({'dim2': 'x'})
# Manually add filename - encoding normally added by xr.open_dataset
ds.encoding['source'] = 'folder0/BOUT.dmp.0.nc'
ds['jyseps2_1'] = 8
ds['jyseps1_2'] = 8
actual = _trim(ds, guards={'x': 2}, keep_boundaries={'x': True}, nxpe=1, nype=1)
expected = ds # Should be unchanged
xrt.assert_equal(expected, actual)
def test_keep_yboundaries(self):
ds = create_test_data(0)
ds = ds.rename({'dim2': 'y'})
# Manually add filename - encoding normally added by xr.open_dataset
ds.encoding['source'] = 'folder0/BOUT.dmp.0.nc'
ds['jyseps2_1'] = 8
ds['jyseps1_2'] = 8
actual = _trim(ds, guards={'y': 2}, keep_boundaries={'y': True}, nxpe=1, nype=1)
expected = ds # Should be unchanged
xrt.assert_equal(expected, actual)
@pytest.mark.parametrize("filenum, lower, upper",
[(0, True, False),
(1, False, True),
(2, True, False),
(3, False, True)])
def test_keep_yboundaries_doublenull_by_filenum(self, filenum, lower, upper):
ds = create_test_data(0)
ds = ds.rename({'dim2': 'y'})
# Manually add filename - encoding normally added by xr.open_dataset
ds.encoding['source'] = 'folder0/BOUT.dmp.'+str(filenum)+'.nc'
ds['jyseps2_1'] = 3
ds['jyseps1_2'] = 11
ds['ny_inner'] = 8
ds['MYSUB'] = 4
actual = _trim(ds, guards={'y': 2}, keep_boundaries={'y': True}, nxpe=1, nype=4)
expected = ds # Should be unchanged
if not lower:
expected = expected.isel(y=slice(2, None, None))
if not upper:
expected = expected.isel(y=slice(None, -2, None))
xrt.assert_equal(expected, actual)
def test_trim_timing_info(self):
ds = create_test_data(0)
from xbout.load import _BOUT_PER_PROC_VARIABLES
# remove a couple of entries from _BOUT_PER_PROC_VARIABLES so we test that _trim
# does not fail if not all of them are present
_BOUT_PER_PROC_VARIABLES = _BOUT_PER_PROC_VARIABLES[:-2]
for v in _BOUT_PER_PROC_VARIABLES:
ds[v] = 42.
ds = _trim(ds, guards={}, keep_boundaries={}, nxpe=1, nype=1)
expected = create_test_data(0)
xrt.assert_equal(ds, expected)
``` |
{
"source": "johnoneil/arib",
"score": 3
} |
#### File: arib/mpeg/ts.py
```python
import os
import sys
import argparse
import struct
# memorymap file on 64 bit systems
import mmap
class ES:
""" very minimalistic Elementary Stream handling
"""
STREAM_ID_INDEX = 3
@staticmethod
def pes_packet_check_formedness(payload):
""" Check formedness of pes packet and indicate we have the entire payload
"""
b1 = ord(payload[0])
b2 = ord(payload[1])
b3 = ord(payload[2])
b4 = ord(payload[3])
if b1 != 0 or b2 != 0 or b3 != 1:
return False
return True
@staticmethod
def get_pes_stream_id(payload):
return ord(payload[ES.STREAM_ID_INDEX])
@staticmethod
def get_pes_packet_length(payload):
if len(payload)<6:
return 0
# we add 6 for start code, stream id and pes packet length itself
return struct.unpack('>H', payload[4:6])[0] + 6
@staticmethod
def get_pes_flags(payload):
return struct.unpack('>H', payload[6:8])[0]
@staticmethod
def get_pes_header_length(payload):
# 6 is initial prefix, streamid and then pes packet length
# 3 is for header flags and header size value
# value at byte 8 gives the remaining bytes in the header including stuffing
if len(payload) < 9:
return 0
return 6 + 3 + ord(payload[8])
@staticmethod
def get_pes_payload_length(payload):
return ES.get_pes_packet_length(payload) - ES.get_pes_header_length(payload)
@staticmethod
def get_pes_payload(payload):
payload_start = ES.get_pes_header_length(payload)
return payload[payload_start:]
@staticmethod
def pes_packet_complete(payload):
pes_packet_len = ES.get_pes_packet_length(payload)
payload_len = len(payload)
return pes_packet_len == payload_len
class TS(object):
""" very minimalistic Transport stream handling
"""
PACKET_SIZE = 188
# Sync byte
SYNC_BYTE_INDEX = 0
SYNC_BYTE = 'G'
# Transport Error Indicator (TEI)
TEI_INDEX = 1
TEI_MASK = 0x80
# Payload Unit Start Indicator (PUSI)
PUSI_INDEX = 1
PUSI_MASK = 0x40
    # Packet ID (PID)
PID_START_INDEX = 1
PID_LENGTH_BYTES = 2
PID_MASK = 0x1fff
# Transport Scrambling Control (TSC)
TSC_INDEX = 3
TSC_MASK = 0xc0
# Adaptation field control
ADAPTATION_FIELD_CONTROL_INDEX = 3
ADAPTATION_FIELD_CONTROL_MASK = 0x30
NO_ADAPTATION_FIELD = 0b01
ADAPTATION_FIELD_ONLY = 0b10
ADAPTATION_FIELD_AND_PAYLOAD = 0b11
ADAPTATION_FIELD_RESERVED = 0b00
# Continuity counter
CONTINUITY_COUNTER_INDEX = 3
CONTINUITY_COUNTER_MASK = 0x0f
# Adaptation field data (if present)
ADAPTATION_FIELD_LENGTH_INDEX = 4
ADAPTATION_FIELD_DATA_INDEX = 5
# Program Clock Reference (PCR)
# Present flag tagged in ADAPTATION_FIELD_DATA_INDEX byte
PCR_FLAG_MASK = 0x10
PCR_START_INDEX = 6
PCR_SIZE_BYTES = 6
@staticmethod
def next_packet(filename, memorymap=True):
""" Generator to remove a series of TS packets from a TS file
"""
with open(filename, 'rb') as f:
#memory map the file if necessary (prob requires 64 bit systems)
_file = f
if memorymap:
_file = mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ)
while True:
packet = _file.read(TS.PACKET_SIZE)
if packet:
# first byte SHOULD be the sync byte
# but if it isn't find one.
if packet[0] != TS.SYNC_BYTE:
start_byte = 0
print packet[0]
for i in range(start_byte, TS.PACKET_SIZE):
if packet[i] == TS.SYNC_BYTE:
start_byte = i
break
# didn't find a new start? FAIL
if start_byte == 0:
raise Exception("failure to find sync byte in ts packet size.")
continue
remainder = _file.read(TS.PACKET_SIZE - start_byte)
packet = packet[start_byte:] + remainder
yield packet
else:
break
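    # Usage sketch for next_packet above (hypothetical filename): packets are
    # yielded lazily as 188-byte strings, e.g.
    #
    #   for packet in TS.next_packet('capture.ts'):
    #       pid = TS.get_pid(packet)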
@staticmethod
def check_packet_formedness(packet):
"""Check some features of this packet and see if it's well formed or not
"""
if len(packet) != TS.PACKET_SIZE:
raise Exception("Provided input packet string not of correct size")
if packet[0] != TS.SYNC_BYTE:
raise Exception("Provided input packet does not begin with correct sync byte.")
@staticmethod
def get_transport_error_indicator(packet):
return (ord(packet[TS.TEI_INDEX]) & TS.TEI_MASK) != 0
@staticmethod
def get_payload_start(packet):
return (ord(packet[TS.PUSI_INDEX]) & TS.PUSI_MASK) != 0
@staticmethod
def get_pid(packet):
"""Given a stringified MPEG TS packet, extract the PID value
and return it as a simple integer value.
Do this as quickly as possible for performance
"""
return ((ord(packet[TS.PID_START_INDEX]) & 0x1f)<<8) | ord(packet[TS.PID_START_INDEX+1])
@staticmethod
def get_tsc(packet):
"""get value of Transport Scrambling Control indicato
"""
return (ord(packet[TS.TSC_INDEX]) & TS.TSC_MASK) >> 6
@staticmethod
def get_adaptation_field_control(packet):
""" get the adaptation field control value for this packet
"""
return (ord(packet[TS.ADAPTATION_FIELD_CONTROL_INDEX]) & TS.ADAPTATION_FIELD_CONTROL_MASK) >> 4
@staticmethod
def get_continuity_counter(packet):
""" Get the continuity counter value for this packet
"""
return ord(packet[TS.CONTINUITY_COUNTER_INDEX]) & TS.CONTINUITY_COUNTER_MASK
@staticmethod
def get_adaptation_field_length(packet):
""" Get the length of the adaptation field for this packet.
Can return 0 if none is present.
"""
if TS.get_adaptation_field_control(packet) == TS.NO_ADAPTATION_FIELD:
return 0
#we add one byte here for the adaptation field length data itself
return ord(packet[TS.ADAPTATION_FIELD_LENGTH_INDEX]) + 1
@staticmethod
def adaptation_field_present(packet):
return TS.get_adaptation_field_control(packet) != TS.NO_ADAPTATION_FIELD
@staticmethod
def get_pcr(packet):
""" Get the Program Clock Reference for this packet if present.
Can return 0 if data not present.
"""
if not TS.adaptation_field_present(packet):
return 0
if not ord(packet[TS.ADAPTATION_FIELD_DATA_INDEX]) & TS.PCR_FLAG_MASK:
return 0
b1 = struct.unpack('>L', packet[TS.PCR_START_INDEX:TS.PCR_START_INDEX+4])[0]
b2 = struct.unpack('>H', packet[TS.PCR_START_INDEX+4:TS.PCR_START_INDEX+6])[0]
base = (b1 << 1) | (b2 >> 15) # 33 bit base
extension = b2 & 0x1ff # 9 bit extension
# TODO: proper extension handling as per the spec
# returning the base gives us good results currently
#return base * 300 + extension
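        # For reference (standard MPEG-2 systems arithmetic, matching the
        # commented-out line above): the full 27 MHz PCR would be
        # base * 300 + extension, while the 90 kHz base alone converts to
        # seconds as base / 90000.0 (e.g. base = 900000 -> 10.0 s).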
return base
@staticmethod
def pcr_delta_time_ms(pcr_t1, pcr_t2, offset = 0):
"""Return a floating point time in milliseconds representing the
Difference in time between two PCR timestamps
"""
return float(pcr_t2-pcr_t1)/90000.0 + offset
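        # Worked example (values chosen for illustration): pcr_t1 = 0 and
        # pcr_t2 = 90000 give (90000 - 0) / 90000.0 = 1.0, i.e. one second.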
@staticmethod
def get_payload_length(packet):
"""Payload length from an 188 byte ts packet
"""
adaptation_field_len = TS.get_adaptation_field_length(packet)
return 188 - 4 - adaptation_field_len
@staticmethod
def get_payload(packet):
""" return a byte array deep copy of 188 byte ts packet payload
"""
#payload_len = get_payload_length(packet)
adaptation_field_len = TS.get_adaptation_field_length(packet)
header_size = 4 + adaptation_field_len
return packet[header_size:]
def __init__(self, filename):
self._filename = filename
self._total_filesize = os.path.getsize(filename)
self._read_size = 0
self.Progress = None
self.OnTSPacket = None
self.OnESPacket = None
self.OnTSPacketError = None
self.OnESPacketError = None
self._elementary_streams = {}
def Parse(self):
""" Go through the .ts file, and invoke a callback on each TS packet and ES packet
Also invoke progress callbacks and packet error callbacks as appropriate
"""
prev_percent_read = 0
for packet in TS.next_packet(self._filename):
#check_packet_formedness(packet)
pei = TS.get_transport_error_indicator(packet)
pusi = TS.get_payload_start(packet)
pid = TS.get_pid(packet)
tsc = TS.get_tsc(packet)
# per .ts packet handler
if self.OnTSPacket:
self.OnTSPacket(packet)
# Update a progress callback
self._read_size += TS.PACKET_SIZE
percent_read = ((self._read_size / float(self._total_filesize)) * 100)
new_percent_read = int(percent_read * 100)
if new_percent_read != prev_percent_read and self.Progress:
self.Progress(self._read_size, self._total_filesize, percent_read)
prev_percent_read = new_percent_read
adaptation_field_control = TS.get_adaptation_field_control(packet)
continuity_counter = TS.get_continuity_counter(packet)
# put together PES from payloads
payload = TS.get_payload(packet)
if pusi == True:
if not ES.pes_packet_check_formedness(payload):
if pid in self._elementary_streams:
self._elementary_streams[pid] = None
continue
pes_id = ES.get_pes_stream_id(payload)
self._elementary_streams[pid] = payload
else:
if pid in self._elementary_streams:
# TODO: check packet sequence counter
if not self._elementary_streams[pid]:
self._elementary_streams[pid] = ""
self._elementary_streams[pid] += payload
else:
                    # TODO: throw. This situation means out-of-order packets.
pass
if pid in self._elementary_streams and ES.pes_packet_complete(self._elementary_streams[pid]):
# TODO: handle packet contents here (callback)
es = self._elementary_streams[pid]
if self.OnESPacket:
header_size = ES.get_pes_header_length(es)
self.OnESPacket(pid, es, header_size)
# GLOBALS TO KEEP TRACK OF STATE
initial_timestamp = 0
elapsed_time_s = 0
def OnProgress(bytes_read, total_bytes, percent):
"""
Callback method invoked on a change in file progress percent (not every packet)
Meant as a lower frequency callback to update onscreen progress percent or something.
:param bytes_read:
:param total_bytes:
:param percent:
:return:
"""
sys.stdout.write("progress: %.2f%% \r" % (percent))
sys.stdout.flush()
def OnTSPacket(packet):
"""
Callback invoked on the successful extraction of a single TS packet from a ts file
:param packet: The entire packet (header and payload) as a string
:return: None
"""
    global initial_timestamp, elapsed_time_s
    # pcr (program clock reference) can be used to calculate elapsed time in seconds
# we've read through the .ts file
pcr = TS.get_pcr(packet)
current_timestamp = pcr
initial_timestamp = initial_timestamp or current_timestamp
delta = current_timestamp - initial_timestamp
elapsed_time_s = float(delta)/90000.0
def OnESPacket(current_pid, packet, header_size):
"""
Callback invoked on the successful extraction of an Elementary Stream packet from the
Transport Stream file packets.
    :param current_pid: The TS packet identifier (PID) for the TS packets this info originated from
:param packet: The ENTIRE ES packet, header and payload-- which may have been assembled
from multiple TS packet payloads.
:param header_size: Size of the header in bytes (characters in the string). Provided to more
easily separate the packet into header and payload.
:return: None
"""
pass
def main():
parser = argparse.ArgumentParser(description='Draw CC Packets from MPG2 Transport Stream file.')
parser.add_argument('infile', help='Input filename (MPEG2 Transport Stream File)', type=str)
args = parser.parse_args()
infilename = args.infile
if not os.path.exists(infilename):
        print 'Input filename: ' + infilename + ' does not exist.'
        sys.exit(-1)
ts = TS(infilename)
ts.Progress = OnProgress
ts.OnTSPacket = OnTSPacket
ts.OnESPacket = OnESPacket
ts.Parse()
if __name__ == "__main__":
main()
``` |
{
"source": "johnoneill98/idem-azurerm",
"score": 2
} |
#### File: azurerm/compute/virtual_machine_extension.py
```python
from __future__ import absolute_import
import logging
# Azure libs
HAS_LIBS = False
try:
import azure.mgmt.compute.models # pylint: disable=unused-import
from msrestazure.azure_exceptions import CloudError
HAS_LIBS = True
except ImportError:
pass
__func_alias__ = {"list_": "list"}
log = logging.getLogger(__name__)
async def create_or_update(
hub,
ctx,
name,
vm_name,
resource_group,
location,
publisher,
extension_type,
version,
settings,
**kwargs,
):
"""
.. versionadded:: 2.0.0
The operation to create or update the extension.
:param name: The name of the virtual machine extension.
:param vm_name: The name of the virtual machine where the extension should be created or updated.
:param resource_group: The name of the resource group.
:param location: Resource location.
:param publisher: The publisher of the extension.
:param extension_type: Specifies the type of the extension; an example is "CustomScriptExtension".
:param version: Specifies the version of the script handler.
:param settings: A dictionary representing the public settings for the extension. This dictionary will be
        utilized as JSON by the SDK operation.
CLI Example:
.. code-block:: bash
azurerm.compute.virtual_machine_extension.create_or_update test_name test_vm test_group test_loc \
test_publisher test_type test_version test_settings
"""
result = {}
compconn = await hub.exec.azurerm.utils.get_client(ctx, "compute", **kwargs)
try:
paramsmodel = await hub.exec.azurerm.utils.create_object_model(
"compute",
"VirtualMachineExtension",
location=location,
settings=settings,
publisher=publisher,
virtual_machine_extension_type=extension_type,
type_handler_version=version,
**kwargs,
)
except TypeError as exc:
result = {
"error": "The object model could not be built. ({0})".format(str(exc))
}
return result
try:
extension = compconn.virtual_machine_extensions.create_or_update(
vm_extension_name=name,
vm_name=vm_name,
resource_group_name=resource_group,
extension_parameters=paramsmodel,
)
extension.wait()
result = extension.result().as_dict()
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("compute", str(exc), **kwargs)
result = {"error": str(exc)}
return result
async def delete(hub, ctx, name, vm_name, resource_group, **kwargs):
"""
.. versionadded:: 2.0.0
The operation to delete the extension.
:param name: The name of the virtual machine extension.
:param vm_name: The name of the virtual machine where the extension should be deleted.
:param resource_group: The name of the resource group.
CLI Example:
.. code-block:: bash
azurerm.compute.virtual_machine_extension.delete test_name test_vm test_group
"""
result = False
compconn = await hub.exec.azurerm.utils.get_client(ctx, "compute", **kwargs)
try:
extension = compconn.virtual_machine_extensions.delete(
vm_extension_name=name, vm_name=vm_name, resource_group_name=resource_group
)
extension.wait()
result = True
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("compute", str(exc), **kwargs)
result = {"error": str(exc)}
return result
async def get(hub, ctx, name, vm_name, resource_group, **kwargs):
"""
.. versionadded:: 2.0.0
The operation to get the extension.
:param name: The name of the virtual machine extension.
:param vm_name: The name of the virtual machine containing the extension.
:param resource_group: The name of the resource group.
CLI Example:
.. code-block:: bash
azurerm.compute.virtual_machine_extension.get test_name test_vm test_group
"""
result = {}
compconn = await hub.exec.azurerm.utils.get_client(ctx, "compute", **kwargs)
try:
extension = compconn.virtual_machine_extensions.get(
vm_extension_name=name, vm_name=vm_name, resource_group_name=resource_group
)
result = extension.as_dict()
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("compute", str(exc), **kwargs)
result = {"error": str(exc)}
return result
async def list_(hub, ctx, vm_name, resource_group, **kwargs):
"""
.. versionadded:: 2.0.0
The operation to get all extensions of a Virtual Machine.
:param vm_name: The name of the virtual machine containing the extension.
:param resource_group: The name of the resource group.
CLI Example:
.. code-block:: bash
azurerm.compute.virtual_machine_extension.list test_vm test_group
"""
result = {}
compconn = await hub.exec.azurerm.utils.get_client(ctx, "compute", **kwargs)
try:
extensions = compconn.virtual_machine_extensions.list(
vm_name=vm_name, resource_group_name=resource_group
)
extensions_as_list = extensions.as_dict().get("value", {})
for extension in extensions_as_list:
result[extension["name"]] = extension
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("compute", str(exc), **kwargs)
result = {"error": str(exc)}
return result
```
#### File: azurerm/graphrbac/client.py
```python
import logging
# Azure libs
HAS_LIBS = False
try:
from azure.graphrbac import GraphRbacManagementClient
HAS_LIBS = True
except ImportError:
pass
log = logging.getLogger(__name__)
def __virtual__(hub):
"""
Only load when Azure SDK imports successfully.
"""
return HAS_LIBS
async def get(hub, ctx, **kwargs):
"""
.. versionadded:: 2.4.0
Load the Graph RBAC Management client and return a GraphRbacManagementClient object.
"""
(
credentials,
subscription_id,
cloud_env,
) = await hub.exec.azurerm.utils.determine_auth(ctx, **kwargs)
graph_client = GraphRbacManagementClient(
credentials=credentials, tenant_id=credentials._tenant
)
return graph_client
```
#### File: azurerm/monitor/diagnostic_setting.py
```python
from __future__ import absolute_import
import logging
# Azure libs
HAS_LIBS = False
try:
import azure.mgmt.monitor.models # pylint: disable=unused-import
from msrest.exceptions import SerializationError
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.monitor.models import ErrorResponseException
HAS_LIBS = True
except ImportError:
pass
__func_alias__ = {"list_": "list"}
log = logging.getLogger(__name__)
async def create_or_update(
hub,
ctx,
name,
resource_uri,
metrics,
logs,
workspace_id=None,
storage_account_id=None,
service_bus_rule_id=None,
event_hub_authorization_rule_id=None,
event_hub_name=None,
**kwargs,
):
"""
.. versionadded:: 1.0.0
Create or update diagnostic settings for the specified resource. At least one destination for the diagnostic
setting logs is required. Any combination of the following destinations is acceptable:
    1. Archive the diagnostic settings to a storage account. This would require the storage_account_id param.
2. Stream the diagnostic settings to an event hub. This would require the event_hub_name and
event_hub_authorization_rule_id params.
3. Send the diagnostic settings to Log Analytics. This would require the workspace_id param.
:param name: The name of the diagnostic setting.
:param resource_uri: The identifier of the resource.
:param metrics: A list of dictionaries representing valid MetricSettings objects. If this list is empty, then the
list passed as the logs parameter must have at least one element. Valid parameters are:
- ``category``: Name of a diagnostic metric category for the resource type this setting is applied to. To obtain
the list of diagnostic metric categories for a resource, first perform a GET diagnostic setting operation.
This is a required parameter.
- ``enabled``: A value indicating whether this category is enabled. This is a required parameter.
- ``time_grain``: An optional timegrain of the metric in ISO-8601 format.
- ``retention_policy``: An optional dictionary representing a RetentionPolicy object for the specified category.
The default retention policy for a diagnostic setting is {'enabled': False, 'days': 0}. Required parameters
include:
- ``days``: The number of days for the retention in days. A value of 0 will retain the events indefinitely.
- ``enabled``: A value indicating whether the retention policy is enabled.
:param logs: A list of dictionaries representing valid LogSettings objects. If this list is empty, then the list
passed as the metrics parameter must have at least one element. Valid parameters are:
- ``category``: Name of a diagnostic log category for the resource type this setting is applied to. To obtain
the list of diagnostic log categories for a resource, first perform a GET diagnostic setting operation.
This is a required parameter.
- ``enabled``: A value indicating whether this category is enabled. This is a required parameter.
- ``retention_policy``: An optional dictionary representing a RetentionPolicy object for the specified category.
The default retention policy for a diagnostic setting is {'enabled': False, 'days': 0}. Required parameters
include:
- ``days``: The number of days for the retention in days. A value of 0 will retain the events indefinitely.
- ``enabled``: A value indicating whether the retention policy is enabled.
:param workspace_id: The workspace (resource) ID for the Log Analytics workspace to which you would like to
send Diagnostic Logs.
:param storage_account_id: The resource ID of the storage account to which you would like to send Diagnostic Logs.
:param service_bus_rule_id: The service bus rule ID of the diagnostic setting. This is here to
maintain backwards compatibility.
:param event_hub_authorization_rule_id: The resource ID for the event hub authorization rule.
:param event_hub_name: The name of the event hub. If none is specified, the default event hub will be selected.
CLI Example:
.. code-block:: bash
azurerm.monitor.diagnostic_setting.create_or_update test_name test_uri test_metrics test_logs \
test_destination
"""
result = {}
moniconn = await hub.exec.azurerm.utils.get_client(ctx, "monitor", **kwargs)
try:
diagmodel = await hub.exec.azurerm.utils.create_object_model(
"monitor",
"DiagnosticSettingsResource",
metrics=metrics,
logs=logs,
workspace_id=workspace_id,
storage_account_id=storage_account_id,
service_bus_rule_id=service_bus_rule_id,
event_hub_authorization_rule_id=event_hub_authorization_rule_id,
event_hub_name=event_hub_name,
**kwargs,
)
except TypeError as exc:
result = {
"error": "The object model could not be built. ({0})".format(str(exc))
}
return result
try:
diag = moniconn.diagnostic_settings.create_or_update(
name=name, resource_uri=resource_uri, parameters=diagmodel
)
result = diag.as_dict()
except (CloudError, ErrorResponseException) as exc:
await hub.exec.azurerm.utils.log_cloud_error("monitor", str(exc), **kwargs)
result = {"error": str(exc)}
return result
async def delete(hub, ctx, name, resource_uri, **kwargs):
"""
.. versionadded:: 1.0.0
Deletes existing diagnostic settings for the specified resource.
:param name: The name of the diagnostic setting.
:param resource_uri: The identifier of the resource.
CLI Example:
.. code-block:: bash
azurerm.monitor.diagnostic_setting.delete test_name test_uri
"""
result = False
moniconn = await hub.exec.azurerm.utils.get_client(ctx, "monitor", **kwargs)
try:
diag = moniconn.diagnostic_settings.delete(
name=name, resource_uri=resource_uri, **kwargs
)
result = True
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("monitor", str(exc), **kwargs)
result = {"error": str(exc)}
return result
async def get(hub, ctx, name, resource_uri, **kwargs):
"""
.. versionadded:: 1.0.0
Gets the active diagnostic settings for the specified resource.
:param name: The name of the diagnostic setting.
:param resource_uri: The identifier of the resource.
CLI Example:
.. code-block:: bash
azurerm.monitor.diagnostic_setting.get test_name test_uri
"""
result = {}
moniconn = await hub.exec.azurerm.utils.get_client(ctx, "monitor", **kwargs)
try:
diag = moniconn.diagnostic_settings.get(
name=name, resource_uri=resource_uri, **kwargs
)
result = diag.as_dict()
except (CloudError, ErrorResponseException) as exc:
await hub.exec.azurerm.utils.log_cloud_error("monitor", str(exc), **kwargs)
result = {"error": str(exc)}
return result
async def list_(hub, ctx, resource_uri, **kwargs):
"""
.. versionadded:: 1.0.0
Gets the active diagnostic settings list for the specified resource.
:param resource_uri: The identifier of the resource.
CLI Example:
.. code-block:: bash
azurerm.monitor.diagnostic_setting.list test_uri
"""
result = {}
moniconn = await hub.exec.azurerm.utils.get_client(ctx, "monitor", **kwargs)
try:
diag = moniconn.diagnostic_settings.list(resource_uri=resource_uri, **kwargs)
values = diag.as_dict().get("value", [])
for value in values:
result[value["name"]] = value
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("monitor", str(exc), **kwargs)
result = {"error": str(exc)}
return result
```
#### File: azurerm/network/local_network_gateway.py
```python
from __future__ import absolute_import
import logging
try:
from six.moves import range as six_range
except ImportError:
six_range = range
# Azure libs
HAS_LIBS = False
try:
import azure.mgmt.network.models # pylint: disable=unused-import
from msrestazure.tools import is_valid_resource_id, parse_resource_id
from msrest.exceptions import SerializationError
from msrestazure.azure_exceptions import CloudError
HAS_LIBS = True
except ImportError:
pass
__func_alias__ = {"list_": "list"}
log = logging.getLogger(__name__)
async def create_or_update(
hub, ctx, name, resource_group, gateway_ip_address, **kwargs
):
"""
.. versionadded:: 1.0.0
Creates or updates a local network gateway object in the specified resource group.
:param name: The name of the local network gateway object to be created or updated.
:param resource_group: The name of the resource group associated with the local network gateway.
:param gateway_ip_address: IP address of the local network gateway.
CLI Example:
.. code-block:: bash
azurerm.network.local_network_gateway.create_or_update test_name test_group test_ip
"""
if "location" not in kwargs:
rg_props = await hub.exec.azurerm.resource.group.get(
ctx, resource_group, **kwargs
)
if "error" in rg_props:
log.error("Unable to determine location from resource group specified.")
return {
"error": "Unable to determine location from resource group specified."
}
kwargs["location"] = rg_props["location"]
netconn = await hub.exec.azurerm.utils.get_client(ctx, "network", **kwargs)
try:
gatewaymodel = await hub.exec.azurerm.utils.create_object_model(
"network",
"LocalNetworkGateway",
gateway_ip_address=gateway_ip_address,
**kwargs,
)
except TypeError as exc:
result = {
"error": "The object model could not be built. ({0})".format(str(exc))
}
return result
try:
gateway = netconn.local_network_gateways.create_or_update(
local_network_gateway_name=name,
resource_group_name=resource_group,
parameters=gatewaymodel,
)
gateway.wait()
gateway_result = gateway.result()
result = gateway_result.as_dict()
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("network", str(exc), **kwargs)
result = {"error": str(exc)}
except SerializationError as exc:
result = {
"error": "The object model could not be parsed. ({0})".format(str(exc))
}
return result
async def get(hub, ctx, name, resource_group, **kwargs):
"""
.. versionadded:: 1.0.0
Gets the details of a specific local network gateway within a specified resource group.
:param name: The name of the local network gateway.
:param resource_group: The name of the resource group.
CLI Example:
.. code-block:: bash
azurerm.network.local_network_gateway.get test_name test_group
"""
netconn = await hub.exec.azurerm.utils.get_client(ctx, "network", **kwargs)
try:
gateway = netconn.local_network_gateways.get(
resource_group_name=resource_group, local_network_gateway_name=name
)
result = gateway.as_dict()
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("network", str(exc), **kwargs)
result = {"error": str(exc)}
return result
async def delete(hub, ctx, name, resource_group, **kwargs):
"""
.. versionadded:: 1.0.0
Deletes the specified local network gateway.
:param name: The name of the local network gateway that will be deleted.
:param resource_group: The name of the resource group.
CLI Example:
.. code-block:: bash
azurerm.network.local_network_gateway.delete test_name test_group
"""
result = False
netconn = await hub.exec.azurerm.utils.get_client(ctx, "network", **kwargs)
try:
gateway = netconn.local_network_gateways.delete(
resource_group_name=resource_group, local_network_gateway_name=name
)
gateway.wait()
result = True
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("network", str(exc), **kwargs)
return result
async def list_(hub, ctx, resource_group, **kwargs):
"""
.. versionadded:: 1.0.0
Lists all local network gateways within a resource group.
:param resource_group: The name of the resource group.
CLI Example:
.. code-block:: bash
azurerm.network.local_network_gateway.list test_group
"""
result = {}
netconn = await hub.exec.azurerm.utils.get_client(ctx, "network", **kwargs)
try:
gateways = await hub.exec.azurerm.utils.paged_object_to_list(
netconn.local_network_gateways.list(resource_group_name=resource_group)
)
for gateway in gateways:
result[gateway["name"]] = gateway
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("network", str(exc), **kwargs)
result = {"error": str(exc)}
return result
```
#### File: azurerm/postgresql/database.py
```python
from __future__ import absolute_import
import logging
# Azure libs
HAS_LIBS = False
try:
import azure.mgmt.rdbms.postgresql.models # pylint: disable=unused-import
from msrestazure.azure_exceptions import CloudError
from msrest.exceptions import ValidationError
HAS_LIBS = True
except ImportError:
pass
log = logging.getLogger(__name__)
async def create_or_update(
hub, ctx, name, server_name, resource_group, charset=None, collation=None, **kwargs
):
"""
.. versionadded:: 2.0.0
Creates a new database or updates an existing database.
:param name: The name of the database.
:param server_name: The name of the server.
:param resource_group: The name of the resource group. The name is case insensitive.
:param charset: The charset of the database. Defaults to None.
:param collation: The collation of the database. Defaults to None.
CLI Example:
.. code-block:: bash
azurerm.postgresql.database.create_or_update test_name test_server test_group test_charset test_collation
"""
result = {}
postconn = await hub.exec.azurerm.utils.get_client(ctx, "postgresql", **kwargs)
try:
database = postconn.databases.create_or_update(
database_name=name,
server_name=server_name,
resource_group_name=resource_group,
charset=charset,
collation=collation,
)
database.wait()
result = database.result().as_dict()
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("postgresql", str(exc), **kwargs)
result = {"error": str(exc)}
return result
async def delete(hub, ctx, name, server_name, resource_group, **kwargs):
"""
.. versionadded:: 2.0.0
Deletes a database.
:param name: The name of the database.
:param server_name: The name of the server.
:param resource_group: The name of the resource group. The name is case insensitive.
CLI Example:
.. code-block:: bash
azurerm.postgresql.database.delete test_name test_server test_group
"""
result = False
postconn = await hub.exec.azurerm.utils.get_client(ctx, "postgresql", **kwargs)
try:
database = postconn.databases.delete(
database_name=name,
server_name=server_name,
resource_group_name=resource_group,
)
database.wait()
result = True
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("postgresql", str(exc), **kwargs)
result = {"error": str(exc)}
return result
async def get(hub, ctx, name, server_name, resource_group, **kwargs):
"""
.. versionadded:: 2.0.0
Gets information about a database.
:param name: The name of the database.
:param server_name: The name of the server.
:param resource_group: The name of the resource group. The name is case insensitive.
CLI Example:
.. code-block:: bash
azurerm.postgresql.database.get test_name test_server test_group
"""
result = {}
postconn = await hub.exec.azurerm.utils.get_client(ctx, "postgresql", **kwargs)
try:
database = postconn.databases.get(
database_name=name,
server_name=server_name,
resource_group_name=resource_group,
)
result = database.as_dict()
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("postgresql", str(exc), **kwargs)
result = {"error": str(exc)}
return result
async def list_by_server(hub, ctx, server_name, resource_group, **kwargs):
"""
.. versionadded:: 2.0.0
List all the databases in a given server.
:param server_name: The name of the server.
:param resource_group: The name of the resource group. The name is case insensitive.
CLI Example:
.. code-block:: bash
azurerm.postgresql.database.list_by_server test_server test_group
"""
result = {}
postconn = await hub.exec.azurerm.utils.get_client(ctx, "postgresql", **kwargs)
try:
databases = await hub.exec.azurerm.utils.paged_object_to_list(
postconn.databases.list_by_server(
server_name=server_name, resource_group_name=resource_group
)
)
for database in databases:
result[database["name"]] = database
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("postgresql", str(exc), **kwargs)
result = {"error": str(exc)}
return result
```
#### File: azurerm/redis/operations.py
```python
from __future__ import absolute_import
import logging
# Azure libs
HAS_LIBS = False
try:
import azure.mgmt.network.models # pylint: disable=unused-import
from msrest.exceptions import SerializationError
from msrestazure.azure_exceptions import CloudError
HAS_LIBS = True
except ImportError:
pass
__func_alias__ = {"list_": "list"}
log = logging.getLogger(__name__)
async def check_name_availability(hub, ctx, name, **kwargs):
"""
.. versionadded:: 2.0.0
Checks that the redis cache name is valid and is not already in use.
:param name: The name of the Redis cache to check the availability of
CLI Example:
.. code-block:: bash
azurerm.redis.operations.check_name_availability test_name
"""
result = False
redconn = await hub.exec.azurerm.utils.get_client(ctx, "redis", **kwargs)
try:
avail = redconn.redis.check_name_availability(
name=name, type="Microsoft.Cache/redis",
)
if avail is None:
result = True
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("redis", str(exc), **kwargs)
result = {"error": str(exc)}
return result
async def create(
hub,
ctx,
name,
resource_group,
location,
sku,
redis_configuration=None,
enable_non_ssl_port=False,
tenant_settings=None,
shard_count=None,
minimum_tls_version=None,
subnet_id=None,
static_ip=None,
zones=None,
**kwargs,
):
"""
.. versionadded:: 2.0.0
Create or replace (overwrite/recreate, with potential downtime) an existing Redis cache.
:param name: The name of the Redis cache.
:param resource_group: The name of the resource group.
:param location: The geo-location where the resource lives.
:param sku: A dictionary representing the SKU of the Redis cache to deploy. Required parameters include:
- ``name``: The type of Redis cache to deploy. Possible values include: 'Basic', 'Standard', and 'Premium'.
- ``family``: The SKU family to use. Possible values include 'C' for Basic/Standard and 'P' for Premium.
- ``capacity``: The size of the Redis cache to deploy. Possible values include 0, 1, 2, 3, 4, 5, and 6 for the
C (Basic/Standard) family and 1, 2, 3, and 4 for the P (Premium) family.
:param redis_configuration: A dictionary of string key-value pairs that represent all Redis Settings. Some possible
keys include: rdb-backup-enabled, rdb-storage-connection-string, rdb-backup-frequency, maxmemory-delta,
maxmemory-policy, notify-keyspace-events, maxmemory-samples, slowlog-log-slower-than, slowlog-max-len,
list-max-ziplist-entries, list-max-ziplist-value, hash-max-ziplist-entries, hash-max-ziplist-value,
set-max-intset-entries, zset-max-ziplist-entries, zset-max-ziplist-value, and more.
:param enable_non_ssl_port: Specifies whether the non-ssl Redis server port (6379) is enabled. Defaults to False.
:param tenant_settings: A dictionary of tenant settings.
:param shard_count: The number of shards to be created on a Premium Cluster Cache.
:param minimum_tls_version: The specified TLS version (or higher) that clients are required to use. Possible values
include: '1.0', '1.1', and '1.2'.
:param subnet_id: The full resource ID of a subnet in a virtual network to deploy the Redis cache in. Example
format: /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/Microsoft.{Network|ClassicNetwork}/VirtualNetworks/vnet1/subnets/subnet1
:param static_ip: Static IP address. Required when deploying a Redis cache inside an existing Azure Virtual Network.
:param zones: A list of availability zones denoting where the resource needs to come from.
CLI Example:
.. code-block:: bash
azurerm.redis.operations.create test_name test_rg test_location test_sku
"""
redconn = await hub.exec.azurerm.utils.get_client(ctx, "redis", **kwargs)
try:
paramsmodel = await hub.exec.azurerm.utils.create_object_model(
"redis",
"RedisCreateParameters",
sku=sku,
location=location,
redis_configuration=redis_configuration,
enable_non_ssl_port=enable_non_ssl_port,
tenant_settings=tenant_settings,
shard_count=shard_count,
minimum_tls_version=minimum_tls_version,
subnet_id=subnet_id,
static_ip=static_ip,
zones=zones,
**kwargs,
)
except TypeError as exc:
result = {
"error": "The object model could not be built. ({0})".format(str(exc))
}
return result
try:
cache = redconn.redis.create(
name=name, resource_group_name=resource_group, parameters=paramsmodel
)
result = cache.result().as_dict()
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("redis", str(exc), **kwargs)
result = {"error": str(exc)}
except SerializationError as exc:
result = {
"error": "The object model could not be parsed. ({0})".format(str(exc))
}
return result
async def delete(hub, ctx, name, resource_group, **kwargs):
"""
.. versionadded:: 2.0.0
Deletes a Redis cache.
:param name: The name of the Redis cache.
:param resource_group: The name of the resource group.
CLI Example:
.. code-block:: bash
azurerm.redis.operations.delete test_name test_rg
"""
result = False
redconn = await hub.exec.azurerm.utils.get_client(ctx, "redis", **kwargs)
try:
cache = redconn.redis.delete(name=name, resource_group_name=resource_group)
result = True
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("redis", str(exc), **kwargs)
return result
async def export_data(
hub, ctx, name, resource_group, prefix, container, file_format=None, **kwargs
):
"""
.. versionadded:: 2.0.0
Export data from the redis cache to blobs in a container.
:param name: The name of the Redis cache.
:param resource_group: The name of the resource group.
:param prefix: The prefix to use for exported files.
:param container: The name of the container to export to.
:param file_format: An optional file format.
CLI Example:
.. code-block:: bash
azurerm.redis.operations.export_data test_name test_rg test_prefix test_container
"""
result = {}
redconn = await hub.exec.azurerm.utils.get_client(ctx, "redis", **kwargs)
# Create a ExportRDBParameters object
try:
paramsmodel = await hub.exec.azurerm.utils.create_object_model(
"redis",
"ExportRDBParameters",
prefix=prefix,
container=container,
format=file_format,
)
except TypeError as exc:
result = {
"error": "The object model could not be built. ({0})".format(str(exc))
}
return result
try:
cache = redconn.redis.export_data(
name=name, resource_group_name=resource_group, parameters=paramsmodel
)
result = cache.result().as_dict()
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("redis", str(exc), **kwargs)
result = {"error": str(exc)}
return result
async def force_reboot(
hub, ctx, name, resource_group, reboot_type, shard_id=None, **kwargs
):
"""
.. versionadded:: 2.0.0
Reboot specified Redis node(s). This operation requires write permission to the cache resource.
There can be potential data loss.
:param name: The name of the redis cache.
:param resource_group: The name of the resource group.
:param reboot_type: Which Redis node(s) to reboot. Depending on this value data loss is possible. Possible
values include: 'PrimaryNode', 'SecondaryNode', 'AllNodes'.
:param shard_id: If clustering is enabled, the ID of the shard to be rebooted.
CLI Example:
.. code-block:: bash
azurerm.redis.operations.force_reboot test_name test_rg test_type test_id
"""
result = {}
redconn = await hub.exec.azurerm.utils.get_client(ctx, "redis", **kwargs)
try:
cache = redconn.redis.force_reboot(
name=name,
resource_group_name=resource_group,
reboot_type=reboot_type,
shard_id=shard_id,
)
result = cache.as_dict()
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("redis", str(exc), **kwargs)
result = {"error": str(exc)}
return result
async def get(hub, ctx, name, resource_group, **kwargs):
"""
.. versionadded:: 2.0.0
Gets a Redis cache (resource description).
:param name: The name of the Redis cache.
:param resource_group: The name of the resource group.
CLI Example:
.. code-block:: bash
azurerm.redis.operations.get test_name test_rg
"""
result = {}
redconn = await hub.exec.azurerm.utils.get_client(ctx, "redis", **kwargs)
try:
cache = redconn.redis.get(name=name, resource_group_name=resource_group)
result = cache.as_dict()
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("redis", str(exc), **kwargs)
result = {"error": str(exc)}
return result
async def import_data(
hub, ctx, name, resource_group, files, file_format=None, **kwargs
):
"""
.. versionadded:: 2.0.0
Import data into Redis cache.
:param name: The name of the Redis cache.
:param resource_group: The name of the resource group.
:param files: A list of strings that represent the names of files to import.
:param file_format: An optional file format.
CLI Example:
.. code-block:: bash
azurerm.redis.operations.import_data test_name test_rg test_files
"""
result = {}
redconn = await hub.exec.azurerm.utils.get_client(ctx, "redis", **kwargs)
try:
cache = redconn.redis.import_data(
name=name,
resource_group_name=resource_group,
files=files,
format=file_format,
)
result = cache.result().as_dict()
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("redis", str(exc), **kwargs)
result = {"error": str(exc)}
return result
async def list_(hub, ctx, **kwargs):
"""
.. versionadded:: 2.0.0
Gets all Redis caches in the specified subscription.
CLI Example:
.. code-block:: bash
azurerm.redis.operations.list
"""
result = {}
redconn = await hub.exec.azurerm.utils.get_client(ctx, "redis", **kwargs)
try:
caches = await hub.exec.azurerm.utils.paged_object_to_list(redconn.redis.list())
for cache in caches:
result[cache["name"]] = cache
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("redis", str(exc), **kwargs)
result = {"error": str(exc)}
return result
async def list_by_resource_group(hub, ctx, resource_group, **kwargs):
"""
.. versionadded:: 2.0.0
Lists all Redis caches in a resource group.
:param resource_group: The name of the resource group.
CLI Example:
.. code-block:: bash
azurerm.redis.operations.list_by_resource_group test_rg
"""
result = {}
redconn = await hub.exec.azurerm.utils.get_client(ctx, "redis", **kwargs)
try:
caches = await hub.exec.azurerm.utils.paged_object_to_list(
redconn.redis.list_by_resource_group(resource_group_name=resource_group,)
)
for cache in caches:
result[cache["name"]] = cache
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("redis", str(exc), **kwargs)
result = {"error": str(exc)}
return result
async def list_keys(hub, ctx, name, resource_group, **kwargs):
"""
.. versionadded:: 2.0.0
Retrieve a Redis cache's access keys. This operation requires write permission to the cache resource.
:param name: The name of the Redis cache.
:param resource_group: The name of the resource group.
CLI Example:
.. code-block:: bash
azurerm.redis.operations.list_keys test_name test_rg
"""
result = {}
redconn = await hub.exec.azurerm.utils.get_client(ctx, "redis", **kwargs)
try:
keys = redconn.redis.list_keys(name=name, resource_group_name=resource_group)
result = keys.as_dict()
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("redis", str(exc), **kwargs)
result = {"error": str(exc)}
return result
async def list_upgrade_notifications(hub, ctx, name, resource_group, history, **kwargs):
"""
.. versionadded:: 2.0.0
Gets any upgrade notifications for a Redis cache.
:param name: The name of the Redis cache.
:param resource_group: The name of the resource group.
:param history: A float representing how many minutes in past to look for upgrade notifications.
CLI Example:
.. code-block:: bash
azurerm.redis.operations.list_upgrade_notifications test_name test_rg test_history
"""
result = {}
redconn = await hub.exec.azurerm.utils.get_client(ctx, "redis", **kwargs)
try:
notifications = redconn.redis.list_upgrade_notifications(
name=name, resource_group_name=resource_group, history=history
)
result = notifications.as_dict()
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("redis", str(exc), **kwargs)
result = {"error": str(exc)}
return result
async def regenerate_key(hub, ctx, name, resource_group, key_type, **kwargs):
"""
.. versionadded:: 2.0.0
Regenerate Redis cache's access keys. This operation requires write permission to the cache resource.
:param name: The name of the Redis cache.
:param resource_group: The name of the resource group.
:param key_type: The Redis access key to regenerate. Possible values include: 'Primary' and 'Secondary'.
CLI Example:
.. code-block:: bash
        azurerm.redis.operations.regenerate_key test_name test_rg test_type
"""
redconn = await hub.exec.azurerm.utils.get_client(ctx, "redis", **kwargs)
try:
keys = redconn.redis.regenerate_key(
resource_group_name=resource_group, name=name, key_type=key_type, **kwargs
)
result = keys.as_dict()
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("redis", str(exc), **kwargs)
result = {"error": str(exc)}
return result
async def update(
hub,
ctx,
name,
resource_group,
sku=None,
redis_configuration=None,
enable_non_ssl_port=False,
tenant_settings=None,
shard_count=None,
minimum_tls_version=None,
**kwargs,
):
"""
.. versionadded:: 2.0.0
Update an existing Redis cache.
:param name: The name of the Redis cache.
:param resource_group: The name of the resource group.
:param sku: A dictionary representing the SKU of the Redis cache to deploy. Required parameters include:
- ``name``: The type of Redis cache to deploy. Possible values include: 'Basic', 'Standard', and 'Premium'.
- ``family``: The SKU family to use. Possible values include 'C' for Basic/Standard and 'P' for Premium.
- ``capacity``: The size of the Redis cache to deploy. Possible values include 0, 1, 2, 3, 4, 5, and 6 for the
C (Basic/Standard) family and 1, 2, 3, and 4 for the P (Premium) family.
:param redis_configuration: A dictionary of string key-value pairs that represent all Redis Settings. Some possible
keys include: rdb-backup-enabled, rdb-storage-connection-string, rdb-backup-frequency, maxmemory-delta,
maxmemory-policy, notify-keyspace-events, maxmemory-samples, slowlog-log-slower-than, slowlog-max-len,
list-max-ziplist-entries, list-max-ziplist-value, hash-max-ziplist-entries, hash-max-ziplist-value,
set-max-intset-entries, zset-max-ziplist-entries, zset-max-ziplist-value, and more.
:param enable_non_ssl_port: Specifies whether the non-ssl Redis server port (6379) is enabled. Defaults to False.
:param tenant_settings: A dictionary of tenant settings.
:param shard_count: The number of shards to be created on a Premium Cluster Cache.
:param minimum_tls_version: The specified TLS version (or higher) that clients are required to use. Possible values
include: '1.0', '1.1', and '1.2'.
CLI Example:
.. code-block:: bash
azurerm.redis.operations.update test_name test_rg test_location test_sku
"""
redconn = await hub.exec.azurerm.utils.get_client(ctx, "redis", **kwargs)
try:
paramsmodel = await hub.exec.azurerm.utils.create_object_model(
"redis",
"RedisUpdateParameters",
sku=sku,
redis_configuration=redis_configuration,
enable_non_ssl_port=enable_non_ssl_port,
tenant_settings=tenant_settings,
shard_count=shard_count,
minimum_tls_version=minimum_tls_version,
**kwargs,
)
except TypeError as exc:
result = {
"error": "The object model could not be built. ({0})".format(str(exc))
}
return result
try:
cache = redconn.redis.update(
name=name, resource_group_name=resource_group, parameters=paramsmodel
)
result = cache.as_dict()
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("redis", str(exc), **kwargs)
result = {"error": str(exc)}
except SerializationError as exc:
result = {
"error": "The object model could not be parsed. ({0})".format(str(exc))
}
return result
```
#### File: azurerm/resource/deployment.py
```python
from __future__ import absolute_import
from json import loads, dumps
import logging
# Azure libs
HAS_LIBS = False
try:
import azure.mgmt.resource.resources.models # pylint: disable=unused-import
from msrest.exceptions import SerializationError
from msrestazure.azure_exceptions import CloudError
HAS_LIBS = True
except ImportError:
pass
__func_alias__ = {"list_": "list"}
log = logging.getLogger(__name__)
async def operation_get(hub, ctx, operation, deployment, resource_group, **kwargs):
"""
.. versionadded:: 1.0.0
Get a deployment operation within a deployment.
:param operation: The operation ID of the operation within the deployment.
:param deployment: The name of the deployment containing the operation.
:param resource_group: The resource group name assigned to the deployment.
CLI Example:
.. code-block:: bash
azurerm.resource.deployment.operation_get testoperation testdeploy testgroup
"""
resconn = await hub.exec.azurerm.utils.get_client(ctx, "resource", **kwargs)
try:
operation = resconn.deployment_operations.get(
resource_group_name=resource_group,
deployment_name=deployment,
operation_id=operation,
)
result = operation.as_dict()
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("resource", str(exc), **kwargs)
result = {"error": str(exc)}
return result
async def operations_list(hub, ctx, name, resource_group, result_limit=10, **kwargs):
"""
.. versionadded:: 1.0.0
List all deployment operations within a deployment.
:param name: The name of the deployment to query.
:param resource_group: The resource group name assigned to the deployment.
:param result_limit: (Default: 10) The limit on the list of deployment operations.
CLI Example:
.. code-block:: bash
azurerm.resource.deployment.operations_list testdeploy testgroup
"""
result = {}
resconn = await hub.exec.azurerm.utils.get_client(ctx, "resource", **kwargs)
try:
operations = await hub.exec.azurerm.utils.paged_object_to_list(
resconn.deployment_operations.list(
resource_group_name=resource_group,
deployment_name=name,
top=result_limit,
)
)
for oper in operations:
result[oper["operation_id"]] = oper
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("resource", str(exc), **kwargs)
result = {"error": str(exc)}
return result
async def delete(hub, ctx, name, resource_group, **kwargs):
"""
.. versionadded:: 1.0.0
Delete a deployment.
:param name: The name of the deployment to delete.
:param resource_group: The resource group name assigned to the deployment.
CLI Example:
.. code-block:: bash
azurerm.resource.deployment.delete testdeploy testgroup
"""
result = False
resconn = await hub.exec.azurerm.utils.get_client(ctx, "resource", **kwargs)
try:
deploy = resconn.deployments.delete(
deployment_name=name, resource_group_name=resource_group
)
deploy.wait()
result = True
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("resource", str(exc), **kwargs)
return result
async def check_existence(hub, ctx, name, resource_group, **kwargs):
"""
.. versionadded:: 1.0.0
Check the existence of a deployment.
:param name: The name of the deployment to query.
:param resource_group: The resource group name assigned to the deployment.
CLI Example:
.. code-block:: bash
azurerm.resource.deployment.check_existence testdeploy testgroup
"""
result = False
resconn = await hub.exec.azurerm.utils.get_client(ctx, "resource", **kwargs)
try:
result = resconn.deployments.check_existence(
deployment_name=name, resource_group_name=resource_group
)
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("resource", str(exc), **kwargs)
return result
async def create_or_update(
hub,
ctx,
name,
resource_group,
deploy_mode="incremental",
debug_setting="none",
deploy_params=None,
parameters_link=None,
deploy_template=None,
template_link=None,
**kwargs,
):
"""
.. versionadded:: 1.0.0
Deploys resources to a resource group.
:param name: The name of the deployment to create or update.
:param resource_group: The resource group name assigned to the deployment.
:param deploy_mode: The mode that is used to deploy resources. This value can be either
'incremental' or 'complete'. In Incremental mode, resources are deployed without deleting
existing resources that are not included in the template. In Complete mode, resources
are deployed and existing resources in the resource group that are not included in
the template are deleted. Be careful when using Complete mode as you may
unintentionally delete resources.
:param debug_setting: The debug setting of the deployment. The permitted values are 'none',
'requestContent', 'responseContent', or 'requestContent,responseContent'. By logging
information about the request or response, you could potentially expose sensitive data
that is retrieved through the deployment operations.
:param deploy_params: JSON string containing name and value pairs that define the deployment
parameters for the template. You use this element when you want to provide the parameter
values directly in the request rather than link to an existing parameter file. Use either
the parameters_link property or the deploy_params property, but not both.
:param parameters_link: The URI of a parameters file. You use this element to link to an existing
parameters file. Use either the parameters_link property or the deploy_params property, but not both.
:param deploy_template: JSON string of template content. You use this element when you want to pass
the template syntax directly in the request rather than link to an existing template. Use either
the template_link property or the deploy_template property, but not both.
:param template_link: The URI of the template. Use either the template_link property or the
deploy_template property, but not both.
CLI Example:
.. code-block:: bash
azurerm.resource.deployment.create_or_update testdeploy testgroup
"""
resconn = await hub.exec.azurerm.utils.get_client(ctx, "resource", **kwargs)
prop_kwargs = {"mode": deploy_mode}
prop_kwargs["debug_setting"] = {"detail_level": debug_setting}
if deploy_params:
prop_kwargs["parameters"] = deploy_params
else:
if isinstance(parameters_link, dict):
prop_kwargs["parameters_link"] = parameters_link
else:
prop_kwargs["parameters_link"] = {"uri": parameters_link}
if deploy_template:
prop_kwargs["template"] = deploy_template
else:
if isinstance(template_link, dict):
prop_kwargs["template_link"] = template_link
else:
prop_kwargs["template_link"] = {"uri": template_link}
deploy_kwargs = kwargs.copy()
deploy_kwargs.update(prop_kwargs)
try:
deploy_model = await hub.exec.azurerm.utils.create_object_model(
"resource.resources", "DeploymentProperties", **deploy_kwargs
)
except TypeError as exc:
result = {
"error": "The object model could not be built. ({0})".format(str(exc))
}
return result
try:
validate = await hub.exec.azurerm.resource.deployment.validate(
ctx=ctx,
name=name,
resource_group=resource_group,
deploy_mode=deploy_mode,
debug_setting=debug_setting,
deploy_params=deploy_params,
parameters_link=parameters_link,
deploy_template=deploy_template,
template_link=template_link,
**kwargs,
)
if "error" in validate:
result = validate
else:
deploy = resconn.deployments.create_or_update(
deployment_name=name,
resource_group_name=resource_group,
properties=deploy_model,
)
deploy.wait()
deploy_result = deploy.result()
result = deploy_result.as_dict()
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("resource", str(exc), **kwargs)
result = {"error": str(exc)}
except SerializationError as exc:
result = {
"error": "The object model could not be parsed. ({0})".format(str(exc))
}
return result
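# A hedged, illustrative sketch (not part of the original module) of calling this
# coroutine from other idem-azurerm code with an inline template. The JSON strings
# below are hypothetical placeholders, not values used by this project.
#
#   deployment = await hub.exec.azurerm.resource.deployment.create_or_update(
#       ctx=ctx,
#       name="testdeploy",
#       resource_group="testgroup",
#       deploy_template='{"$schema": "...", "contentVersion": "1.0.0.0", "resources": []}',
#       deploy_params='{"location": {"value": "eastus"}}',
#   )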
async def get(hub, ctx, name, resource_group, **kwargs):
"""
.. versionadded:: 1.0.0
Get details about a specific deployment.
:param name: The name of the deployment to query.
:param resource_group: The resource group name assigned to the deployment.
CLI Example:
.. code-block:: bash
azurerm.resource.deployment.get testdeploy testgroup
"""
resconn = await hub.exec.azurerm.utils.get_client(ctx, "resource", **kwargs)
try:
deploy = resconn.deployments.get(
deployment_name=name, resource_group_name=resource_group
)
result = deploy.as_dict()
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("resource", str(exc), **kwargs)
result = {"error": str(exc)}
return result
async def cancel(hub, ctx, name, resource_group, **kwargs):
"""
.. versionadded:: 1.0.0
Cancel a deployment if in 'Accepted' or 'Running' state.
:param name: The name of the deployment to cancel.
:param resource_group: The resource group name assigned to the deployment.
CLI Example:
.. code-block:: bash
azurerm.resource.deployment.cancel testdeploy testgroup
"""
resconn = await hub.exec.azurerm.utils.get_client(ctx, "resource", **kwargs)
try:
resconn.deployments.cancel(
deployment_name=name, resource_group_name=resource_group
)
result = {"result": True}
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("resource", str(exc), **kwargs)
result = {"error": str(exc), "result": False}
return result
async def validate(
hub,
ctx,
name,
resource_group,
deploy_mode=None,
debug_setting=None,
deploy_params=None,
parameters_link=None,
deploy_template=None,
template_link=None,
**kwargs,
):
"""
.. versionadded:: 1.0.0
Validates whether the specified template is syntactically correct and will be accepted by Azure Resource Manager.
:param name: The name of the deployment to validate.
:param resource_group: The resource group name assigned to the deployment.
:param deploy_mode: The mode that is used to deploy resources. This value can be either
'incremental' or 'complete'. In Incremental mode, resources are deployed without deleting
existing resources that are not included in the template. In Complete mode, resources
are deployed and existing resources in the resource group that are not included in
the template are deleted. Be careful when using Complete mode as you may
unintentionally delete resources.
:param debug_setting: The debug setting of the deployment. The permitted values are 'none',
'requestContent', 'responseContent', or 'requestContent,responseContent'. By logging
information about the request or response, you could potentially expose sensitive data
that is retrieved through the deployment operations.
:param deploy_params: JSON string containing name and value pairs that define the deployment
parameters for the template. You use this element when you want to provide the parameter
values directly in the request rather than link to an existing parameter file. Use either
the parameters_link property or the deploy_params property, but not both.
:param parameters_link: The URI of a parameters file. You use this element to link to an existing
parameters file. Use either the parameters_link property or the deploy_params property, but not both.
:param deploy_template: JSON string of template content. You use this element when you want to pass
the template syntax directly in the request rather than link to an existing template. Use either
the template_link property or the deploy_template property, but not both.
:param template_link: The URI of the template. Use either the template_link property or the
deploy_template property, but not both.
CLI Example:
.. code-block:: bash
azurerm.resource.deployment.validate testdeploy testgroup
"""
resconn = await hub.exec.azurerm.utils.get_client(ctx, "resource", **kwargs)
prop_kwargs = {"mode": deploy_mode}
prop_kwargs["debug_setting"] = {"detail_level": debug_setting}
if deploy_params:
prop_kwargs["parameters"] = deploy_params
else:
if isinstance(parameters_link, dict):
prop_kwargs["parameters_link"] = parameters_link
else:
prop_kwargs["parameters_link"] = {"uri": parameters_link}
if deploy_template:
prop_kwargs["template"] = deploy_template
else:
if isinstance(template_link, dict):
prop_kwargs["template_link"] = template_link
else:
prop_kwargs["template_link"] = {"uri": template_link}
deploy_kwargs = kwargs.copy()
deploy_kwargs.update(prop_kwargs)
try:
deploy_model = await hub.exec.azurerm.utils.create_object_model(
"resource.resources", "DeploymentProperties", **deploy_kwargs
)
except TypeError as exc:
result = {
"error": "The object model could not be built. ({0})".format(str(exc))
}
return result
try:
local_validation = deploy_model.validate()
if local_validation:
raise local_validation[0]
deploy = resconn.deployments.validate(
deployment_name=name,
resource_group_name=resource_group,
properties=deploy_model,
)
result = deploy.as_dict()
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("resource", str(exc), **kwargs)
result = {"error": str(exc)}
except SerializationError as exc:
result = {
"error": "The object model could not be parsed. ({0})".format(str(exc))
}
return result
async def export_template(hub, ctx, name, resource_group, **kwargs):
"""
.. versionadded:: 1.0.0
Exports the template used for the specified deployment.
:param name: The name of the deployment to query.
:param resource_group: The resource group name assigned to the deployment.
CLI Example:
.. code-block:: bash
azurerm.resource.deployment.export_template testdeploy testgroup
"""
resconn = await hub.exec.azurerm.utils.get_client(ctx, "resource", **kwargs)
try:
deploy = resconn.deployments.export_template(
deployment_name=name, resource_group_name=resource_group
)
result = deploy.as_dict()
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("resource", str(exc), **kwargs)
result = {"error": str(exc)}
return result
async def list_(hub, ctx, resource_group, **kwargs):
"""
.. versionadded:: 1.0.0
List all deployments within a resource group.
CLI Example:
.. code-block:: bash
azurerm.resource.deployment.list testgroup
"""
result = {}
resconn = await hub.exec.azurerm.utils.get_client(ctx, "resource", **kwargs)
try:
deployments = await hub.exec.azurerm.utils.paged_object_to_list(
resconn.deployments.list_by_resource_group(
resource_group_name=resource_group
)
)
for deploy in deployments:
result[deploy["name"]] = deploy
except CloudError as exc:
await hub.exec.azurerm.utils.log_cloud_error("resource", str(exc), **kwargs)
result = {"error": str(exc)}
return result
```
#### File: azurerm/network/load_balancer.py
```python
from __future__ import absolute_import
from dict_tools import differ
import logging
import re
log = logging.getLogger(__name__)
TREQ = {
"present": {
"require": [
"states.azurerm.resource.group.present",
"states.azurerm.network.public_ip_address.present",
"states.azurerm.network.virtual_network.present",
"states.azurerm.network.virtual_network.subnet_present",
]
},
}
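# TREQ declares the states expected to be applied before present() runs (idem's
# transparent requisite mechanism, inferred here from the key names used above).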
async def present(
hub,
ctx,
name,
resource_group,
sku=None,
frontend_ip_configurations=None,
backend_address_pools=None,
load_balancing_rules=None,
probes=None,
inbound_nat_rules=None,
inbound_nat_pools=None,
outbound_nat_rules=None,
tags=None,
connection_auth=None,
**kwargs,
):
"""
.. versionadded:: 1.0.0
Ensure a load balancer exists.
:param name:
Name of the load balancer.
:param resource_group:
The resource group assigned to the load balancer.
:param sku:
The load balancer SKU, which can be 'Basic' or 'Standard'.
:param tags:
A dictionary of strings can be passed as tag metadata to the load balancer object.
:param frontend_ip_configurations:
An optional list of dictionaries representing valid FrontendIPConfiguration objects. A frontend IP
configuration can be either private (using private IP address and subnet parameters) or public (using a
reference to a public IP address object). Valid parameters are:
- ``name``: The name of the resource that is unique within a resource group.
- ``private_ip_address``: The private IP address of the IP configuration. Required if
'private_ip_allocation_method' is 'Static'.
- ``private_ip_allocation_method``: The Private IP allocation method. Possible values are: 'Static' and
'Dynamic'.
- ``subnet``: Name of an existing subnet inside of which the frontend IP will reside.
- ``public_ip_address``: Name of an existing public IP address which will be assigned to the frontend IP object.
:param backend_address_pools:
An optional list of dictionaries representing valid BackendAddressPool objects. Only the 'name' parameter is
valid for a BackendAddressPool dictionary. All other parameters are read-only references from other objects
linking to the backend address pool. Inbound traffic is randomly load balanced across IPs in the backend IPs.
:param probes:
An optional list of dictionaries representing valid Probe objects. Valid parameters are:
- ``name``: The name of the resource that is unique within a resource group.
- ``protocol``: The protocol of the endpoint. Possible values are 'Http' or 'Tcp'. If 'Tcp' is specified, a
received ACK is required for the probe to be successful. If 'Http' is specified, a 200 OK response from the
specified URI is required for the probe to be successful.
- ``port``: The port for communicating the probe. Possible values range from 1 to 65535, inclusive.
- ``interval_in_seconds``: The interval, in seconds, for how frequently to probe the endpoint for health status.
Typically, the interval is slightly less than half the allocated timeout period (in seconds) which allows two
full probes before taking the instance out of rotation. The default value is 15, the minimum value is 5.
        - ``number_of_probes``: The number of probes with no response that will result in stopping further traffic from
          being delivered to the endpoint. This value allows endpoints to be taken out of rotation faster or slower
than the typical times used in Azure.
- ``request_path``: The URI used for requesting health status from the VM. Path is required if a protocol is
set to 'Http'. Otherwise, it is not allowed. There is no default value.
:param load_balancing_rules:
An optional list of dictionaries representing valid LoadBalancingRule objects. Valid parameters are:
- ``name``: The name of the resource that is unique within a resource group.
- ``load_distribution``: The load distribution policy for this rule. Possible values are 'Default', 'SourceIP',
and 'SourceIPProtocol'.
- ``frontend_port``: The port for the external endpoint. Port numbers for each rule must be unique within the
Load Balancer. Acceptable values are between 0 and 65534. Note that value 0 enables 'Any Port'.
- ``backend_port``: The port used for internal connections on the endpoint. Acceptable values are between 0 and
65535. Note that value 0 enables 'Any Port'.
- ``idle_timeout_in_minutes``: The timeout for the TCP idle connection. The value can be set between 4 and 30
minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP.
- ``enable_floating_ip``: Configures a virtual machine's endpoint for the floating IP capability required
to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn
Availability Groups in SQL server. This setting can't be changed after you create the endpoint.
- ``disable_outbound_snat``: Configures SNAT for the VMs in the backend pool to use the public IP address
specified in the frontend of the load balancing rule.
- ``frontend_ip_configuration``: Name of the frontend IP configuration object used by the load balancing rule
object.
- ``backend_address_pool``: Name of the backend address pool object used by the load balancing rule object.
Inbound traffic is randomly load balanced across IPs in the backend IPs.
- ``probe``: Name of the probe object used by the load balancing rule object.
:param inbound_nat_rules:
An optional list of dictionaries representing valid InboundNatRule objects. Defining inbound NAT rules on your
load balancer is mutually exclusive with defining an inbound NAT pool. Inbound NAT pools are referenced from
virtual machine scale sets. NICs that are associated with individual virtual machines cannot reference an
Inbound NAT pool. They have to reference individual inbound NAT rules. Valid parameters are:
- ``name``: The name of the resource that is unique within a resource group.
- ``frontend_ip_configuration``: Name of the frontend IP configuration object used by the inbound NAT rule
object.
- ``protocol``: Possible values include 'Udp', 'Tcp', or 'All'.
- ``frontend_port``: The port for the external endpoint. Port numbers for each rule must be unique within the
Load Balancer. Acceptable values range from 1 to 65534.
- ``backend_port``: The port used for the internal endpoint. Acceptable values range from 1 to 65535.
- ``idle_timeout_in_minutes``: The timeout for the TCP idle connection. The value can be set between 4 and 30
minutes. The default value is 4 minutes. This element is only used when the protocol is set to TCP.
- ``enable_floating_ip``: Configures a virtual machine's endpoint for the floating IP capability required
to configure a SQL AlwaysOn Availability Group. This setting is required when using the SQL AlwaysOn
Availability Groups in SQL server. This setting can't be changed after you create the endpoint.
:param inbound_nat_pools:
An optional list of dictionaries representing valid InboundNatPool objects. They define an external port range
for inbound NAT to a single backend port on NICs associated with a load balancer. Inbound NAT rules are created
automatically for each NIC associated with the Load Balancer using an external port from this range. Defining an
Inbound NAT pool on your Load Balancer is mutually exclusive with defining inbound NAT rules. Inbound NAT pools
are referenced from virtual machine scale sets. NICs that are associated with individual virtual machines cannot
reference an inbound NAT pool. They have to reference individual inbound NAT rules. Valid parameters are:
- ``name``: The name of the resource that is unique within a resource group.
- ``frontend_ip_configuration``: Name of the frontend IP configuration object used by the inbound NAT pool
object.
- ``protocol``: Possible values include 'Udp', 'Tcp', or 'All'.
- ``frontend_port_range_start``: The first port number in the range of external ports that will be used to
provide Inbound NAT to NICs associated with a load balancer. Acceptable values range between 1 and 65534.
- ``frontend_port_range_end``: The last port number in the range of external ports that will be used to
provide Inbound NAT to NICs associated with a load balancer. Acceptable values range between 1 and 65535.
- ``backend_port``: The port used for internal connections to the endpoint. Acceptable values are between 1 and
65535.
:param outbound_nat_rules:
An optional list of dictionaries representing valid OutboundNatRule objects. Valid parameters are:
- ``name``: The name of the resource that is unique within a resource group.
- ``frontend_ip_configuration``: Name of the frontend IP configuration object used by the outbound NAT rule
object.
- ``backend_address_pool``: Name of the backend address pool object used by the outbound NAT rule object.
Outbound traffic is randomly load balanced across IPs in the backend IPs.
- ``allocated_outbound_ports``: The number of outbound ports to be used for NAT.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure load balancer exists:
azurerm.network.load_balancer.present:
- name: lb1
- resource_group: group1
- location: eastus
- frontend_ip_configurations:
- name: lb1_feip1
public_ip_address: pub_ip1
- backend_address_pools:
- name: lb1_bepool1
- probes:
- name: lb1_webprobe1
protocol: tcp
port: 80
interval_in_seconds: 5
number_of_probes: 2
- load_balancing_rules:
- name: lb1_webprobe1
protocol: tcp
frontend_port: 80
backend_port: 80
idle_timeout_in_minutes: 4
frontend_ip_configuration: lb1_feip1
backend_address_pool: lb1_bepool1
probe: lb1_webprobe1
- tags:
contact_name: <NAME>
- connection_auth: {{ profile }}
"""
ret = {"name": name, "result": False, "comment": "", "changes": {}}
action = "create"
if not isinstance(connection_auth, dict):
if ctx["acct"]:
connection_auth = ctx["acct"]
else:
ret[
"comment"
] = "Connection information must be specified via acct or connection_auth dictionary!"
return ret
if sku:
sku = {"name": sku.capitalize()}
load_bal = await hub.exec.azurerm.network.load_balancer.get(
ctx, name, resource_group, azurerm_log_level="info", **connection_auth
)
if "error" not in load_bal:
action = "update"
# tag changes
tag_changes = differ.deep_diff(load_bal.get("tags", {}), tags or {})
if tag_changes:
ret["changes"]["tags"] = tag_changes
# sku changes
if sku:
sku_changes = differ.deep_diff(load_bal.get("sku", {}), sku)
if sku_changes:
ret["changes"]["sku"] = sku_changes
# frontend_ip_configurations changes
if frontend_ip_configurations:
comp_ret = await hub.exec.azurerm.utils.compare_list_of_dicts(
load_bal.get("frontend_ip_configurations", []),
frontend_ip_configurations,
["public_ip_address", "subnet"],
)
if comp_ret.get("comment"):
ret["comment"] = '"frontend_ip_configurations" {0}'.format(
comp_ret["comment"]
)
return ret
if comp_ret.get("changes"):
ret["changes"]["frontend_ip_configurations"] = comp_ret["changes"]
# backend_address_pools changes
if backend_address_pools:
comp_ret = await hub.exec.azurerm.utils.compare_list_of_dicts(
load_bal.get("backend_address_pools", []), backend_address_pools
)
if comp_ret.get("comment"):
ret["comment"] = '"backend_address_pools" {0}'.format(
comp_ret["comment"]
)
return ret
if comp_ret.get("changes"):
ret["changes"]["backend_address_pools"] = comp_ret["changes"]
# probes changes
if probes:
comp_ret = await hub.exec.azurerm.utils.compare_list_of_dicts(
load_bal.get("probes", []), probes
)
if comp_ret.get("comment"):
ret["comment"] = '"probes" {0}'.format(comp_ret["comment"])
return ret
if comp_ret.get("changes"):
ret["changes"]["probes"] = comp_ret["changes"]
# load_balancing_rules changes
if load_balancing_rules:
comp_ret = await hub.exec.azurerm.utils.compare_list_of_dicts(
load_bal.get("load_balancing_rules", []),
load_balancing_rules,
["frontend_ip_configuration", "backend_address_pool", "probe"],
)
if comp_ret.get("comment"):
ret["comment"] = '"load_balancing_rules" {0}'.format(
comp_ret["comment"]
)
return ret
if comp_ret.get("changes"):
ret["changes"]["load_balancing_rules"] = comp_ret["changes"]
# inbound_nat_rules changes
if inbound_nat_rules:
comp_ret = await hub.exec.azurerm.utils.compare_list_of_dicts(
load_bal.get("inbound_nat_rules", []),
inbound_nat_rules,
["frontend_ip_configuration"],
)
if comp_ret.get("comment"):
ret["comment"] = '"inbound_nat_rules" {0}'.format(comp_ret["comment"])
return ret
if comp_ret.get("changes"):
ret["changes"]["inbound_nat_rules"] = comp_ret["changes"]
# inbound_nat_pools changes
if inbound_nat_pools:
comp_ret = await hub.exec.azurerm.utils.compare_list_of_dicts(
load_bal.get("inbound_nat_pools", []),
inbound_nat_pools,
["frontend_ip_configuration"],
)
if comp_ret.get("comment"):
ret["comment"] = '"inbound_nat_pools" {0}'.format(comp_ret["comment"])
return ret
if comp_ret.get("changes"):
ret["changes"]["inbound_nat_pools"] = comp_ret["changes"]
# outbound_nat_rules changes
if outbound_nat_rules:
comp_ret = await hub.exec.azurerm.utils.compare_list_of_dicts(
load_bal.get("outbound_nat_rules", []),
outbound_nat_rules,
["frontend_ip_configuration"],
)
if comp_ret.get("comment"):
ret["comment"] = '"outbound_nat_rules" {0}'.format(comp_ret["comment"])
return ret
if comp_ret.get("changes"):
ret["changes"]["outbound_nat_rules"] = comp_ret["changes"]
if not ret["changes"]:
ret["result"] = True
ret["comment"] = "Load balancer {0} is already present.".format(name)
return ret
if ctx["test"]:
ret["result"] = None
ret["comment"] = "Load balancer {0} would be updated.".format(name)
return ret
else:
ret["changes"] = {
"old": {},
"new": {
"name": name,
"sku": sku,
"tags": tags,
"frontend_ip_configurations": frontend_ip_configurations,
"backend_address_pools": backend_address_pools,
"load_balancing_rules": load_balancing_rules,
"probes": probes,
"inbound_nat_rules": inbound_nat_rules,
"inbound_nat_pools": inbound_nat_pools,
"outbound_nat_rules": outbound_nat_rules,
},
}
if ctx["test"]:
ret["comment"] = "Load balancer {0} would be created.".format(name)
ret["result"] = None
return ret
lb_kwargs = kwargs.copy()
lb_kwargs.update(connection_auth)
load_bal = await hub.exec.azurerm.network.load_balancer.create_or_update(
ctx=ctx,
name=name,
resource_group=resource_group,
sku=sku,
tags=tags,
frontend_ip_configurations=frontend_ip_configurations,
backend_address_pools=backend_address_pools,
load_balancing_rules=load_balancing_rules,
probes=probes,
inbound_nat_rules=inbound_nat_rules,
inbound_nat_pools=inbound_nat_pools,
outbound_nat_rules=outbound_nat_rules,
**lb_kwargs,
)
if "error" not in load_bal:
ret["result"] = True
ret["comment"] = f"Load balancer {name} has been {action}d."
return ret
ret["comment"] = "Failed to {0} load balancer {1}! ({2})".format(
action, name, load_bal.get("error")
)
if not ret["result"]:
ret["changes"] = {}
return ret
async def absent(hub, ctx, name, resource_group, connection_auth=None, **kwargs):
"""
.. versionadded:: 1.0.0
Ensure a load balancer does not exist in the resource group.
:param name:
Name of the load balancer.
:param resource_group:
The resource group assigned to the load balancer.
:param connection_auth:
A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
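    Example usage (an illustrative sketch mirroring the ``present`` example above):
    .. code-block:: yaml
        Ensure load balancer is absent:
            azurerm.network.load_balancer.absent:
                - name: lb1
                - resource_group: group1
                - connection_auth: {{ profile }}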
"""
ret = {"name": name, "result": False, "comment": "", "changes": {}}
if not isinstance(connection_auth, dict):
if ctx["acct"]:
connection_auth = ctx["acct"]
else:
ret[
"comment"
] = "Connection information must be specified via acct or connection_auth dictionary!"
return ret
load_bal = await hub.exec.azurerm.network.load_balancer.get(
ctx, name, resource_group, azurerm_log_level="info", **connection_auth
)
if "error" in load_bal:
ret["result"] = True
ret["comment"] = "Load balancer {0} was not found.".format(name)
return ret
if ctx["test"]:
ret["comment"] = "Load balancer {0} would be deleted.".format(name)
ret["result"] = None
ret["changes"] = {
"old": load_bal,
"new": {},
}
return ret
deleted = await hub.exec.azurerm.network.load_balancer.delete(
ctx, name, resource_group, **connection_auth
)
if deleted:
ret["result"] = True
ret["comment"] = "Load balancer {0} has been deleted.".format(name)
ret["changes"] = {"old": load_bal, "new": {}}
return ret
ret["comment"] = "Failed to delete load balancer {0}!".format(name)
return ret
```
#### File: azurerm/storage/account.py
```python
from __future__ import absolute_import
from dict_tools import differ
import logging
log = logging.getLogger(__name__)
TREQ = {"present": {"require": ["states.azurerm.resource.group.present",]}}
async def present(
hub,
ctx,
name,
resource_group,
sku,
kind,
location,
custom_domain=None,
encryption=None,
network_rule_set=None,
access_tier=None,
https_traffic_only=None,
is_hns_enabled=None,
tags=None,
connection_auth=None,
**kwargs,
):
"""
.. versionadded:: 2.0.0
Ensure a storage account exists in the resource group.
:param name: The name of the storage account being created. Storage account names must be between 3 and 24
characters in length and use numbers and lower-case letters only.
:param resource_group: The name of the resource group that the storage account belongs to.
:param sku: The name of the storage account SKU. Possible values include: 'Standard_LRS', 'Standard_GRS',
'Standard_RAGRS', 'Standard_ZRS', 'Premium_LRS', 'Premium_ZRS', 'Standard_GZRS', and 'Standard_RAGZRS'.
:param kind: Indicates the type of storage account. Possible values include: 'Storage', 'StorageV2', 'BlobStorage'.
:param location: Gets or sets the location of the resource. This will be one of the supported and registered Azure
Geo Regions (e.g. West US, East US, Southeast Asia, etc.). The geo region of a resource cannot be changed once
it is created, but if an identical geo region is specified on update, the request will succeed.
:param custom_domain: User domain assigned to the storage account. Valid parameters are:
- ``name``: Required. Gets or sets the custom domain name assigned to the storage account. Name is the CNAME
source. To clear the existing custom domain, use an empty string for this property.
- ``use_sub_domain_name``: Indicates whether indirect CName validation is enabled. Default value is false.
This should only be set on updates.
:param encryption: Provides the encryption settings on the account. If left unspecified the account encryption
settings will remain the same. The default setting is unencrypted.
:param network_rule_set: A dictionary representing a NetworkRuleSet object.
:param access_tier: The access tier is used for billing. Required for when the kind is set to 'BlobStorage'.
Possible values include: 'Hot' and 'Cool'.
:param https_traffic_only: Allows https traffic only to storage service if set to True. The default value
is False.
:param is_hns_enabled: Account HierarchicalNamespace enabled if set to True. The default value is False.
:param tags: A dictionary of strings can be passed as tag metadata to the storage account object.
:param connection_auth: A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure storage account exists:
azurerm.storage.account.present:
- name: my_account
- resource_group: my_rg
- sku: 'Standard_LRS'
- kind: 'Storage'
- location: 'eastus'
- tags:
contact_name: <NAME>
- connection_auth: {{ profile }}
"""
ret = {"name": name, "result": False, "comment": "", "changes": {}}
action = "create"
if not isinstance(connection_auth, dict):
if ctx["acct"]:
connection_auth = ctx["acct"]
else:
ret[
"comment"
] = "Connection information must be specified via acct or connection_auth dictionary!"
return ret
account = await hub.exec.azurerm.storage.account.get_properties(
ctx, name, resource_group, **connection_auth
)
if "error" not in account:
action = "update"
tag_changes = differ.deep_diff(account.get("tags", {}), tags or {})
if tag_changes:
ret["changes"]["tags"] = tag_changes
if sku != account.get("sku").get("name"):
ret["changes"]["sku"] = {"old": account.get("sku").get("name"), "new": sku}
if kind != account.get("kind"):
ret["changes"]["kind"] = {"old": account.get("kind"), "new": kind}
if https_traffic_only is not None:
if https_traffic_only != account.get("enable_https_traffic_only"):
ret["changes"]["enable_https_traffic_only"] = {
"old": account.get("enable_https_traffic_only"),
"new": https_traffic_only,
}
if is_hns_enabled is not None:
if is_hns_enabled != account.get("is_hns_enabled"):
ret["changes"]["is_hns_enabled"] = {
"old": account.get("is_hns_enabled"),
"new": is_hns_enabled,
}
if network_rule_set:
rule_set_changes = differ.deep_diff(
account.get("network_rule_set", {}), network_rule_set or {}
)
if rule_set_changes:
ret["changes"]["network_rule_set"] = rule_set_changes
if encryption:
encryption_changes = differ.deep_diff(
account.get("encryption", {}), encryption or {}
)
if encryption_changes:
ret["changes"]["encryption"] = encryption_changes
# The Custom Domain can only be added on once, so if it already exists then this cannot be changed
if custom_domain:
domain_changes = differ.deep_diff(
account.get("custom_domain", {}), custom_domain or {}
)
if domain_changes:
ret["changes"]["custom_domain"] = domain_changes
if access_tier:
if access_tier != account.get("access_tier"):
ret["changes"]["access_tier"] = {
"old": account.get("access_tier"),
"new": access_tier,
}
if not ret["changes"]:
ret["result"] = True
ret["comment"] = "Storage account {0} is already present.".format(name)
return ret
if ctx["test"]:
ret["result"] = None
ret["comment"] = "Storage account {0} would be updated.".format(name)
return ret
else:
ret["changes"] = {
"old": {},
"new": {
"name": name,
"resource_group": resource_group,
"sku": sku,
"kind": kind,
"location": location,
},
}
if tags:
ret["changes"]["new"]["tags"] = tags
if access_tier:
ret["changes"]["new"]["access_tier"] = access_tier
if custom_domain:
ret["changes"]["new"]["custom_domain"] = custom_domain
if encryption:
ret["changes"]["new"]["encryption"] = encryption
if network_rule_set:
ret["changes"]["new"]["network_rule_set"] = network_rule_set
if https_traffic_only is not None:
ret["changes"]["new"]["enable_https_traffic_only"] = https_traffic_only
if is_hns_enabled is not None:
ret["changes"]["new"]["is_hns_enabled"] = is_hns_enabled
if ctx["test"]:
ret["comment"] = "Storage account {0} would be created.".format(name)
ret["result"] = None
return ret
account_kwargs = kwargs.copy()
account_kwargs.update(connection_auth)
account = await hub.exec.azurerm.storage.account.create(
ctx=ctx,
name=name,
resource_group=resource_group,
tags=tags,
sku=sku,
kind=kind,
location=location,
custom_domain=custom_domain,
encryption=encryption,
network_rule_set=network_rule_set,
access_tier=access_tier,
https_traffic_only=https_traffic_only,
is_hns_enabled=is_hns_enabled,
**account_kwargs,
)
if "error" not in account:
ret["result"] = True
ret["comment"] = f"Storage account {name} has been {action}d."
return ret
ret["comment"] = "Failed to {0} storage acccount {1}! ({2})".format(
action, name, account.get("error")
)
if not ret["result"]:
ret["changes"] = {}
return ret
async def absent(hub, ctx, name, resource_group, connection_auth=None, **kwargs):
"""
.. versionadded:: 2.0.0
Ensure a storage account does not exist in the resource group.
:param name: The name of the storage account being deleted.
:param resource_group: The name of the resource group that the storage account belongs to.
:param connection_auth: A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure storage account does not exist:
azurerm.storage.account.absent:
- name: my_account
- resource_group: my_rg
- connection_auth: {{ profile }}
"""
ret = {"name": name, "result": False, "comment": "", "changes": {}}
if not isinstance(connection_auth, dict):
if ctx["acct"]:
connection_auth = ctx["acct"]
else:
ret[
"comment"
] = "Connection information must be specified via acct or connection_auth dictionary!"
return ret
account = await hub.exec.azurerm.storage.account.get_properties(
ctx, name, resource_group, **connection_auth
)
if "error" in account:
ret["result"] = True
ret["comment"] = "Storage account {0} was not found.".format(name)
return ret
if ctx["test"]:
ret["comment"] = "Storage account {0} would be deleted.".format(name)
ret["result"] = None
ret["changes"] = {
"old": account,
"new": {},
}
return ret
deleted = await hub.exec.azurerm.storage.account.delete(
ctx, name, resource_group, **connection_auth
)
if deleted:
ret["result"] = True
ret["comment"] = "Storage account {0} has been deleted.".format(name)
ret["changes"] = {"old": account, "new": {}}
return ret
ret["comment"] = "Failed to delete storage account {0}!".format(name)
return ret
```
#### File: azurerm/storage/container.py
```python
from __future__ import absolute_import
from dict_tools import differ
import logging
log = logging.getLogger(__name__)
TREQ = {
"present": {
"require": [
"states.azurerm.resource.group.present",
"states.azurerm.storage.account.present",
]
}
}
async def present(
hub,
ctx,
name,
account,
resource_group,
public_access=None,
metadata=None,
tags=None,
connection_auth=None,
**kwargs,
):
"""
.. versionadded:: 2.0.0
Ensure a blob container exists.
:param name: The name of the blob container within the specified storage account. Blob container names must be
between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-)
character must be immediately preceded and followed by a letter or number.
:param account: The name of the storage account within the specified resource group. Storage account names must be
between 3 and 24 characters in length and use numbers and lower-case letters only.
:param resource_group: The name of the resource group within the user's subscription. The name is case insensitive.
:param public_access: Specifies whether data in the container may be accessed publicly and the level of access.
Possible values include: 'Container', 'Blob', 'None'. Defaults to None.
:param metadata: A dictionary of name-value pairs to associate with the container as metadata. Defaults to None.
:param tags: A dictionary of strings can be passed as tag metadata to the container object.
:param connection_auth: A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure blob container exists:
azurerm.storage.container.present:
- name: my_container
- account: my_account
- resource_group: my_rg
- public_access: 'Blob'
- tags:
contact_name: <NAME>
- connection_auth: {{ profile }}
"""
ret = {"name": name, "result": False, "comment": "", "changes": {}}
action = "create"
if not isinstance(connection_auth, dict):
if ctx["acct"]:
connection_auth = ctx["acct"]
else:
ret[
"comment"
] = "Connection information must be specified via acct or connection_auth dictionary!"
return ret
container = await hub.exec.azurerm.storage.container.get(
ctx, name, account, resource_group, **connection_auth
)
if "error" not in container:
action = "update"
tag_changes = differ.deep_diff(container.get("tags", {}), tags or {})
if tag_changes:
ret["changes"]["tags"] = tag_changes
metadata_changes = differ.deep_diff(
container.get("metadata", {}), metadata or {}
)
if metadata_changes:
ret["changes"]["metadata"] = metadata_changes
if public_access and public_access != container.get("public_access"):
ret["changes"]["public_access"] = {
"old": container.get("public_access"),
"new": public_access,
}
if not ret["changes"]:
ret["result"] = True
ret["comment"] = "Blob container {0} is already present.".format(name)
return ret
if ctx["test"]:
ret["result"] = None
ret["comment"] = "Blob container {0} would be updated.".format(name)
return ret
else:
ret["changes"] = {
"old": {},
"new": {
"name": name,
"account": account,
"resource_group": resource_group,
},
}
if tags:
ret["changes"]["new"]["tags"] = tags
if public_access:
ret["changes"]["new"]["public_access"] = public_access
if metadata:
ret["changes"]["new"]["metadata"] = metadata
if ctx["test"]:
ret["comment"] = "Blob container {0} would be created.".format(name)
ret["result"] = None
return ret
container_kwargs = kwargs.copy()
container_kwargs.update(connection_auth)
if action == "create":
container = await hub.exec.azurerm.storage.container.create(
ctx=ctx,
name=name,
account=account,
resource_group=resource_group,
tags=tags,
public_access=public_access,
metadata=metadata,
**container_kwargs,
)
else:
container = await hub.exec.azurerm.storage.container.update(
ctx=ctx,
name=name,
account=account,
resource_group=resource_group,
tags=tags,
public_access=public_access,
metadata=metadata,
**container_kwargs,
)
if "error" not in container:
ret["result"] = True
ret["comment"] = f"Blob container {name} has been {action}d."
return ret
ret["comment"] = "Failed to {0} blob container {1}! ({2})".format(
action, name, container.get("error")
)
if not ret["result"]:
ret["changes"] = {}
return ret
async def immutability_policy_present(
hub,
ctx,
name,
account,
resource_group,
immutability_period,
if_match=None,
tags=None,
connection_auth=None,
**kwargs,
):
"""
.. versionadded:: 2.0.0
Ensures that the immutability policy of a specified blob container exists. The container must be of account kind
'StorageV2' in order to utilize an immutability policy.
:param name: The name of the blob container within the specified storage account. Blob container names must be
between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-)
character must be immediately preceded and followed by a letter or number.
:param account: The name of the storage account within the specified resource group. Storage account names must be
between 3 and 24 characters in length and use numbers and lower-case letters only.
:param resource_group: The name of the resource group within the user's subscription. The name is case insensitive.
:param immutability_period: The immutability period for the blobs in the container since the policy
creation, in days.
:param if_match: The entity state (ETag) version of the immutability policy to update. It is important to note that
the ETag must be passed as a string that includes double quotes. For example, '"8d7b4bb4d393b8c"' is a valid
string to pass as the if_match parameter, but "8d7b4bb4d393b8c" is not. Defaults to None.
:param tags: A dictionary of strings can be passed as tag metadata to the container object.
:param connection_auth: A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure immutability policy exists:
azurerm.storage.container.immutability_policy_present:
- name: my_container
- account: my_account
- resource_group: my_rg
- immutability_period: 10
- tags:
contact_name: <NAME>
- connection_auth: {{ profile }}
"""
ret = {"name": name, "result": False, "comment": "", "changes": {}}
action = "create"
if not isinstance(connection_auth, dict):
if ctx["acct"]:
connection_auth = ctx["acct"]
else:
ret[
"comment"
] = "Connection information must be specified via acct or connection_auth dictionary!"
return ret
policy = await hub.exec.azurerm.storage.container.get_immutability_policy(
ctx, name, account, resource_group, if_match, **connection_auth
)
if "error" not in policy:
action = "update"
tag_changes = differ.deep_diff(policy.get("tags", {}), tags or {})
if tag_changes:
ret["changes"]["tags"] = tag_changes
if immutability_period != policy.get(
"immutability_period_since_creation_in_days"
):
ret["changes"]["immutability_period_since_creation_in_days"] = {
"old": policy.get("immutability_period_since_creation_in_days"),
"new": immutability_period,
}
if not ret["changes"]:
ret["result"] = True
ret[
"comment"
] = "The immutability policy of the blob container {0} is already present.".format(
name
)
return ret
if ctx["test"]:
ret["result"] = None
ret[
"comment"
] = "The immutability policy of the blob container {0} would be updated.".format(
name
)
return ret
else:
ret["changes"] = {
"old": {},
"new": {
"name": name,
"account": account,
"resource_group": resource_group,
"immutability_period_since_creation_in_days": immutability_period,
},
}
if tags:
ret["changes"]["new"]["tags"] = tags
if if_match:
ret["changes"]["new"]["if_match"] = if_match
if ctx["test"]:
ret[
"comment"
] = "The immutability policy of the blob container {0} would be created.".format(
name
)
ret["result"] = None
return ret
policy_kwargs = kwargs.copy()
policy_kwargs.update(connection_auth)
policy = await hub.exec.azurerm.storage.container.create_or_update_immutability_policy(
ctx=ctx,
name=name,
account=account,
resource_group=resource_group,
tags=tags,
if_match=if_match,
immutability_period=immutability_period,
**policy_kwargs,
)
if "error" not in policy:
ret["result"] = True
ret[
"comment"
] = f"The immutability policy of the blob container {name} has been {action}d."
return ret
ret[
"comment"
] = "Failed to {0} the immutability policy of the blob container {1}! ({2})".format(
action, name, policy.get("error")
)
if not ret["result"]:
ret["changes"] = {}
return ret
async def absent(
hub, ctx, name, account, resource_group, connection_auth=None, **kwargs
):
"""
.. versionadded:: 2.0.0
Ensures a specified blob container does not exist.
:param name: The name of the blob container within the specified storage account. Blob container names must be
between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-)
character must be immediately preceded and followed by a letter or number.
:param account: The name of the storage account within the specified resource group. Storage account names must be
between 3 and 24 characters in length and use numbers and lower-case letters only.
:param resource_group: The name of the resource group within the user's subscription. The name is case insensitive.
:param connection_auth: A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure blob container is absent:
azurerm.storage.container.absent:
- name: my_container
- account: my_account
- resource_group: my_rg
- connection_auth: {{ profile }}
"""
ret = {"name": name, "result": False, "comment": "", "changes": {}}
if not isinstance(connection_auth, dict):
if ctx["acct"]:
connection_auth = ctx["acct"]
else:
ret[
"comment"
] = "Connection information must be specified via acct or connection_auth dictionary!"
return ret
container = await hub.exec.azurerm.storage.container.get(
ctx, name, account, resource_group, **connection_auth
)
if "error" in container:
ret["result"] = True
ret["comment"] = "Blob container {0} was not found.".format(name)
return ret
if ctx["test"]:
ret["comment"] = "Blob container {0} would be deleted.".format(name)
ret["result"] = None
ret["changes"] = {
"old": container,
"new": {},
}
return ret
deleted = await hub.exec.azurerm.storage.container.delete(
ctx, name, account, resource_group, **connection_auth
)
if deleted:
ret["result"] = True
ret["comment"] = "Blob container {0} has been deleted.".format(name)
ret["changes"] = {"old": container, "new": {}}
return ret
ret["comment"] = "Failed to delete blob container {0}!".format(name)
return ret
async def immutability_policy_absent(
hub,
ctx,
name,
account,
resource_group,
if_match=None,
connection_auth=None,
**kwargs,
):
"""
.. versionadded:: 2.0.0
Ensures that the immutability policy of a specified blob container does not exist.
:param name: The name of the blob container within the specified storage account. Blob container names must be
between 3 and 63 characters in length and use numbers, lower-case letters and dash (-) only. Every dash (-)
character must be immediately preceded and followed by a letter or number.
:param account: The name of the storage account within the specified resource group. Storage account names must be
between 3 and 24 characters in length and use numbers and lower-case letters only.
:param resource_group: The name of the resource group within the user's subscription. The name is case insensitive.
:param if_match: The entity state (ETag) version of the immutability policy to update. It is important to note that
the ETag must be passed as a string that includes double quotes. For example, '"8d7b4bb4d393b8c"' is a valid
string to pass as the if_match parameter, but "8d7b4bb4d393b8c" is not. Defaults to None.
:param connection_auth: A dict with subscription and authentication parameters to be used in connecting to the
Azure Resource Manager API.
Example usage:
.. code-block:: yaml
Ensure immutability policy is absent:
            azurerm.storage.container.immutability_policy_absent:
- name: my_container
- account: my_account
- resource_group: my_rg
- if_match: '"my_etag"'
- connection_auth: {{ profile }}
"""
ret = {"name": name, "result": False, "comment": "", "changes": {}}
if not isinstance(connection_auth, dict):
if ctx["acct"]:
connection_auth = ctx["acct"]
else:
ret[
"comment"
] = "Connection information must be specified via acct or connection_auth dictionary!"
return ret
policy = await hub.exec.azurerm.storage.container.get_immutability_policy(
ctx, name, account, resource_group, if_match, **connection_auth
)
if "error" in policy:
ret["result"] = True
ret[
"comment"
] = "The immutability policy of the blob container {0} was not found.".format(
name
)
return ret
if ctx["test"]:
ret[
"comment"
] = "The immutability policy of the blob container {0} would be deleted.".format(
name
)
ret["result"] = None
ret["changes"] = {
"old": policy,
"new": {},
}
return ret
if not if_match:
if_match = policy.get("etag")
deleted = await hub.exec.azurerm.storage.container.delete_immutability_policy(
ctx, name, account, resource_group, if_match, **connection_auth
)
if deleted:
ret["result"] = True
ret[
"comment"
] = "The immutability policy of the blob container {0} has been deleted.".format(
name
)
ret["changes"] = {"old": policy, "new": {}}
return ret
ret[
"comment"
] = "Failed to delete the immutability policy of the blob container {0}!".format(
name
)
return ret
```
#### File: azurerm/resource/test_group.py
```python
import idem_azurerm.exec.azurerm.resource.group as group
import pytest
@pytest.mark.asyncio
async def test_get(mock_hub):
"""
"""
# await group.get(mock_hub, "name")
# mock_hub.exec.utils.azurerm.log_cloud_error.assert_called_once_with("resource")
``` |
{
"source": "johnopana/Flask-Blogh",
"score": 3
} |
#### File: app/main/views.py
```python
from flask import render_template,url_for,request,abort,redirect
from ..models import Blogpost,User,Comments
from . import main
from .forms import CommentForm,PostForm,UpdateProfile
from flask_login import login_required,current_user
from app import db
@main.route('/')
def index():
'''a function to take a user to the home page'''
posts=Blogpost.query.all()
title="Welcome To New Post-Blogh App"
return render_template('index.html',title=title,posts=posts)
@main.route('/user/<uname>')
def profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
return render_template("profile/profile.html", user = user)
@main.route('/user/<uname>/update',methods = ['GET','POST'])
@login_required
def update_profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile',uname=user.username))
return render_template('profile/update.html',form =form)
@main.route('/post',methods=['GET','POST'])
@login_required
def post():
form=PostForm()
if form.validate_on_submit():
title=form.title.data
content=form.content.data
new_post=Blogpost(title=title,content=content)
new_post.post_save()
return redirect(url_for('main.index'))
title='make a new blog here'
return render_template('post.html',title=title,postform=form)
@main.route('/comment/<int:id>')
def view_comment(id):
post=Blogpost.query.get(id)
title='posts'
return render_template('comment.html',title=title,post=post)
@main.route('/post/new_comment/<int:id>',methods=['GET','POST'])
def new_comment(id):
post=Blogpost.query.get(id)
form=CommentForm()
if form.validate_on_submit():
content=form.content.data
new_comment=Comments(content=content,blogpost=post)
new_comment.comment_save()
return redirect(url_for('main.view_comment',id=post.id))
title='make a new comment here'
return render_template('new_comment.html',title=title,post=post,commentform=form)
@main.route('/post/comment/delete/<int:id>')
def delete_comment(id):
    # Minimal sketch of the previously unimplemented deletion (assumes id is the comment id).
    comment = Comments.query.get(id)
    if comment:
        db.session.delete(comment)
        db.session.commit()
    return redirect(url_for('main.index'))
``` |
{
"source": "johnopana/Istagram-App",
"score": 3
} |
#### File: Istagram-App/photos/tests.py
```python
from django.test import TestCase
from .models import Profile, Image, User, Comments
class ProfileTest(TestCase):
def setUp(self):
self.new_user = User(username='John', email='<EMAIL>', password='<PASSWORD>')
self.new_user.save()
self.new_profile = Profile(photo='image.png', bio='generous', user=self.new_user)
def test_instance(self):
self.assertTrue(isinstance(self.new_profile, Profile))
def test_save_method(self):
self.new_profile.save_profile()
profile = Profile.objects.all()
self.assertTrue(len(profile)>0)
def test_delete_method(self):
self.new_profile.save_profile()
self.new_profile.delete_profile()
profile = Profile.objects.all()
self.assertTrue(len(profile)==0)
# def test_update_profile(self):
# self.new_profile.save_profile()
# self.new_profile.update_bio(self.new_profile.id,'mySelf')
# updated_bio = Profile.objects.filter(bio="mySelf")
# self.assertTrue(len(updated_bio)>0)
class ImageTest(TestCase):
def setUp(self):
self.new_user = User(username='John', email='<EMAIL>', password='<PASSWORD>')
self.new_user.save()
self.new_profile = Profile(photo='image.png', bio='generous', user=self.new_user)
self.new_profile.save()
self.new_image = Image(name='Moringa', image='moringa.jpg', caption='wonderful place to be', profile=self.new_user, like_add=0)
def test_instance(self):
self.assertTrue(isinstance(self.new_image,Image))
def test_save_image(self):
self.new_image.save_image()
image = Image.objects.all()
self.assertTrue(len(image)>0)
def test_delete_image(self):
self.new_image.save_image()
self.new_image.delete_image()
image = Image.objects.all()
self.assertTrue(len(image)==0)
class CommentsTest(TestCase):
def setUp(self):
self.new_user = User(username='John', email='<EMAIL>', password='<PASSWORD>')
self.new_user.save()
self.new_image = Image(name='Moringa', image='moringa.jpg', caption='wonderful place to be', profile=self.new_user, like_add=0)
self.new_image.save()
self.new_comment = Comments(comment='This is a beautiful place',image=self.new_image,user=self.new_user)
def test_instance(self):
self.assertTrue(isinstance(self.new_comment, Comments))
def test_save_comment(self):
self.new_comment.save_comment()
comment = Comments.objects.all()
self.assertTrue(len(comment)>0)
def test_delete_comment(self):
self.new_comment.save_comment()
self.new_comment.delete_comment()
comment = Comments.objects.all()
self.assertTrue(len(comment)==0)
``` |
{
"source": "johnopana/Rental_App",
"score": 3
} |
#### File: Rental_App/houses/tests.py
```python
from django.test import TestCase
from .models import Profile,House,Owner
class ProfileTestClass(TestCase):
#Set up method
def setUp(self):
self.new_profile = Profile(user_id=2,name="Titus",bio="just testing", email='<EMAIL>',profile_pic="image.jpeg")
# Testing instance
def test_instance(self):
self.assertTrue(isinstance(self.new_profile,Profile))
def test_save_method(self):
self.new_profile.save_profile()
profile = Profile.objects.all()
self.assertTrue(len(profile)>0)
def test_delete_method(self):
self.new_profile.save_profile()
self.new_profile.delete_profile()
profile = Profile.objects.all()
self.assertTrue(len(profile)==0)
def tearDown(self):
Profile.objects.all().delete()
class HouseTestClass(TestCase):
def setUp(self):
self.new_owner=Owner(first_name='Peter',last_name='Okumu',address="mwmama", email="<EMAIL>",phone='0789456765', owner_pic='owner.png')
self.new_owner.save()
self.new_house=House(house_no='3F',registry_no="938348H",house_location="lavington",house_pic='image.png',house_type="rental", no_of_rooms=5, price=22000, owner=self.new_owner)
def test_instance(self):
self.assertTrue(isinstance(self.new_house,House))
def test_save_house(self):
self.new_house.save_house()
house = House.objects.all()
self.assertTrue(len(house)>0)
def test_delete_house(self):
self.new_house.save_house()
self.new_house.delete_house()
house = House.objects.all()
self.assertTrue(len(house)==0)
def test_update_house_type_method(self):
self.new_house.save_house()
new_name = 'rental'
update = self.new_house.update_house(self.new_house.id,new_name)
self.assertEqual(update,new_name)
def test_find_method(self):
self.new_house.save_house()
house = self.new_house.find_house(self.new_house.id)
self.assertEqual(house.house_location,'lavington')
def tearDown(self):
House.objects.all().delete()
class OwnerTestClass(TestCase):
def setUp(self):
self.new_owner=Owner(first_name='Peter',last_name='Okumu',address="mwmama", email="<EMAIL>",phone='0789456765', owner_pic='owner.png')
def test_instance(self):
self.assertTrue(isinstance(self.new_owner,Owner))
def test_save_owner(self):
self.new_owner.save_owner()
owner = Owner.objects.all()
self.assertTrue(len(owner)>0)
def test_delete_owner(self):
self.new_owner.save_owner()
self.new_owner.delete_owner()
owner = Owner.objects.all()
        self.assertTrue(len(owner) == 0)
def tearDown(self):
Owner.objects.all().delete()
```
#### File: Rental_App/houses/views.py
```python
from django.shortcuts import render,redirect
from .models import *
from django.contrib.auth.decorators import login_required
from .forms import ProfileForm
def index(request):
return render(request, 'main/index.html')
@login_required(login_url='/accounts/login/?next=/')
def houses(request):
    houses = House.objects.all()
return render(request,'main/houses.html', {'houses':houses})
@login_required(login_url='/accounts/login/?next=/')
def profile(request):
current_user = request.user
profile = Profile.objects.filter(user=current_user).first()
return render(request, 'main/profile.html', { "profile": profile, 'current_user':current_user})
def update_profile(request):
user_profile = Profile.objects.get(user=request.user)
if request.method == "POST":
form = ProfileForm(request.POST,request.FILES,instance=request.user.profile)
if form.is_valid():
form.save()
return redirect('profile')
else:
form = ProfileForm(instance=request.user.profile)
return render(request,'main/update_profile.html', {'form':form})
@login_required(login_url='/accounts/login/?next=/')
def details(request,id):
house = House.objects.get(id=id)
# owner = house.owner_set.all
return render(request, 'main/details.html', {'house':house})
def search_results(request):
if 'house' in request.GET and request.GET["house"]:
search_term = request.GET.get("house")
searched_houses = House.search(search_term)
message = f'{search_term}'
return render(request, 'search.html',{"message":message, "houses":searched_houses})
else:
messsage = "You haven't searched for any term"
return render(request, 'search.html', {"message":message})
``` |
{
"source": "johnoseni1/Diamond-Programming-language",
"score": 2
} |
#### File: johnoseni1/Diamond-Programming-language/rundiamond.py
```python
import functions as modulefunctions
def run_function(index, python_funcs_and_vars, lines, line, _functions, variables):
line = line.split(' ')
func_name = line[0]
del line[0]
modulefunctions.execute_code(
func_name,
line,
variables,
index,
lines,
_functions,
python_funcs_and_vars
)
def run_main(line, variables, index, lines, _functions_dict, python_funcs_and_vars):
    # Keyword arguments keep this call aligned with run_function's parameter order above.
    run_function(index=index, python_funcs_and_vars=python_funcs_and_vars, lines=lines,
                 line=line, _functions=_functions_dict, variables=variables)
    del lines[index]
``` |
{
"source": "JohnOSu/sylph",
"score": 3
} |
#### File: src/sylph_proj/data_obj.py
```python
import json
import enum
from json import JSONDecodeError
from requests import Response
class SylphDataGenerator(enum.Enum):
AUTOMATION_CODE = "automation"
API_REQUEST = "api_request"
APP_UI_INSPECTION = "app"
class SylphDataDict(dict):
data_generator: SylphDataGenerator
def __init__(self, data_source: SylphDataGenerator, data: dict):
super().__init__()
self.data_generator = data_source
for keyname in data.keys():
self[keyname] = data[keyname]
class SylphObjectInitException(Exception):
pass
class SylphDataObject:
data_generator: SylphDataGenerator
def __init__(self, response: Response = None, data: SylphDataDict = None):
# Store init arg in self._src. If arg is response, transform into a dict.
if response is not None and data is not None:
raise SylphObjectInitException("Must be either a Response or a SylphDataDict")
if response is None:
if data is None:
# must be defining function calls
self._src = []
self.data_generator = SylphDataGenerator.AUTOMATION_CODE
else:
if not hasattr(data, 'data_generator'):
raise SylphObjectInitException("If data is provided, it must be a SylphDataDict")
self._src = data
self.data_generator = data.data_generator
else:
self.data_generator = SylphDataGenerator.API_REQUEST
self._src = json.loads(response.content.decode('utf-8'))
def dump_data(self):
data = {}
for key in self._src.keys():
data[key] = self._src[key].dump_data() if issubclass(type(self._src[key]), SylphDataObject) else self._src[key]
return data
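# Illustrative sketch (not part of the original module): concrete DTOs subclass
# SylphDataObject and read their fields out of self._src, as ResponseError and
# ContractViolation below do. The field name used here is hypothetical.
#
#   class UserDto(SylphDataObject):
#       def __init__(self, response=None, data=None):
#           super().__init__(response=response, data=data)
#           self.username = self._src['username']
#
#   dto = UserDto(data=SylphDataDict(SylphDataGenerator.AUTOMATION_CODE, {'username': 'jo'}))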
class SylphCollectionDataObject(SylphDataObject):
def __init__(self, response: Response = None, data: SylphDataDict = None):
super().__init__(response, data)
self._items = []
def __getitem__(self, idx):
return self._items[idx]
@property
def items(self) -> []:
return self._items
@property
def count(self):
return len(self._items)
class ResponseError(SylphDataObject):
def __init__(self, response: Response = None, data: SylphDataDict = None):
try:
super().__init__(response=response, data=data)
except JSONDecodeError:
self._src = {}
self._src['errorCode'] = response.status_code if hasattr(response, 'status_code') else None
self._src['errorMessage'] = response.reason if hasattr(response, 'reason') else None
self.ok: bool = False
self.error_code = self._src['errorCode'] if 'errorCode' in self._src else None
self.error_message = self._src['errorMessage'] if 'errorMessage' in self._src else None
if not self.error_message:
self.error_message = response.text if hasattr(response, 'text') else None
self.status_code = response.status_code if hasattr(response, 'status_code') else self.error_code
self.reason = response.reason if hasattr(response, 'reason') else None
class ContractViolation(SylphDataObject):
def __init__(self, response: Response = None, data: SylphDataDict = None):
super().__init__(response=response, data=data)
self.dto_name = self._src['dto_name']
self.dto_path = self._src['dto_path']
self.dto_exc = self._src['dto_exc']
```
#### File: src/sylph_proj/pages.py
```python
import time
import logging
from abc import ABCMeta, abstractmethod
from appium.webdriver.webdriver import WebDriver as AppiumDriver
from selenium.webdriver.remote.webdriver import WebDriver as SeleniumDriver
from .sylphsession import SylphSessionConfig
from .wrappers import WebTestWrapper
from .wrappers import MobileTestWrapper
class BasePage(metaclass=ABCMeta):
log: logging.Logger
config: SylphSessionConfig
SWIPE_UP = 'up'
SWIPE_DOWN = 'down'
SWIPE_LEFT = 'left'
SWIPE_RIGHT = 'right'
def __init__(self, tw):
self.config = tw.config
self.log = tw.log
self.page_name = self.__class__.__name__
@abstractmethod
def _locator(self, *args):
pass
def _is_done_loading(self, locator_elem) -> bool:
self.log.info(f'{self.page_name} is loading...')
is_ready = self.is_element_available(locator_elem, name=self.page_name)
if is_ready:
self.log.info(f'{self.page_name} is available')
return is_ready
def is_element_available(self, elem, wait=30, name=None) -> bool:
"""Repeated safe check for the specified wait time (seconds) until the element is displayed and enabled.
If not found, return false.
Args:
:param elem: A lambda function that returns a webelement.
            :param wait: (Default: 30) The wait time in seconds for the process to complete.
:param name: (Optional) A name describing the webelement for logging purposes.
Returns:
True if element is displayed.
"""
e = None
beginning = time.time()
for w in range(0, wait):
try:
e = elem()
action_displayed = e.is_displayed
action_enabled = e.is_enabled
if action_displayed() and action_enabled() is True:
break
except:
pass
time.sleep(1)
since = time.time()
span = self.span(since, beginning)
span_msg = f'Elapsed seconds: {span}'
            wait_msg = f'Waiting for {name} | {span_msg}' if name else f'Waiting for element | {span_msg}'
self.log.debug(wait_msg)
if span >= wait:
wait_msg = f'{name} not found' if name else 'Element not found'
self.log.debug(wait_msg)
return False
msg = 'Found Element'
self.log.debug(f'{msg}: {e.id}' if e else msg)
return True
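    # Illustrative usage sketch (the locator name is hypothetical): poll for up to
    # 15 seconds before interacting, e.g.
    #   if self.is_element_available(lambda: self.submit_button(), wait=15, name='Submit'):
    #       self.submit_button().click()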
def wait_for_condition(self, condition, wait=10):
"""Process an action repeatedly for the specified wait time (seconds) until it returns true.
Args:
:param condition: A function that returns a bool.
:param wait: The wait time for the process to complete.
Throws:
TimeoutError if the action is not true within the specified wait time (seconds)
"""
beginning = time.time()
for w in range(0, wait):
try:
if condition() is True:
break
except:
pass
time.sleep(1)
since = time.time()
span = self.span(since, beginning)
action_name = condition.__name__
if hasattr(condition, '__self__'):
container_name = condition.__self__.__class__.__name__
self.log.debug(f'Waiting for {container_name}.{action_name}() | Elapsed seconds: {span}')
else:
self.log.debug(f'Waiting for {action_name}() | Elapsed seconds: {span}')
if span >= wait:
self.log.debug(f'Condition was not met: Elapsed seconds: {span}')
raise TimeoutError('The condition was not met within the expected time span.')
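    # Illustrative usage sketch (the spinner locator is hypothetical): block until a busy
    # indicator disappears, raising TimeoutError after 20 seconds otherwise:
    #   self.wait_for_condition(lambda: not self.spinner().is_displayed(), wait=20)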
def span(self, since, beginning):
"""Calculate an integral span.
Args:
:param since: The end point since beginning.
:param beginning: The beginning.
Returns:
The absolute span between two numbers as an integer.
"""
span = beginning - since
return abs(int(f"{span:.0f}"))
class BasePageWeb(BasePage):
driver: SeleniumDriver
def __init__(self, tw: WebTestWrapper, locator_elem, with_validation):
self._tw = tw
super().__init__(tw)
self.driver = tw.driver
if with_validation and not self._is_done_loading(locator_elem):
raise Exception("Web.PAGE_LOAD")
class BasePageMobile(BasePage):
driver: AppiumDriver
def __init__(self, tw: MobileTestWrapper, locator_elem, with_validation):
self._tw = tw
super().__init__(tw)
self.driver = tw.driver
if with_validation and not self._is_done_loading(locator_elem):
raise Exception("App.PAGE_LOAD")
def try_find_element(self, locator, max_swipes=6, swipe_dir=BasePage.SWIPE_UP, name=None):
"""Repeated swipe action (default:up) for the specified number of attempts or until the element is found.
If not found, no consequences.
Args:
:param locator: A lambda function that returns a webelement.
:param max_swipes: The max number of swipes to attempt
:param swipe_dir: 'up' to reveal elements below, 'down' to reveal elements above
:param name: (Optional) A name describing the webelement for logging purposes.
"""
located = self.is_element_available(lambda: locator(), 2, name)
attempts = 0
while not located:
attempts +=1
self.log.info(f'Swiping: {swipe_dir}')
if swipe_dir is BasePage.SWIPE_UP:
self.swipe_up()
elif swipe_dir is BasePage.SWIPE_DOWN:
self.swipe_down()
elif swipe_dir is BasePage.SWIPE_LEFT:
self.swipe_left()
else:
self.swipe_right()
located = self.is_element_available(lambda: locator(), 2, name)
if attempts >= max_swipes:
break
def swipe_up(self):
if self.config.is_ios:
self.driver.swipe(50, 350, 50, 310, 400)
else:
self.driver.swipe(100, 1000, 100, 845, 400)
def swipe_down(self):
if self.config.is_ios:
self.driver.swipe(50, 310, 50, 350, 400)
else:
self.driver.swipe(100, 845, 100, 1000, 400)
def swipe_left(self):
if self.config.is_ios:
self.driver.swipe(300, 250, 80, 250, 400)
else:
self.driver.swipe(600, 800, 500, 800, 400)
def swipe_right(self):
if self.config.is_ios:
self.driver.swipe(80, 250, 300, 250, 400)
else:
self.driver.swipe(500, 800, 600, 800, 400)
``` |
{
"source": "johnowhitaker/days_of_code",
"score": 3
} |
#### File: days_of_code/days_of_code/abc.py
```python
__all__ = ['parse_tune', 'get_tune', 'set_up_threads']

# Cell
# Imports needed by the cells below (the original nbdev export omitted them);
# csv_writer_lock is assumed to be a module-level threading.Lock shared by the workers.
import csv
import threading
from concurrent.futures import ThreadPoolExecutor

import requests
from bs4 import BeautifulSoup

csv_writer_lock = threading.Lock()

# Cell
def parse_tune(tune_content):
t = {
'Title': tune_content.text.split('\nT: ')[1].split('\n')[0],
'Type':tune_content.text.split('\nR: ')[1].split('\n')[0],
'Meter':tune_content.text.split('\nM: ')[1].split('\n')[0],
'Length':tune_content.text.split('\nL: ')[1].split('\n')[0],
'Key':tune_content.text.split('\nK: ')[1].split('\n')[0],
'Notes':''.join(tune_content.text.split('\nK: ')[1].split('\n')[1:],)
}
return t
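# Sketch of the text layout parse_tune() assumes for a thesession.org tune block
# (abridged; the tune values are illustrative):
#   "\nT: Some Tune\nR: reel\nM: 4/4\nL: 1/8\nK: Gmaj\n|:GABc dedB|..."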
# Cell
def get_tune(tune_url, savefile='data/all_tunes.csv'):
tune_page = requests.get('https://thesession.org'+tune_url)
tune_soup = BeautifulSoup(tune_page.text, 'html5lib')
tune_content = tune_soup.find('div', {"class": "notes"}) # Just the first arrangement
tune = parse_tune(tune_content)
with csv_writer_lock:
with open(savefile, mode="a") as f1:
review_writer = csv.writer(f1, delimiter=",")
review_writer.writerow(tune.values())
return tune
def set_up_threads(urls):
with ThreadPoolExecutor(max_workers=10) as executor:
return executor.map(get_tune,
urls,
timeout = 60)
``` |
{
"source": "johnowhitaker/ds_portfolio",
"score": 2
} |
#### File: dashboards/multiple_apps/index.py
```python
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from app import app
from apps import gbv_app, diabetes_app
app.layout = html.Div([
dcc.Location(id='url', refresh=False),
html.Div(id='page-content')
])
@app.callback(Output('page-content', 'children'),
[Input('url', 'pathname')])
def display_page(pathname):
# We could make a separate 'homepage app'
if pathname == "/":
home = html.Div([
dcc.Link('Go to diabetes app', href='/apps/diabetes_app'),
html.P(),
dcc.Link('Go to gbv app', href='/apps/gbv_app')
])
return home
elif pathname == '/apps/gbv_app':
return gbv_app.layout
elif pathname == '/apps/diabetes_app':
return diabetes_app.layout
else:
return '404'
if __name__ == '__main__':
app.run_server(debug=True, port=8051, host='localhost')
``` |
{
"source": "johnowhitaker/swoggle",
"score": 3
} |
#### File: swoggle/swoggle/ai.py
```python
__all__ = ['RandomAgent', 'BasicAgent', 'win_rates', 'swoggle_to_state_vector', 'action_from_number']
# Cell
from .core import *
import random
class RandomAgent:
""" Given a swoggle board on which it is a player, make a random valid move """
def __init__(self, player):
self.player = player
def move(self, board, dice_roll):
# If in jail, try to escape
if self.player in board.jail:
board.move(self.player, (0, 0), (0, 0), dice_roll, False, False)
return 'escape'
# Get start_loc
start_loc = (9, 9)
for row in board.board:
for cell in row:
if cell.player == self.player:
start_loc = (cell.y, cell.x)
if start_loc == (9, 9):
return None
# Make a random move within reach
move = ()
count = 0
while True:
count += 1
end_x = random.choice(range(8))
end_y = random.choice(range(8))
drone = random.choice([True, False])
powerjump = random.choice([True, False])
valid = board.is_valid_move(self.player, start_loc, (end_x, end_y), dice_roll, drone=drone, powerjump=powerjump)
move = ()
if valid:
move = ((self.player, start_loc, (end_x, end_y), dice_roll, drone, powerjump))
# print(f'{self.player} took {count} tries to guess a random move')
break
board.move(*move)
return move
# Cell
class BasicAgent:
""" Given a swoggle board on which it is a player, make a sensible move """
def __init__(self, player):
self.player = player
def move(self, board, dice_roll):
# If in jail, try to escape
if self.player in board.jail:
board.move(self.player, (0, 0), (0, 0), dice_roll, False, False)
return 'escape'
# Get start_loc
start_loc = (9, 9)
for row in board.board:
for cell in row:
if cell.player == self.player:
start_loc = (cell.y, cell.x)
if start_loc == (9, 9):
return None
# If bases in range, take them
for row in board.board:
for cell in row:
if cell.player == None and cell.base != None and cell.base != self.player: # Normal move
move = (self.player, start_loc, (cell.x, cell.y), dice_roll, False, False)
if board.is_valid_move(*move):
board.move(*move)
return (move)
if cell.base != None and cell.base != self.player: # Drone attack
move = (self.player, start_loc, (cell.x, cell.y), dice_roll, True, False)
if board.is_valid_move(*move):
board.move(*move)
return (move)
# If on base and player in range, take or powerjump them
if board.board[start_loc[0]][start_loc[1]].base == self.player:
for row in board.board:
for cell in row:
if cell.player != None and cell.player != self.player:
# try normal move
move = (self.player, start_loc, (cell.x, cell.y), dice_roll, False, False)
if board.is_valid_move(*move):
board.move(*move)
return (move)
# Try powerjump
move = (self.player, start_loc, (cell.x, cell.y), dice_roll, False, True)
if board.is_valid_move(*move):
board.move(*move)
return (move)
# If players in range and takeable, take them
for row in board.board:
for cell in row:
if cell.player != None and cell.player != self.player:
# Normal take
move = (self.player, start_loc, (cell.x, cell.y), dice_roll, False, False)
if board.is_valid_move(*move):
board.move(*move)
return (move)
# Drone take
move = (self.player, start_loc, (cell.x, cell.y), dice_roll, True, False)
if board.is_valid_move(*move):
board.move(*move)
return (move)
        # TODO: If a player is close to your base and the base is reachable, go back to base
# Else move randomly
# Make a random move within reach
move = ()
count = 0
while True:
count += 1
end_x = random.choice(range(8))
end_y = random.choice(range(8))
drone = random.choice([True, False])
powerjump = random.choice([True, False])
valid = board.is_valid_move(self.player, start_loc, (end_x, end_y), dice_roll, drone=drone, powerjump=powerjump)
move = ()
if valid:
move = ((self.player, start_loc, (end_x, end_y), dice_roll, drone, powerjump))
# print(f'{self.player} took {count} tries to guess a random move')
break
board.move(*move)
return move
# Cell
from IPython.display import clear_output
def win_rates(n, agents):
wins = {}
for i in range(n):
rounds = 0
sr = Swoggle(agents, verbose=False)
while True:
sr.move_agents()
rounds += 1
players = []
for row in sr.board.board:
for cell in row:
if cell.player != None:
players.append(cell.player)
if len(players) <= 1:
clear_output(wait=True)
print("Winner:", players, rounds)
if len(players) == 1:
if players[0] in wins:
wins[players[0]] += 1
else:
wins[players[0]] = 1
break
return wins
# Cell
import numpy as np
def swoggle_to_state_vector(sr, player, dice_roll):
board = sr.board
spa = board.jail
# The player locations (192 = 3*8*8)
players = np.concatenate([np.array([c.player == p for c in np.array(sr.board.board).flatten()]).astype(int) for p in range(1, 5) if p != player])
# The base locations of the other players (192 = 3*8*8)
bases = np.concatenate([np.array([c.base == p for c in np.array(sr.board.board).flatten()]).astype(int) for p in range(1, 5) if p != player])
# The drones (64 = 8*8)
drones = np.array([c.drone for c in np.array(sr.board.board).flatten()]).astype(int)
# Player location and base (64 each)
player_loc = np.array([c.player == player for c in np.array(sr.board.board).flatten()]).astype(int)
base = np.array([c.base == player for c in np.array(sr.board.board).flatten()]).astype(int)
# dice (6)
dice = np.array([int(i==dice_roll) for i in range(1, 7)])
# Spa (3)
prisoners = np.array([p in spa for p in range(1, 5) if p != player]).astype(int)
return np.concatenate([players, bases, player_loc, base, drones, dice, prisoners])
# Cell
def action_from_number(move, player, sw, dice_roll):
""" Takes the output of the network, samples from the probs, does the move if possible, returns move_type, the move itself and the move number"""
board = sw.board
move_number = move * 1
# Start by getting player's current loc
start_loc = (9, 9)
for row in board.board:
for cell in row:
if cell.player == player:
start_loc = (cell.y, cell.x)
if start_loc == (9, 9): # Player
action = (player, (9, 9),(9, 9), dice_roll, False, False)
return 'dead', action, 0
drone, pj = False, False
move_type = ''
if move//64 == 0: # Normal Move
move_type = 'normal'
elif move//64 == 1:
move -= 64
pj=True
move_type = 'powerjump'
else:
move -= 128
drone = True
move_type = 'drone'
x = move//8
y = move%8
action = (player, start_loc, (x, y), dice_roll, drone, pj)
return move_type, action, move_number
``` |
{
"source": "johnowl/python-studies",
"score": 3
} |
#### File: hello/service/validator_service_test.py
```python
from src.main.python.com.johnowl.hello.service.validator_service import ValidatorService
from src.main.python.com.johnowl.hello.service.validator_service import ValidationError
import unittest
class HelloServiceTest(unittest.TestCase):
def test_when_validate_without_body_and_with_valid_data_should_return_true(self):
service = ValidatorService()
headers = {
"Application-Id": "273ab84d-0c7d-434a-a9f7-3004eabf54a4", # random uuid v4
"Digest": "sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" # empty string hash
}
result = service.is_valid(headers)
self.assertEqual(result, True)
def test_when_validate_without_body_and_with_invalid_application_id_should_return_error(self):
service = ValidatorService()
headers = {
"Application-Id": "273ab84d-0c7d-434a-a9f7-3004eabf54", # invalid uuid v4
"Digest": "sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" # empty string hash
}
result = service.is_valid(headers)
self.assertIsInstance(result, ValidationError)
self.assertEqual(result.kind, "application_id_invalid")
self.assertEqual(result.message, "Application-Id inválido.")
def test_when_validate_without_body_and_with_invalid_digest_should_return_error(self):
service = ValidatorService()
headers = {
"Application-Id": "273ab84d-0c7d-434a-a9f7-3004eabf54a4", # random uuid v4
"Digest": "sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b856" # invalid hash
}
result = service.is_valid(headers)
self.assertIsInstance(result, ValidationError)
self.assertEqual(result.kind, "digest_invalid")
self.assertEqual(result.message, "Digest inválido.")
def test_when_validate_without_body_and_with_invalid_format_digest_should_return_error(self):
service = ValidatorService()
headers = {
"Application-Id": "273ab84d-0c7d-434a-a9f7-3004eabf54a4", # random uuid v4
"Digest": "sha256=e3b0c44298fc1c149afbf4c8996fb924?7ae41e4649b934ca495991b7852b856" # invalid format hash
}
result = service.is_valid(headers)
self.assertIsInstance(result, ValidationError)
self.assertEqual(result.kind, "digest_invalid")
self.assertEqual(result.message, "Digest inválido.")
def test_when_validate_with_valid_data_should_return_true(self):
service = ValidatorService()
headers = {
"Application-Id": "273ab84d-0c7d-434a-a9f7-3004eabf54a4", # random uuid v4
"Digest": "sha256=5e2bf57d3f40c4b6df69daf1936cb766f832374b4fc0259a7cbff06e2f70f269" # valid hash
}
result = service.is_valid(headers, body="lorem ipsum")
self.assertTrue(result)
def test_when_validate_without_body_and_with_empty_application_id_should_return_error(self):
service = ValidatorService()
headers = {
"Application-Id": "",
"Digest": "sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" # empty string hash
}
result = service.is_valid(headers)
self.assertIsInstance(result, ValidationError)
self.assertEqual(result.kind, "application_id_not_found")
self.assertEqual(result.message, "Application-Id não encontrado.")
def test_when_validate_without_body_and_with_empty_digest_should_return_error(self):
service = ValidatorService()
headers = {
"Application-Id": "273ab84d-0c7d-434a-a9f7-3004eabf54", # invalid uuid v4
"Digest": "" # empty string hash
}
result = service.is_valid(headers)
self.assertIsInstance(result, ValidationError)
self.assertEqual(result.kind, "digest_not_found")
self.assertEqual(result.message, "Digest não encontrado.")
def test_when_validate_without_body_and_without_digest_should_return_error(self):
service = ValidatorService()
headers = {
"Application-Id": "273ab84d-0c7d-434a-a9f7-3004eabf54" # invalid uuid v4
}
result = service.is_valid(headers)
self.assertIsInstance(result, ValidationError)
self.assertEqual(result.kind, "digest_not_found")
self.assertEqual(result.message, "Digest não encontrado.")
def test_when_validate_without_body_and_without_application_id_should_return_error(self):
service = ValidatorService()
headers = {
"Digest": "sha256=e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" # empty string hash
}
result = service.is_valid(headers)
self.assertIsInstance(result, ValidationError)
self.assertEqual(result.kind, "application_id_not_found")
self.assertEqual(result.message, "Application-Id não encontrado.")
``` |
{
"source": "JohnOyster/Computer-Vision",
"score": 3
} |
#### File: Computer-Vision/HOG/hog.py
```python
from enum import Enum
import os.path
import cv2
import numpy as np
# ----------------------------------------------------------------------------
# Define Test Images
def test_images():
"""Generator to return test images.
:return: Test Image Filename
:rtype: str
"""
test_directory = './NICTA/TestSet/PositiveSamples'
test_set = [
'item_00000000.pnm'
]
for image, _ in enumerate(test_set):
yield os.path.join(test_directory, test_set[image])
def gamma_correction(image, gamma=1.0):
"""Power-Law (Gamma) Transformation of grayscale image.
:param image: Original Image
:type: numpy.ndarray
:param gamma: Gamma value to apply
:type: float
:return: gamma transformed image file
:rtype: numpy.ndarray
"""
bits_per_pixel = np.iinfo(image.dtype).max
norm_image = image / np.max(image)
new_image = bits_per_pixel * np.power(norm_image, gamma)
new_image = new_image.astype(np.uint8)
return new_image
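# Worked example (sketch): for a uint8 image whose brightest pixel is 255,
# gamma=0.5 maps an input value of 64 to about 255 * (64/255) ** 0.5, roughly 128,
# brightening dark regions while leaving 0 and 255 fixed.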
def compute_gradients(image, is_signed=False):
"""
- Cellsize[8 8]
- Blocksize[16 16]
- Gradient operators: Gx = [-1 0 1] and Gy = [-1 0 1]T
- Number of orientation bins = 9
:param is_signed:
:param image:
:return:
"""
kernel_prewitt = np.array([[1, 1, 1],
[0, 0, 0],
[-1, -1, -1]])
gradient_x = cv2.filter2D(image, -1, kernel_prewitt)
gradient_y = cv2.filter2D(image, -1, np.transpose(kernel_prewitt))
orientation_size = 360 if is_signed else 180
magnitude = np.sqrt(np.square(gradient_x) + np.square(gradient_y))
angle = np.arctan2(gradient_y, gradient_x) * (orientation_size / np.pi)
return gradient_x, gradient_y, magnitude, angle
def calculate_histogram(magnitudes, angles, bin_count=9, is_signed=False):
"""Calculate the localized histogram of each cell.
:param magnitudes: The maginitude of each cell
:type: np.ndarray
:param angles: The angle of each cell
:type: np.ndarray
:param bin_count: The bins of each cell
:type: int
:param is_signed: Should angular data be signed?
:type: bool
:return: Histogram of cell
:rtype: np.array
"""
cell_size_x, cell_size_y = magnitudes.shape
orientation_size = 360 if is_signed else 180
bin_width = orientation_size // bin_count
cell_histogram = np.zeros(bin_count)
for row in range(cell_size_x):
for col in range(cell_size_y):
orientation = angles[row][col]
histogram_bin = int(orientation // bin_width)
cell_histogram[histogram_bin] += magnitudes[row][col]
return cell_histogram / np.product(magnitudes.shape)
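# Worked example (sketch): with the default 9 unsigned bins, bin_width = 180 // 9 = 20
# degrees, so a pixel with orientation 45 and magnitude m adds m to bin 45 // 20 = 2
# (before the final division by the number of pixels in the cell).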
def compute_weighted_vote(gradient, cell_size=(8, 8), bin_count=9, is_signed=False):
"""Compute the weighted vote of each cell.
:param gradient: Gradient value of each cell
:type: np.ndarray
:param cell_size: 2D cell size
:type: tuple
:param bin_count: number of bins
:type: int
    :param is_signed: whether the orientations are signed or not
    :type: bool
    :return: weighted histogram
:rtype: np.ndarray
"""
gradient_x = gradient[0]
gradient_y = gradient[1]
gradient_magnitudes = gradient[2]
gradient_angles = gradient[3]
grad_size_x, grad_size_y = gradient_magnitudes.shape
cell_size_x, cell_size_y = cell_size
cell_count_x = int(grad_size_x / cell_size_x) # Number of cells in x axis
cell_count_y = int(grad_size_y / cell_size_y) # Number of cells in y axis
#print("[INFO] Cell counts: x={} y={}".format(cell_count_x, cell_count_y))
hog_cells = np.zeros((cell_count_x, cell_count_y, bin_count))
prev_x = 0
# Compute HOG of each cell
for row in range(cell_count_x):
prev_y = 0
for col in range(cell_count_y):
magnitudes_cell = gradient_magnitudes[prev_x:prev_x + cell_size_x, prev_y:prev_y + cell_size_y]
angles_cell = gradient_angles[prev_x:prev_x + cell_size_x, prev_y:prev_y + cell_size_y]
hog_cells[row][col] = calculate_histogram(magnitudes_cell, angles_cell, bin_count, is_signed)
prev_y += cell_size_y
prev_x += cell_size_x
#print("[DEBUG] Cells array shape: {}".format(hog_cells.shape))
return hog_cells, (cell_count_x, cell_count_y)
def contrast_normalize(vector, epsilon=1e-5):
"""Performt the L2-norm on block.
:param vector: The input
:type: np.ndarray
:param epsilon: That CYA value
:type: float
:return: Normalized block
:rtype" np.ndarray
"""
#print("[DEBUG] What am I normalizing?: {}".format(vector.shape))
return vector / np.sqrt(np.linalg.norm(np.square(vector), 2) + np.square(epsilon))
def normalize_blocks(cells, cell_size=(8, 8), block_size=(16, 16), bin_count=9):
"""Normalize all the things!
:param cells: Input cell
:type: np.ndarray
:param cell_size: 2D tuple
:type: tuple
:param block_size: 2D tuple
:type: tuple
:param bin_count: Number of bins
:type: bin
:return: Array of normalized blocks
:rtype: np.ndarray
"""
cell_size_x, cell_size_y = cells.shape[:2]
block_size_x, block_size_y = block_size
block_count_x = cell_size_x - 1
block_count_y = cell_size_y - 1
cells_per_block_x = int(block_size_x // cell_size[0])
cells_per_block_y = int(block_size_y // cell_size[1])
#print("[INFO] Block counts: x={} y={}".format(block_count_x, block_count_y))
normalized_blocks = np.zeros((block_count_x, block_count_y, cells_per_block_x*cells_per_block_y*bin_count))
# Normalize HOG by block
for row in range(block_count_x):
for col in range(block_count_y):
xrange = row+cells_per_block_x
yrange = col+cells_per_block_y
#print("[DEBUG] Row={} Col={}\n\t Getting cells {} and {}".format(row, col, xrange, yrange))
hog_block = cells[row:row + cells_per_block_x, col:col + cells_per_block_y].ravel()
normalized_blocks[row, col] = contrast_normalize(hog_block)
return normalized_blocks, (block_count_x, block_count_y)
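# Shape check (sketch): for the 64x128 detection window used in __main__ below,
# compute_weighted_vote() returns a (16, 8, 9) cell array, so normalize_blocks()
# yields (15, 7, 2*2*9) = (15, 7, 36), i.e. a 3780-dimensional HOG descriptor once
# flattened (the classic Dalal-Triggs size for a 64x128 window).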
if __name__ == '__main__':
for my_image in test_images():
# Step 1 - Input Image
# Load in the test image, resize to 64x128, and convert to grayscale
print("[INFO] Loading test image {}".format(my_image))
test_image = cv2.imread(my_image)
test_image = cv2.resize(test_image, (64, 128))
test_image = cv2.cvtColor(test_image, cv2.COLOR_RGB2GRAY)
# Step 2 - Normalize gamma and color
gamma_value = 1.0
test_image = gamma_correction(test_image, gamma_value)
# Step 3 - Compute gradients
test_gradient = compute_gradients(test_image)
TEST_GRADIENT = True
if TEST_GRADIENT:
gx, gy = test_gradient[0], test_gradient[1]
gheight = gx.shape[1] * 4
gwidth = gx.shape[0] * 4
gx = cv2.resize(gx, (gheight, gwidth))
gy = cv2.resize(gy, (gheight, gwidth))
output_stack = np.hstack((gx, gy))
cv2.imshow('Filter results', output_stack)
cv2.waitKey(0)
# Step 4 - Weighted vote into spatial and orientation cells
cell_histograms, _ = compute_weighted_vote(test_gradient)
# Step 5 - Contrast normalize over overlapping spatial blocks
hog_blocks, _ = normalize_blocks(cell_histograms)
# Step 6 - Collect HOG's over detection window
print(hog_blocks.ravel().shape)
# Step 7 - Linear SVM
cv2.destroyAllWindows()
``` |
{
"source": "johnp418/lumohacks2017",
"score": 2
} |
#### File: sleepee/diary/views.py
```python
from diary.models import Diary, Nap, Patient, Physician
from django.shortcuts import get_object_or_404
from diary.serializers import DiarySerializer, UserSerializer
from django.http import Http404
from rest_framework import viewsets, status
from rest_framework.response import Response
# Create your views here.
class DiaryViewSet(viewsets.ViewSet):
def list(self, request):
queryset = Diary.objects.all()
serializer = DiarySerializer(queryset, many=True)
return Response(serializer.data)
def retrieve(self, request, pk):
try:
queryset = Diary.objects.get(id=pk)
except Diary.DoesNotExist:
raise Http404
serializer = DiarySerializer(queryset)
return Response(serializer.data)
def create(self, request):
serializer = DiarySerializer(data=request.data)
print('creating diary')
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
# def update(self, request, pk):
# pass
def partial_update(self, request, pk):
try:
queryset = Diary.objects.get(id=pk)
except Diary.DoesNotExist:
raise Http404
serializer = DiarySerializer(queryset, data=request.data, partial=True)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def destroy(self, request, pk):
try:
queryset = Diary.objects.get(id=pk)
except Diary.DoesNotExist:
raise Http404
queryset.delete()
return Response("deletion successful", status=status.HTTP_202_ACCEPTED)
class UserViewSet(viewsets.ViewSet):
# def retrieve(self, request, pk):
# try:
# queryset = Diary.objects.get(id=pk)
# except Diary.DoesNotExist:
# raise Http404
# serializer = DiarySerializer(queryset)
# return Response(serializer.data)
def create(self, request):
serializer = UserSerializer(data=request.data)
print(request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def partial_update(self, request, pk):
try:
queryset = Diary.objects.get(id=pk)
except Diary.DoesNotExist:
raise Http404
serializer = DiarySerializer(queryset, data=request.data, partial=True)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
def destroy(self, request, pk):
try:
queryset = Diary.objects.get(id=pk)
except Diary.DoesNotExist:
raise Http404
queryset.delete()
return Response("deletion successful", status=status.HTTP_202_ACCEPTED)
``` |
{
"source": "JohnPapagiannakos/shortestpathsinpolygons",
"score": 3
} |
#### File: JohnPapagiannakos/shortestpathsinpolygons/graphs.py
```python
import math
from collections import deque
class Node:
def __init__(self, fid, face, link=None):
# Face id
self.fid = fid
self.face = face
# Coordinates of node, [x, y]
self.coords = face.getCentroid()
# Pointer to other node in the graph
self.link = list()
self.visited = False
def makeLink(self, toNode):
# Distance between nodes
p = self.getCoordinates()
q = toNode.getCoordinates()
distance = math.sqrt(math.pow((q[0] - p[0]), 2) + math.pow((q[1] - p[1]),2))
self.link.append(Link(self, toNode, distance))
def getId(self):
return self.fid
def getCoordinates(self):
return self.coords
class Link:
def __init__(self, origin, tail, distance):
# Points to the origin node
self.origin = origin
# Points to the tail node
self.tail = tail
# Eucl. distance between two nodes
self.distance = distance
def getTail(self):
return self.tail
class Graph:
def __init__(self, fid=None, node=None):
self.head = None
self.list_nodes = list()
self.num_nodes = 0
def sortById(self):
self.list_nodes = sorted(self.list_nodes, key=lambda x: (x.fid))
def addNode(self, newNode, fid):
newNode = Node(fid, newNode, None)
self.list_nodes.append(newNode)
if self.head is None:
self.head = newNode
self.num_nodes += 1
return newNode
def makeLink(self, n1, n2):
n1 = self.list_nodes[n1.getId() - 1]
n2 = self.list_nodes[n2.getId() - 1]
# link n1->n2 && n2->n1
n1.makeLink(n2)
n2.makeLink(n1)
def getNodeByFaceId(self, fid):
return self.list_nodes[fid - 1]
def DFS(self, path, end):
last_visited = path[-1]
for e in last_visited.link:
t = e.getTail()
if t.visited == False:
t.visited = True
path.append(t)
if t == end:
return path
if self.DFS(path, end):
return path
path.pop()
return
    # Iterative Breadth-First Search (used here instead of the recursive DFS above).
    # https://stackoverflow.com/questions/8922060/how-to-trace-the-path-in-a-breadth-first-search
    # In Python an iterative approach performs better than a recursive one.
def BFS(self, start, end):
# maintain a queue of paths
queue = []
# push the first path into the queue
queue.append([start])
while queue:
# get the first path from the queue
path = queue.pop(0)
# get the last node from the path
node = path[-1]
# path found
if node == end:
return path
# enumerate all adjacent nodes, construct a new path and push it into the queue
for adjacent in node.link:
t = adjacent.getTail()
if t.visited == False:
t.visited = True
new_path = list(path)
new_path.append(t)
queue.append(new_path)
def startTraversal(self, start, end):
print("Starting to find shortest path ...")
start.visited = True
path = [start]
if start == end:
return None
path = self.BFS(start, end)
print(len(path))
print("Found shortest path!")
print("\n")
return path
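# Hypothetical usage sketch (faces would come from a triangulated DCEL; names are made up):
#   g = Graph()
#   n1 = g.addNode(face_a, face_a.getId())
#   n2 = g.addNode(face_b, face_b.getId())
#   g.makeLink(n1, n2)
#   path = g.startTraversal(g.getNodeByFaceId(1), g.getNodeByFaceId(2))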
```
#### File: JohnPapagiannakos/shortestpathsinpolygons/project_libs.py
```python
import math
import os
import sys
from osgeo import gdal, osr, ogr
from dcel import *
# Compute Distance using Haversine formula
def haversineDistance(point1, point2):
R = 6371 # mean(Earth radius)
dLat = math.radians(point2.y) - math.radians(point1.y)
dLon = math.radians(point2.x) - math.radians(point1.x)
a = math.sin(dLat/2) * math.sin(dLat/2) + math.cos(math.radians(point1.y)) * math.cos(math.radians(point2.y)) * math.sin(dLon/2) * math.sin(dLon/2)
return R * 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
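# Sanity check (sketch): two points one degree of longitude apart on the equator,
# e.g. (x=0, y=0) and (x=1, y=0), give roughly 111 km, the usual rule of thumb for
# one degree of arc on the Earth's surface.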
# Fetch the schema information for this layer (https://pcjericks.github.io/py-gdalogr-cookbook/vector_layers.html).
def addGeometryToLayer(input_layer, input_polygon):
featureDefn = input_layer.GetLayerDefn()
outFeature = ogr.Feature(featureDefn)
outFeature.SetGeometry(input_polygon)
input_layer.CreateFeature(outFeature)
outFeature = None
# Create the polygon from dataset
def createPolygon(dataset, starting_point, ending_point):
Polygon = None
layer = dataset.GetLayer()
for feature in layer:
geom = feature.GetGeometryRef()
if geom.Contains(starting_point):
if not geom.Contains(ending_point):
print("There exists no path between the two given points!")
return
# Convert a geometry into well known binary format.
wkb = geom.ExportToWkb()
Polygon = ogr.CreateGeometryFromWkb(wkb)
return Polygon
# Implementation of Funnel Algorithm and miscellaneous functions.
# based on link[2] https://github.com/mmmovania/poly2tri.as3/blob/master/src/org/poly2tri/utils/NewFunnel.as
# link[3] https://gamedev.stackexchange.com/questions/68302/how-does-the-simple-stupid-funnel-algorithm-work/68305
#
# Computes and returns the Euclidean distance between points a and b.
def vdistsqr(a, b):
x = b.x - a.x
y = b.y - a.y
return math.sqrt(math.pow(x, 2) + math.pow(y, 2))
# Boolean function. Returns true if |a - b| < 1e-6 \approx "0"
def vequal(a, b):
eq = math.pow(0.001, 2)
return vdistsqr(a, b) < eq
# Computes and returns the cross_product(u,v), where u = b-a, v = c-a,
# or equivalently the Area A of a triangle \Delta(a,b,c) times 2.
def triAreaX2(a, b, c):
ax = b.x - a.x
ay = b.y - a.y
bx = c.x - a.x
by = c.y - a.y
return (bx * ay - ax * by)
def funnel(starting_point, ending_point, diagonal_list):
if diagonal_list is None:
path = [starting_point, ending_point]
return path
leftList = list()
rightList = list()
for e in diagonal_list:
origin = e.getOrigin()
op = point(origin.getCoordinates()[0], origin.getCoordinates()[1])
tail = e.getTail()
tp = point(tail.getCoordinates()[0], tail.getCoordinates()[1])
leftList.append(op)
rightList.append(tp)
leftList.append(ending_point)
rightList.append(ending_point)
path = [starting_point]
rightNode = rightList[0]
leftNode = leftList[0]
leftIdx = 0
rightIdx = 0
apex = starting_point
i = 0
while i<len(diagonal_list):
i += 1
nextRight = rightList[i]
nextLeft = leftList[i]
# Update right vertex.
if triAreaX2(apex, rightNode, nextRight) <= 0:
if vequal(apex, rightNode) or triAreaX2(apex, leftNode, nextRight) > 0:
# Tighten the funnel.
rightNode = nextRight
rightIdx = i
else:
# Right over left, insert left to path and restart scan from portal left point.
path.append(leftNode)
apex = leftNode
apexIndex = leftIdx
# Reset funnel
leftNode = apex
rightNode = apex
rightIdx = apexIndex
# Restart scan.
i = apexIndex
continue
# Update left vertex
if triAreaX2(apex, leftNode, nextLeft) >= 0:
if vequal(apex, leftNode) or triAreaX2(apex, rightNode, nextLeft) < 0:
# Tighten the funnel.
leftNode = nextLeft
leftIdx = i
else:
# Left over right, insert right to path and restart scan from portal right point.
path.append(rightNode)
# Make current right the new apex.
apex = rightNode
apexIndex = rightIdx
# Reset portal.
leftNode = apex
rightNode = apex
leftIdx = apexIndex
rightIdx = apexIndex
# Restart scan.
i = apexIndex
continue
path.append(ending_point)
return path
# Finds and returns the intersected diagonals.
def findIntersectedDiagonals(Dcel, shortest_path):
intersectedDiagonals = list()
for ii in range(0, len(shortest_path) - 1):
face = Dcel.faces[shortest_path[ii].fid]
s = face.getOuterEdge()
if s.getTwin().getFace().getId() == shortest_path[ii + 1].fid:
intersectedDiagonals.append(s)
e = s.getNext()
while e != s:
if e.getTwin().getFace().getId() == shortest_path[ii + 1].fid:
intersectedDiagonals.append(e)
break
e = e.getNext()
return intersectedDiagonals
# ---Main Routine---
# Given two points inside a polygon, this function returns the shortest path
# between the two points, lying inside the polygon.
# Input: * input_starting_point, the given starting point in format (x,y).
# * input_ending_point, the given ending point in format (x,y).
# * inputFileName, the input shape file (.shp).
# * outputFileName, a list of the output shape files (.shp).
# (can be an empty list)
# * driverName, the shapefile format.
# * outputDataDir, the directory where the produced files will be saved.
# Output: void
def findShortestPathInPolygon(input_starting_point, input_ending_point, inputFileName, outputFileName, driverName, outputDataDir):
if inputFileName is None:
sys.exit("Empty inputFileName!")
if len(outputFileName) > 5:
print('Ignoring extra filenames')
if len(outputFileName) < 5:
print('Using default output filenames')
outputFileName = list()
for i in range(5):
outputFileName.append('output_' + str(i) + '.shp')
starting_point = ogr.Geometry(ogr.wkbPoint)
starting_point.AddPoint(input_starting_point.x, input_starting_point.y)
ending_point = ogr.Geometry(ogr.wkbPoint)
ending_point.AddPoint(input_ending_point.x, input_ending_point.y)
# Open the specified shape file
dataset = gdal.OpenEx(inputFileName, gdal.OF_VECTOR)
if not dataset:
sys.exit("Specified project directory does not exist or is empty! Terminating...")
else:
print("Data files have been read successfully!")
# Create Polygon from dataset
Polygon = createPolygon(dataset, starting_point, ending_point)
if Polygon is None:
print("Empty Polygon! There is no polygon that contains input starting-ending points.")
return
# Close dataset
dataset = None
drv = ogr.GetDriverByName(driverName)
outputFileName[0] = os.path.join(outputDataDir, outputFileName[0])
if os.path.exists(outputFileName[0]):
drv.DeleteDataSource(outputFileName[0])
outDataSource = drv.CreateDataSource(outputFileName[0])
outLayer = outDataSource.CreateLayer(outputFileName[0], geom_type=ogr.wkbPolygon)
addGeometryToLayer(outLayer, Polygon)
# Build the dcel structure from the polygon
newDcel = Dcel()
newDcel.buildFromPolygon(Polygon)
    # Monotonize Polygon P (this step is used only to visualize the monotonized P separately...)
print("Monotonizing : Polygon P")
newDcel.makeMonotone()
# Create a new shapefile...
# Write the monotonized polygons...
outSHPfn = os.path.join(outputDataDir, outputFileName[1])
if os.path.exists(outSHPfn):
drv.DeleteDataSource(outSHPfn)
outDataSource = drv.CreateDataSource(outSHPfn)
outLayer = outDataSource.CreateLayer(outSHPfn, geom_type=ogr.wkbPolygon)
for i in range(1, newDcel.num_faces):
poly = getPolygonByFace(newDcel.faces[i])
addGeometryToLayer(outLayer, poly)
####
# Replace prev shapefile
newDcel = Dcel()
newDcel.buildFromPolygon(Polygon)
# Triangulate the monotonized polygon
print("Monotonizing + Triangulating : Polygon P")
newDcel.makeMonotone()
newDcel.triangulate()
# Create a new shapefile...
# Write the triangulated & monotonized polygons...
outSHPfn = os.path.join(outputDataDir, outputFileName[2])
if os.path.exists(outSHPfn):
drv.DeleteDataSource(outSHPfn)
outDataSource = drv.CreateDataSource(outSHPfn)
outLayer = outDataSource.CreateLayer(outSHPfn, geom_type=ogr.wkbPolygon)
for i in range(1, newDcel.num_faces):
poly = getPolygonByFace(newDcel.faces[i])
if poly.Contains(starting_point):
start_face = newDcel.faces[i]
if poly.Contains(ending_point):
end_face = newDcel.faces[i]
addGeometryToLayer(outLayer, poly)
# Find the shortest path between the given starting and end points
extractedGraph = newDcel.extractGraph()
starting_node = extractedGraph.getNodeByFaceId(start_face.getId())
ending_node = extractedGraph.getNodeByFaceId(end_face.getId())
shortest_path = extractedGraph.startTraversal(starting_node, ending_node)
outSHPfn = os.path.join(outputDataDir, outputFileName[3])
if os.path.exists(outSHPfn):
drv.DeleteDataSource(outSHPfn)
outDataSource = drv.CreateDataSource(outSHPfn)
outLayer = outDataSource.CreateLayer(outSHPfn, geom_type=ogr.wkbLineString)
# Create a LineString
path_line = ogr.Geometry(ogr.wkbLineString)
for p in shortest_path:
path_line.AddPoint(p.getCoordinates()[0], p.getCoordinates()[1])
addGeometryToLayer(outLayer, path_line)
# Find the intersected diagonals.
intersectedDiagonals = findIntersectedDiagonals(newDcel, shortest_path)
# Funnel Algorithm
actual_path = funnel(input_starting_point, input_ending_point, intersectedDiagonals)
outSHPfn = os.path.join(outputDataDir, outputFileName[4])
if os.path.exists(outSHPfn):
drv.DeleteDataSource(outSHPfn)
outDataSource = drv.CreateDataSource(outSHPfn)
outLayer = outDataSource.CreateLayer(outSHPfn, geom_type=ogr.wkbLineString)
# Create a LineString
path_line = ogr.Geometry(ogr.wkbLineString)
# Calculate total distance of shortest path (Ellipsoidal formula)
distance = 0
idx = 0
for p in actual_path:
path_line.AddPoint(p.x, p.y)
if idx > 0:
distance += haversineDistance(actual_path[idx-1], actual_path[idx])
idx+=1
addGeometryToLayer(outLayer, path_line)
print("Total length (Cartesian) " + " = " + " {:.2f} deg".format(path_line.Length()))
print("Total length (Ellipsoidal)" + u" \u2248 " + " {:.1E} km".format(distance))
``` |
{
"source": "johnpappas/task_queue",
"score": 3
} |
#### File: johnpappas/task_queue/property_utils.py
```python
PROJECT_FILE_NAME='APP_NAME'
DELIMITER='_'
def get_app_name_prefix():
with open(PROJECT_FILE_NAME, 'r') as myfile:
app_name=myfile.read().replace('\n', '')
return app_name + DELIMITER
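# Example (sketch): if the APP_NAME file contains the single line "myapp",
# get_app_name_prefix() returns "myapp_".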
```
#### File: johnpappas/task_queue/task_httpReqContentToQueue.py
```python
import requests
import RedisUtils
import time
#Q_RESULT_PUT_QUEUE = RedisUtils.RedisQueue('result_put_queue')
def count_words_at_url(url, qname):
put_queue = RedisUtils.RedisQueue(qname)
resp = requests.get(url)
#return len(resp.text.split())
put_queue.put(resp.text)
#Q_RESULT_PUT_QUEUE.put(resp.text)
#return resp.text
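# Hypothetical usage sketch (the queue name mirrors the commented-out constant above):
#   count_words_at_url('https://example.com', 'result_put_queue')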
``` |
{
"source": "JohnPaquete/deck-wolf",
"score": 2
} |
#### File: JohnPaquete/deck-wolf/app.py
```python
from bottle import Bottle, run, template, static_file, request, redirect
from src.bottle_sqlite import SQLitePlugin
from src.models import Schema
from src.service import DeckMakerService
app = Bottle()
sqlite_plugin = SQLitePlugin(dbfile='data/test.db')
app.install(sqlite_plugin)
# The home page
@app.route('/')
def index(db):
return DeckMakerService().index(db)
# Card index page
@app.route('/cards')
@app.route('/cards/')
def card_index():
return DeckMakerService().card_index()
# Random card page
@app.route('/cards/random')
@app.route('/cards/random/')
def card_random(db):
return DeckMakerService().card_random(db)
# Card page by id
@app.route('/cards/:item')
def card(item, db):
return DeckMakerService().card(db, item)
# Posting of the collection form on card pages
@app.route('/cards/:item', method='POST')
def card_post(item, db):
DeckMakerService().card_post(db, item, request.forms)
redirect(f"/cards/{item}")
# Card page by oracle id
@app.route('/cards/oracle/:item')
def oracle_card(item, db):
return DeckMakerService().oracle_card(db, item)
# Sets index page
@app.route('/sets')
@app.route('/sets/')
def sets_index(db):
return DeckMakerService().sets_index(db, request.query)
# Sets card list page by set code
@app.route('/sets/:item')
def sets_card_list(item, db):
return DeckMakerService().sets_card_list(db, request.query, item)
# Collection index page
@app.route('/collection')
@app.route('/collection/')
def collection(db):
return DeckMakerService().collection(db, request.query)
# Collection post from collection index
@app.route('/collection/:item', method='POST')
def collection_post(item, db):
DeckMakerService().collection_post(db, item, request.forms)
redirect("/collection")
# Binders index page
@app.route('/collection/binders')
@app.route('/collection/binders/')
def binders_index(db):
return DeckMakerService().binders_index(db, request.query)
# Binder create
@app.route('/collection/binders', method='POST')
@app.route('/collection/binders/', method='POST')
def binders_create(db):
DeckMakerService().binders_post(db, request.forms)
redirect("/collection/binders/")
# Binder view page by id
@app.route('/collection/binders/:item')
@app.route('/collection/binders/:item/')
def binders(db, item):
return DeckMakerService().binders(db, request.query, item)
# Binder delete and edit by id
@app.route('/collection/binders/:item', method='POST')
@app.route('/collection/binders/:item/', method='POST')
def binders_edit(db, item):
DeckMakerService().binders_post(db, request.forms, item=item)
if request.forms.get('redirect') is not None:
redirect(request.forms.get('redirect'))
else:
redirect("/collection/binders/")
# Deck index page
@app.route('/decks')
@app.route('/decks/')
def decks(db):
return DeckMakerService().decks_index(db, request.query)
# Deck creation form page
@app.route('/decks/create')
@app.route('/decks/create/')
def decks_create():
return DeckMakerService().decks_create()
# Deck creation form post
@app.route('/decks/create', method='POST')
@app.route('/decks/create/', method='POST')
def decks_create_post(db):
DeckMakerService().decks_post(db, request.forms)
redirect("/decks")
# Deck edit form by id
@app.route('/decks/edit/:item')
@app.route('/decks/edit/:item/')
def decks_edit(db, item):
return DeckMakerService().decks_edit(db, item)
# Deck edit form post
@app.route('/decks/edit/:item', method='POST')
@app.route('/decks/edit/:item/', method='POST')
def decks_edit_post(db, item):
DeckMakerService().decks_post(db, request.forms, item=item)
redirect(f"/decks/{item}")
# Deck view page by id
@app.route('/decks/:item')
@app.route('/decks/:item/')
def decks_view(db, item):
return DeckMakerService().decks(db, request.query, item)
# Deck delete by id
@app.route('/decks/:item', method='POST')
@app.route('/decks/:item/', method='POST')
def decks_delete(db, item):
DeckMakerService().decks_post(db, request.forms, item=item)
redirect("/decks/")
# Search index page
@app.route('/search')
@app.route('/search/')
def search(db):
return DeckMakerService().search(db, request.query)
# Search form post
@app.route('/search', method='POST')
@app.route('/search/', method='POST')
def search_post(db):
    DeckMakerService().search_post(db, request.query, request.forms)
    redirect('/search/?' + request.query_string)
# Advanced search page
@app.route('/advanced-search')
@app.route('/advanced-search/')
def advanced_search(db):
    return DeckMakerService().advanced_search(db)
# About page
@app.route('/about')
@app.route('/about/')
def about(db):
    return template('about')
# Static file for assets
@app.route('/assets/<filepath:path>')
def asset(filepath):
return static_file(filepath, root='./assets')
# Endpoint for card autocomplete
@app.route('/api/card_autocomplete/', method='POST')
@app.route('/api/card_autocomplete', method='POST')
def card_autocomplete(db):
return DeckMakerService().card_autocomplete(db, request.json)
# 404 page
@app.error(404)
@app.error(405)
def error404(error):
return template('card_404')
if __name__ == "__main__":
Schema()
run(app, host='localhost', port=8080, debug=True, reloader=False)
```
#### File: deck-wolf/tests/test_viewtilities.py
```python
import unittest
import src.viewtilities as util
class TestViewtilities(unittest.TestCase):
def test_legality(self):
self.assertEqual("Legal", util.legality("legal"))
self.assertEqual("Not Legal", util.legality("not_legal"))
self.assertEqual("Restricted", util.legality("restricted"))
self.assertEqual("Banned", util.legality("banned"))
self.assertEqual("NO ENTRY", util.legality(None))
self.assertEqual("NO ENTRY", util.legality(15))
self.assertEqual("NO ENTRY", util.legality("Error"))
def test_legality_bg(self):
self.assertEqual("table-success", util.legality_bg("legal"))
self.assertEqual("table-secondary", util.legality_bg("not_legal"))
self.assertEqual("table-warning", util.legality_bg("restricted"))
self.assertEqual("table-danger", util.legality_bg("banned"))
self.assertEqual(None, util.legality_bg(None))
self.assertEqual(None, util.legality_bg(15))
self.assertEqual(None, util.legality_bg("Error"))
def test_rarity(self):
self.assertEqual("Common", util.rarity("common"))
self.assertEqual("Uncommon", util.rarity("uncommon"))
self.assertEqual("Rare", util.rarity("rare"))
self.assertEqual("Mythic", util.rarity("mythic"))
self.assertEqual("NO ENTRY", util.rarity(None))
self.assertEqual("NO ENTRY", util.rarity(15))
self.assertEqual("NO ENTRY", util.rarity("Error"))
def test_curency(self):
self.assertEqual("$0.12", util.currency(0.12, "$"))
self.assertEqual("$0.12", util.currency("0.12", "$"))
self.assertEqual("0.12", util.currency("0.12", None))
self.assertEqual("--", util.currency(None, "$"))
self.assertEqual("0.12", util.currency(0.12, None))
``` |
{
"source": "JohnParken/pycorrector",
"score": 3
} |
#### File: pycorrector/seq2seq_attention/infer.py
```python
import os
import sys
sys.path.append('../..')
from pycorrector.seq2seq_attention import config
from pycorrector.seq2seq_attention.model import Seq2SeqModel
from pycorrector.seq2seq_attention.data_reader import load_word_dict
def plot_attention(attention, sentence, predicted_sentence, img_path):
"""
Plotting the attention weights
:param attention:
:param sentence:
:param predicted_sentence:
:param img_path:
:return:
"""
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from matplotlib import font_manager
my_font = font_manager.FontProperties(fname="/Library/Fonts/Songti.ttc")
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(1, 1, 1)
ax.matshow(attention, cmap='viridis')
fontdict = {'fontsize': 12}
ax.set_xticklabels([''] + sentence, fontdict=fontdict, fontproperties=my_font) # rotation=90,
ax.set_yticklabels([''] + predicted_sentence, fontdict=fontdict, fontproperties=my_font)
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
# plt.show()
plt.savefig(img_path)
print("save attention weight image to :", img_path)
plt.clf()
def infer(model, sentence, attention_image_path=''):
result, sentence, attention_plot = model.evaluate(sentence)
print('Input: %s' % (sentence))
print('Predicted translation: {}'.format(result))
attention_plot = attention_plot[:len(result.split(' ')), :len(sentence.split(' '))]
if attention_image_path:
try:
plot_attention(attention_plot, sentence.split(' '), result.split(' '), attention_image_path)
except Exception as e:
print(e)
pass
if __name__ == "__main__":
if config.gpu_id > -1:
os.environ["CUDA_VISIBLE_DEVICES"] = str(config.gpu_id)
else:
os.environ["CUDA_VISIBLE_DEVICES"] = '-1'
inputs = [
'以 前 , 包 括 中 国 , 我 国 也 是 。',
'我 现 在 好 得 多 了 。',
'这几年前时间,',
'歌曲使人的感到快乐,',
'会能够大幅减少互相抱怨的情况。'
]
source_word2id = load_word_dict(config.save_src_vocab_path)
target_word2id = load_word_dict(config.save_trg_vocab_path)
model = Seq2SeqModel(source_word2id, target_word2id, embedding_dim=config.embedding_dim,
hidden_dim=config.hidden_dim,
batch_size=config.batch_size, maxlen=config.maxlen, checkpoint_path=config.model_dir,
gpu_id=config.gpu_id)
for id, i in enumerate(inputs):
img_path = os.path.join(config.output_dir, str(id) + ".png")
infer(model, i, img_path)
# result:
# input:由我起开始做。
# output:我开始做。
# input:没有解决这个问题,
# output:没有解决的问题,
# input:由我起开始做。
```
#### File: pycorrector/test/pycorrector_test.py
```python
import pycorrector
#@profile (memory profiling)
def test(path, result_path):
count, count_all = 0,0
#_badcase = open('../../positive_badcase.txt','w', encoding='utf-8')
with open(path, 'r', encoding='utf-8') as file, open(result_path,'w',encoding='utf-8') as wfile:
line = file.readline()
while line != None and line != '':
count_all += 1
            # Code for testing the SIGHAN data
index, origin_string = line.strip().split(' ')[0], line.strip().split(' ')[1]
#if count_all == 4:
# break
            # Code for testing the transcript data (disabled in the block below)
"""
origin_string = line.strip().split(',')[0]
if len(line.strip().split(',')) > 1:
origin_string += line.strip().split(',')[1]
corr_string, detail = pycorrector.correct(origin_string)
if str(detail) == "[]":
count += 1
else:
#wfile.write('{}\t{}\n'.format(corr_string, detail))
wfile.write('{}\t{}\t{}\n'.format(index, corr_string, detail))
print('{} / {}'.format(count, count_all))
"""
idx = index.strip().split('=')[1].strip(')')
idx,corr_string, detail = pycorrector.correct(idx,origin_string)
wfile.write('{}\t{}\t{}\n'.format(index, corr_string, detail))
line = file.readline()
if __name__ == '__main__':
path = './corpus/sighan15.txt'
result_path = './corpus/sighan15_result.txt'
test(path, result_path)
```
#### File: pycorrector/tests/eval_test.py
```python
import sys
sys.path.append("../")
import os
from pycorrector.utils.eval import eval_bcmi_data, get_bcmi_corpus, eval_sighan_corpus
pwd_path = os.path.abspath(os.path.dirname(__file__))
bcmi_path = os.path.join(pwd_path, '../pycorrector/data/cn/bcmi.txt')
clp_path = os.path.join(pwd_path, '../pycorrector/data/cn/clp14_C1.pkl')
sighan_path = os.path.join(pwd_path, '../pycorrector/data/cn/sighan15_A2.pkl')
cged_path = os.path.join(pwd_path, '../pycorrector/data/cn/CGED/CGED16_HSK_TrainingSet.xml')
def test_get_bcmi_data():
s = '青蛙是庄家的好朋友,我们要宝((保))护它们。'
print(get_bcmi_corpus(s))
def test_eval_bcmi_data():
rate, right_dict, wrong_dict = eval_bcmi_data(bcmi_path, True)
print('right rate:{}, right_dict:{}, wrong_dict:{}'.format(rate, right_dict, wrong_dict))
# right count: 104 ;sentence size: 383, right rate:0.271
def test_clp_data():
rate = eval_sighan_corpus(clp_path, True)
print('right rate:{}'.format(rate))
# rate:1.6
def test_sighan_data():
rate = eval_sighan_corpus(sighan_path, True)
print('right rate:{}'.format(rate))
# rate:1.53
``` |
{
"source": "john-parton/django-ltree-utils",
"score": 2
} |
#### File: django-ltree-utils/django_ltree_utils/forms.py
```python
from django import forms
def move_node_form_factory(manager):
class Form(forms.ModelForm):
position = forms.ChoiceField(choices=manager.Position.choices)
relative_to = forms.ModelChoiceField(
queryset=manager.all(), required=False
)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
if self.instance and self.instance.path:
self.fields['relative_to'].queryset = manager.exclude(
**{f'{manager.path_field}__descendant_of': getattr(self.instance, manager.path_field)}
)
position, relative_to = manager._get_relative_position(self.instance)
self.fields['position'].initial = position
self.fields['relative_to'].initial = relative_to
class Meta:
model = manager.model
exclude = [manager.path_field]
def clean(self, *args, **kwargs):
cleaned_data = super().clean(*args, **kwargs)
position = cleaned_data['position']
relative_to = True if manager.Position(position) == manager.Position.ROOT else cleaned_data['relative_to']
moves = manager._resolve_position(
self.instance, {
position: relative_to
}
)
self.cleaned_data['_moves'] = moves
def save(self, *args, **kwargs):
manager._bulk_move(self.cleaned_data['_moves'])
return super().save(*args, **kwargs)
return Form
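# Hypothetical usage sketch (model and variable names are made up):
#   CategoryMoveForm = move_node_form_factory(Category.objects)
#   form = CategoryMoveForm(request.POST, instance=category)
#   if form.is_valid():
#       form.save()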
```
#### File: django-ltree-utils/django_ltree_utils/position.py
```python
import typing
from django.db import models
from django.utils.translation import gettext_lazy as _
from .paths import Path, PathFactory
class RelativePosition(models.TextChoices):
CHILD = 'child_of', _("Child of")
FIRST_CHILD = 'first_child_of', ("First child of")
# Same functionality as child_of
LAST_CHILD = 'last_child_of', _("Last child of")
# These could be "before" and "after"
BEFORE = 'before', _("Before")
AFTER = 'after', _("After")
# Right-most root element
ROOT = 'root', _("Last root")
# Consider just making its own functions
# Or put back on manager or something
@classmethod
def resolve(cls, kwargs, path_field: str, path_factory: PathFactory) -> typing.Tuple[Path, typing.Optional[int]]:
""" Parse kwargs and normalize relative position to always be
tuple = (parent_path, nth-child)
"""
# Path field is used to unwrap/duck-type models that have a path attribute
positions: typing.Dict['RelativePosition', typing.Any] = {}
for position in cls:
try:
positions[position] = kwargs.pop(position.value)
except KeyError:
continue
if len(positions) != 1:
raise TypeError(f"Could not resolve position: {positions!r}")
position, relative_to = positions.popitem()
if position == cls.ROOT:
if relative_to is not True:
raise ValueError(f"Expected kwarg root=True, got root={relative_to!r}")
return [], None
# Duck-type model instances
# Might want to use isinstance instead?
if hasattr(relative_to, path_field):
relative_to = getattr(relative_to, path_field)
# TODO Better error handling here?
# Convert strings to lists?
if not isinstance(relative_to, list):
relative_to = relative_to.split('.')
# last_child_of is a more verbose alias for child_of
if position in {cls.CHILD, cls.LAST_CHILD}:
return relative_to, None
elif position == cls.FIRST_CHILD:
return relative_to, 0
elif position in {cls.BEFORE, cls.AFTER}:
parent, child_index = path_factory.split(relative_to)
if position == cls.AFTER:
child_index += 1
return parent, child_index
else:
# Should never get here
raise Exception
class SortedPosition(models.TextChoices):
CHILD = 'child_of', _("Child of")
SIBLING = 'sibling', _("Sibling of")
ROOT = 'root', _("Root")
@classmethod
def resolve(cls, kwargs, path_field: str, path_factory: PathFactory) -> typing.Tuple[Path, typing.Optional[int]]:
""" Parse kwargs and normalize relative position to always be
tuple = (parent_path, nth-child)
"""
# Path field is used to unwrap/duck-type models that have a path attribute
positions: typing.Dict['SortedPosition', typing.Any] = {}
for position in cls:
try:
positions[position] = kwargs.pop(position.value)
except KeyError:
continue
if len(positions) != 1:
raise TypeError(f"Could not resolve position: {positions!r}")
position, relative_to = positions.popitem()
if position == cls.ROOT:
if relative_to is not True:
raise ValueError(f"Expected kwarg root=True, got root={relative_to!r}")
return [], None
# Duck-type model instances
# Might want to use isinstance instead?
if hasattr(relative_to, path_field):
relative_to = getattr(relative_to, path_field)
# TODO Better error handling here?
# Convert strings to lists?
if not isinstance(relative_to, list):
relative_to = relative_to.split('.')
        if position == cls.CHILD:
            return relative_to, None
elif position == cls.SIBLING:
            parent, _ = path_factory.split(relative_to)
            return parent, None
        else:
            # Unreachable: every SortedPosition member is handled above
            raise AssertionError(f"Unhandled position: {position!r}")
``` |
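A minimal sketch of how `RelativePosition.resolve` normalizes its keyword arguments into a `(parent_path, index)` pair; constructing `PathFactory()` with no arguments is an assumption here:
```python
from django_ltree_utils.paths import PathFactory
from django_ltree_utils.position import RelativePosition

factory = PathFactory()  # assumed default construction

# Append as the last child of the node at path "a.b".
RelativePosition.resolve({"child_of": "a.b"}, "path", factory)        # (['a', 'b'], None)

# Insert as the first child of the same node.
RelativePosition.resolve({"first_child_of": "a.b"}, "path", factory)  # (['a', 'b'], 0)

# Become the new last root.
RelativePosition.resolve({"root": True}, "path", factory)             # ([], None)
```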
{
"source": "john-parton/django-simple-history",
"score": 2
} |
#### File: simple_history/tests/admin.py
```python
from __future__ import unicode_literals
from django.contrib import admin
from simple_history.admin import SimpleHistoryAdmin
from simple_history.tests.external.models import ExternalModelWithCustomUserIdField
from .models import (
Book,
Choice,
ConcreteExternal,
Document,
Employee,
FileModel,
Paper,
Person,
Planet,
Poll,
)
class PersonAdmin(SimpleHistoryAdmin):
def has_change_permission(self, request, obj=None):
return False
class ChoiceAdmin(SimpleHistoryAdmin):
history_list_display = ["votes"]
class FileModelAdmin(SimpleHistoryAdmin):
def test_method(self, obj):
return "test_method_value"
history_list_display = ["title", "test_method"]
class PlanetAdmin(SimpleHistoryAdmin):
def test_method(self, obj):
return "test_method_value"
history_list_display = ["title", "test_method"]
admin.site.register(Poll, SimpleHistoryAdmin)
admin.site.register(Choice, ChoiceAdmin)
admin.site.register(Person, PersonAdmin)
admin.site.register(Book, SimpleHistoryAdmin)
admin.site.register(Document, SimpleHistoryAdmin)
admin.site.register(Paper, SimpleHistoryAdmin)
admin.site.register(Employee, SimpleHistoryAdmin)
admin.site.register(ConcreteExternal, SimpleHistoryAdmin)
admin.site.register(ExternalModelWithCustomUserIdField, SimpleHistoryAdmin)
admin.site.register(FileModel, FileModelAdmin)
admin.site.register(Planet, PlanetAdmin)
``` |
{
"source": "john-parton/mailchimp-asyncio",
"score": 2
} |
#### File: mailchimp_marketing_asyncio/models/account_contact.py
```python
import pprint
import re # noqa: F401
import six
class AccountContact(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'company': 'str',
'addr1': 'str',
'addr2': 'str',
'city': 'str',
'state': 'str',
'zip': 'str',
'country': 'str'
}
attribute_map = {
'company': 'company',
'addr1': 'addr1',
'addr2': 'addr2',
'city': 'city',
'state': 'state',
'zip': 'zip',
'country': 'country'
}
def __init__(self, company=None, addr1=None, addr2=None, city=None, state=None, zip=None, country=None): # noqa: E501
"""AccountContact - a model defined in Swagger""" # noqa: E501
self._company = None
self._addr1 = None
self._addr2 = None
self._city = None
self._state = None
self._zip = None
self._country = None
self.discriminator = None
if company is not None:
self.company = company
if addr1 is not None:
self.addr1 = addr1
if addr2 is not None:
self.addr2 = addr2
if city is not None:
self.city = city
if state is not None:
self.state = state
if zip is not None:
self.zip = zip
if country is not None:
self.country = country
@property
def company(self):
"""Gets the company of this AccountContact. # noqa: E501
The company name for the account. # noqa: E501
:return: The company of this AccountContact. # noqa: E501
:rtype: str
"""
return self._company
@company.setter
def company(self, company):
"""Sets the company of this AccountContact.
The company name for the account. # noqa: E501
:param company: The company of this AccountContact. # noqa: E501
:type: str
"""
self._company = company
@property
def addr1(self):
"""Gets the addr1 of this AccountContact. # noqa: E501
The street address for the account contact. # noqa: E501
:return: The addr1 of this AccountContact. # noqa: E501
:rtype: str
"""
return self._addr1
@addr1.setter
def addr1(self, addr1):
"""Sets the addr1 of this AccountContact.
The street address for the account contact. # noqa: E501
:param addr1: The addr1 of this AccountContact. # noqa: E501
:type: str
"""
self._addr1 = addr1
@property
def addr2(self):
"""Gets the addr2 of this AccountContact. # noqa: E501
The street address for the account contact. # noqa: E501
:return: The addr2 of this AccountContact. # noqa: E501
:rtype: str
"""
return self._addr2
@addr2.setter
def addr2(self, addr2):
"""Sets the addr2 of this AccountContact.
The street address for the account contact. # noqa: E501
:param addr2: The addr2 of this AccountContact. # noqa: E501
:type: str
"""
self._addr2 = addr2
@property
def city(self):
"""Gets the city of this AccountContact. # noqa: E501
The city for the account contact. # noqa: E501
:return: The city of this AccountContact. # noqa: E501
:rtype: str
"""
return self._city
@city.setter
def city(self, city):
"""Sets the city of this AccountContact.
The city for the account contact. # noqa: E501
:param city: The city of this AccountContact. # noqa: E501
:type: str
"""
self._city = city
@property
def state(self):
"""Gets the state of this AccountContact. # noqa: E501
The state for the account contact. # noqa: E501
:return: The state of this AccountContact. # noqa: E501
:rtype: str
"""
return self._state
@state.setter
def state(self, state):
"""Sets the state of this AccountContact.
The state for the account contact. # noqa: E501
:param state: The state of this AccountContact. # noqa: E501
:type: str
"""
self._state = state
@property
def zip(self):
"""Gets the zip of this AccountContact. # noqa: E501
The zip code for the account contact. # noqa: E501
:return: The zip of this AccountContact. # noqa: E501
:rtype: str
"""
return self._zip
@zip.setter
def zip(self, zip):
"""Sets the zip of this AccountContact.
The zip code for the account contact. # noqa: E501
:param zip: The zip of this AccountContact. # noqa: E501
:type: str
"""
self._zip = zip
@property
def country(self):
"""Gets the country of this AccountContact. # noqa: E501
The country for the account contact. # noqa: E501
:return: The country of this AccountContact. # noqa: E501
:rtype: str
"""
return self._country
@country.setter
def country(self, country):
"""Sets the country of this AccountContact.
The country for the account contact. # noqa: E501
:param country: The country of this AccountContact. # noqa: E501
:type: str
"""
self._country = country
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AccountContact, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AccountContact):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
```
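A short usage sketch for the generated model above: attribute names mirror `swagger_types`, and `to_dict()` returns plain values ready for JSON serialization.
```python
contact = AccountContact(company="Acme", city="Springfield", country="US")
contact.addr1 = "123 Main St"   # property setters simply assign the private attribute

contact.to_dict()
# {'company': 'Acme', 'addr1': '123 Main St', 'addr2': None, 'city': 'Springfield',
#  'state': None, 'zip': None, 'country': 'US'}

contact == AccountContact(company="Acme")   # False: __eq__ compares the full __dict__
```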
#### File: mailchimp_marketing_asyncio/models/api_health_status.py
```python
import pprint
import re # noqa: F401
import six
class APIHealthStatus(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'health_status': 'str'
}
attribute_map = {
'health_status': 'health_status'
}
def __init__(self, health_status=None): # noqa: E501
"""APIHealthStatus - a model defined in Swagger""" # noqa: E501
self._health_status = None
self.discriminator = None
if health_status is not None:
self.health_status = health_status
@property
def health_status(self):
"""Gets the health_status of this APIHealthStatus. # noqa: E501
This will return a constant string value if the request is successful. Ex. \"Everything's Chimpy!\" # noqa: E501
:return: The health_status of this APIHealthStatus. # noqa: E501
:rtype: str
"""
return self._health_status
@health_status.setter
def health_status(self, health_status):
"""Sets the health_status of this APIHealthStatus.
This will return a constant string value if the request is successful. Ex. \"Everything's Chimpy!\" # noqa: E501
:param health_status: The health_status of this APIHealthStatus. # noqa: E501
:type: str
"""
self._health_status = health_status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(APIHealthStatus, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, APIHealthStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
```
#### File: mailchimp_marketing_asyncio/models/body1.py
```python
import pprint
import re # noqa: F401
import six
class Body1(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'schedule_time': 'datetime',
'timewarp': 'bool',
'batch_delivery': 'BatchDelivery'
}
attribute_map = {
'schedule_time': 'schedule_time',
'timewarp': 'timewarp',
'batch_delivery': 'batch_delivery'
}
def __init__(self, schedule_time=None, timewarp=None, batch_delivery=None): # noqa: E501
"""Body1 - a model defined in Swagger""" # noqa: E501
self._schedule_time = None
self._timewarp = None
self._batch_delivery = None
self.discriminator = None
self.schedule_time = schedule_time
if timewarp is not None:
self.timewarp = timewarp
if batch_delivery is not None:
self.batch_delivery = batch_delivery
@property
def schedule_time(self):
"""Gets the schedule_time of this Body1. # noqa: E501
The UTC date and time to schedule the campaign for delivery in ISO 8601 format. Campaigns may only be scheduled to send on the quarter-hour (:00, :15, :30, :45). # noqa: E501
:return: The schedule_time of this Body1. # noqa: E501
:rtype: datetime
"""
return self._schedule_time
@schedule_time.setter
def schedule_time(self, schedule_time):
"""Sets the schedule_time of this Body1.
The UTC date and time to schedule the campaign for delivery in ISO 8601 format. Campaigns may only be scheduled to send on the quarter-hour (:00, :15, :30, :45). # noqa: E501
:param schedule_time: The schedule_time of this Body1. # noqa: E501
:type: datetime
"""
if schedule_time is None:
raise ValueError("Invalid value for `schedule_time`, must not be `None`") # noqa: E501
self._schedule_time = schedule_time
@property
def timewarp(self):
"""Gets the timewarp of this Body1. # noqa: E501
Choose whether the campaign should use [Timewarp](https://mailchimp.com/help/use-timewarp/) when sending. Campaigns scheduled with Timewarp are localized based on the recipients' time zones. For example, a Timewarp campaign with a `schedule_time` of 13:00 will be sent to each recipient at 1:00pm in their local time. Cannot be set to `true` for campaigns using [Batch Delivery](https://mailchimp.com/help/schedule-batch-delivery/). # noqa: E501
:return: The timewarp of this Body1. # noqa: E501
:rtype: bool
"""
return self._timewarp
@timewarp.setter
def timewarp(self, timewarp):
"""Sets the timewarp of this Body1.
Choose whether the campaign should use [Timewarp](https://mailchimp.com/help/use-timewarp/) when sending. Campaigns scheduled with Timewarp are localized based on the recipients' time zones. For example, a Timewarp campaign with a `schedule_time` of 13:00 will be sent to each recipient at 1:00pm in their local time. Cannot be set to `true` for campaigns using [Batch Delivery](https://mailchimp.com/help/schedule-batch-delivery/). # noqa: E501
:param timewarp: The timewarp of this Body1. # noqa: E501
:type: bool
"""
self._timewarp = timewarp
@property
def batch_delivery(self):
"""Gets the batch_delivery of this Body1. # noqa: E501
:return: The batch_delivery of this Body1. # noqa: E501
:rtype: BatchDelivery
"""
return self._batch_delivery
@batch_delivery.setter
def batch_delivery(self, batch_delivery):
"""Sets the batch_delivery of this Body1.
:param batch_delivery: The batch_delivery of this Body1. # noqa: E501
:type: BatchDelivery
"""
self._batch_delivery = batch_delivery
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Body1, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Body1):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
```
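One behavioral difference from the models above, sketched briefly: `Body1.schedule_time` is required, so its setter (and therefore the constructor) rejects `None`.
```python
from datetime import datetime, timezone

body = Body1(schedule_time=datetime(2024, 1, 1, 13, 15, tzinfo=timezone.utc))
body.timewarp = True

try:
    body.schedule_time = None
except ValueError as exc:
    print(exc)   # Invalid value for `schedule_time`, must not be `None`
```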
#### File: mailchimp_marketing_asyncio/models/campaign_content.py
```python
import pprint
import re # noqa: F401
import six
class CampaignContent(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'variate_contents': 'list[CampaignContentVariateContents]',
'plain_text': 'str',
'html': 'str',
'archive_html': 'str',
'links': 'list[ResourceLink]'
}
attribute_map = {
'variate_contents': 'variate_contents',
'plain_text': 'plain_text',
'html': 'html',
'archive_html': 'archive_html',
'links': '_links'
}
def __init__(self, variate_contents=None, plain_text=None, html=None, archive_html=None, links=None): # noqa: E501
"""CampaignContent - a model defined in Swagger""" # noqa: E501
self._variate_contents = None
self._plain_text = None
self._html = None
self._archive_html = None
self._links = None
self.discriminator = None
if variate_contents is not None:
self.variate_contents = variate_contents
if plain_text is not None:
self.plain_text = plain_text
if html is not None:
self.html = html
if archive_html is not None:
self.archive_html = archive_html
if links is not None:
self.links = links
@property
def variate_contents(self):
"""Gets the variate_contents of this CampaignContent. # noqa: E501
Content options for multivariate campaigns. # noqa: E501
:return: The variate_contents of this CampaignContent. # noqa: E501
:rtype: list[CampaignContentVariateContents]
"""
return self._variate_contents
@variate_contents.setter
def variate_contents(self, variate_contents):
"""Sets the variate_contents of this CampaignContent.
Content options for multivariate campaigns. # noqa: E501
:param variate_contents: The variate_contents of this CampaignContent. # noqa: E501
:type: list[CampaignContentVariateContents]
"""
self._variate_contents = variate_contents
@property
def plain_text(self):
"""Gets the plain_text of this CampaignContent. # noqa: E501
The plain-text portion of the campaign. If left unspecified, we'll generate this automatically. # noqa: E501
:return: The plain_text of this CampaignContent. # noqa: E501
:rtype: str
"""
return self._plain_text
@plain_text.setter
def plain_text(self, plain_text):
"""Sets the plain_text of this CampaignContent.
The plain-text portion of the campaign. If left unspecified, we'll generate this automatically. # noqa: E501
:param plain_text: The plain_text of this CampaignContent. # noqa: E501
:type: str
"""
self._plain_text = plain_text
@property
def html(self):
"""Gets the html of this CampaignContent. # noqa: E501
The raw HTML for the campaign. # noqa: E501
:return: The html of this CampaignContent. # noqa: E501
:rtype: str
"""
return self._html
@html.setter
def html(self, html):
"""Sets the html of this CampaignContent.
The raw HTML for the campaign. # noqa: E501
:param html: The html of this CampaignContent. # noqa: E501
:type: str
"""
self._html = html
@property
def archive_html(self):
"""Gets the archive_html of this CampaignContent. # noqa: E501
The Archive HTML for the campaign. # noqa: E501
:return: The archive_html of this CampaignContent. # noqa: E501
:rtype: str
"""
return self._archive_html
@archive_html.setter
def archive_html(self, archive_html):
"""Sets the archive_html of this CampaignContent.
The Archive HTML for the campaign. # noqa: E501
:param archive_html: The archive_html of this CampaignContent. # noqa: E501
:type: str
"""
self._archive_html = archive_html
@property
def links(self):
"""Gets the links of this CampaignContent. # noqa: E501
A list of link types and descriptions for the API schema documents. # noqa: E501
:return: The links of this CampaignContent. # noqa: E501
:rtype: list[ResourceLink]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this CampaignContent.
A list of link types and descriptions for the API schema documents. # noqa: E501
:param links: The links of this CampaignContent. # noqa: E501
:type: list[ResourceLink]
"""
self._links = links
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(CampaignContent, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CampaignContent):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
```
#### File: mailchimp_marketing_asyncio/models/campaign_report.py
```python
import pprint
import re # noqa: F401
import six
class CampaignReport(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'campaign_title': 'str',
'type': 'str',
'list_id': 'str',
'list_is_active': 'bool',
'list_name': 'str',
'subject_line': 'str',
'preview_text': 'str',
'emails_sent': 'int',
'abuse_reports': 'int',
'unsubscribed': 'int',
'send_time': 'datetime',
'rss_last_send': 'datetime',
'bounces': 'Bounces',
'forwards': 'Forwards',
'opens': 'Opens',
'clicks': 'Clicks',
'facebook_likes': 'FacebookLikes',
'industry_stats': 'IndustryStats1',
'list_stats': 'ListStats',
'ab_split': 'ABSplitStats',
'timewarp': 'list[CampaignReports1Timewarp]',
'timeseries': 'list[CampaignReports1Timeseries]',
'share_report': 'ShareReport',
'ecommerce': 'ECommerceReport1',
'delivery_status': 'CampaignDeliveryStatus',
'links': 'list[ResourceLink]'
}
attribute_map = {
'id': 'id',
'campaign_title': 'campaign_title',
'type': 'type',
'list_id': 'list_id',
'list_is_active': 'list_is_active',
'list_name': 'list_name',
'subject_line': 'subject_line',
'preview_text': 'preview_text',
'emails_sent': 'emails_sent',
'abuse_reports': 'abuse_reports',
'unsubscribed': 'unsubscribed',
'send_time': 'send_time',
'rss_last_send': 'rss_last_send',
'bounces': 'bounces',
'forwards': 'forwards',
'opens': 'opens',
'clicks': 'clicks',
'facebook_likes': 'facebook_likes',
'industry_stats': 'industry_stats',
'list_stats': 'list_stats',
'ab_split': 'ab_split',
'timewarp': 'timewarp',
'timeseries': 'timeseries',
'share_report': 'share_report',
'ecommerce': 'ecommerce',
'delivery_status': 'delivery_status',
'links': '_links'
}
def __init__(self, id=None, campaign_title=None, type=None, list_id=None, list_is_active=None, list_name=None, subject_line=None, preview_text=None, emails_sent=None, abuse_reports=None, unsubscribed=None, send_time=None, rss_last_send=None, bounces=None, forwards=None, opens=None, clicks=None, facebook_likes=None, industry_stats=None, list_stats=None, ab_split=None, timewarp=None, timeseries=None, share_report=None, ecommerce=None, delivery_status=None, links=None): # noqa: E501
"""CampaignReport - a model defined in Swagger""" # noqa: E501
self._id = None
self._campaign_title = None
self._type = None
self._list_id = None
self._list_is_active = None
self._list_name = None
self._subject_line = None
self._preview_text = None
self._emails_sent = None
self._abuse_reports = None
self._unsubscribed = None
self._send_time = None
self._rss_last_send = None
self._bounces = None
self._forwards = None
self._opens = None
self._clicks = None
self._facebook_likes = None
self._industry_stats = None
self._list_stats = None
self._ab_split = None
self._timewarp = None
self._timeseries = None
self._share_report = None
self._ecommerce = None
self._delivery_status = None
self._links = None
self.discriminator = None
if id is not None:
self.id = id
if campaign_title is not None:
self.campaign_title = campaign_title
if type is not None:
self.type = type
if list_id is not None:
self.list_id = list_id
if list_is_active is not None:
self.list_is_active = list_is_active
if list_name is not None:
self.list_name = list_name
if subject_line is not None:
self.subject_line = subject_line
if preview_text is not None:
self.preview_text = preview_text
if emails_sent is not None:
self.emails_sent = emails_sent
if abuse_reports is not None:
self.abuse_reports = abuse_reports
if unsubscribed is not None:
self.unsubscribed = unsubscribed
if send_time is not None:
self.send_time = send_time
if rss_last_send is not None:
self.rss_last_send = rss_last_send
if bounces is not None:
self.bounces = bounces
if forwards is not None:
self.forwards = forwards
if opens is not None:
self.opens = opens
if clicks is not None:
self.clicks = clicks
if facebook_likes is not None:
self.facebook_likes = facebook_likes
if industry_stats is not None:
self.industry_stats = industry_stats
if list_stats is not None:
self.list_stats = list_stats
if ab_split is not None:
self.ab_split = ab_split
if timewarp is not None:
self.timewarp = timewarp
if timeseries is not None:
self.timeseries = timeseries
if share_report is not None:
self.share_report = share_report
if ecommerce is not None:
self.ecommerce = ecommerce
if delivery_status is not None:
self.delivery_status = delivery_status
if links is not None:
self.links = links
@property
def id(self):
"""Gets the id of this CampaignReport. # noqa: E501
A string that uniquely identifies this campaign. # noqa: E501
:return: The id of this CampaignReport. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this CampaignReport.
A string that uniquely identifies this campaign. # noqa: E501
:param id: The id of this CampaignReport. # noqa: E501
:type: str
"""
self._id = id
@property
def campaign_title(self):
"""Gets the campaign_title of this CampaignReport. # noqa: E501
The title of the campaign. # noqa: E501
:return: The campaign_title of this CampaignReport. # noqa: E501
:rtype: str
"""
return self._campaign_title
@campaign_title.setter
def campaign_title(self, campaign_title):
"""Sets the campaign_title of this CampaignReport.
The title of the campaign. # noqa: E501
:param campaign_title: The campaign_title of this CampaignReport. # noqa: E501
:type: str
"""
self._campaign_title = campaign_title
@property
def type(self):
"""Gets the type of this CampaignReport. # noqa: E501
The type of campaign (regular, plain-text, ab_split, rss, automation, variate, or auto). # noqa: E501
:return: The type of this CampaignReport. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this CampaignReport.
The type of campaign (regular, plain-text, ab_split, rss, automation, variate, or auto). # noqa: E501
:param type: The type of this CampaignReport. # noqa: E501
:type: str
"""
self._type = type
@property
def list_id(self):
"""Gets the list_id of this CampaignReport. # noqa: E501
The unique list id. # noqa: E501
:return: The list_id of this CampaignReport. # noqa: E501
:rtype: str
"""
return self._list_id
@list_id.setter
def list_id(self, list_id):
"""Sets the list_id of this CampaignReport.
The unique list id. # noqa: E501
:param list_id: The list_id of this CampaignReport. # noqa: E501
:type: str
"""
self._list_id = list_id
@property
def list_is_active(self):
"""Gets the list_is_active of this CampaignReport. # noqa: E501
The status of the list used, namely if it's deleted or disabled. # noqa: E501
:return: The list_is_active of this CampaignReport. # noqa: E501
:rtype: bool
"""
return self._list_is_active
@list_is_active.setter
def list_is_active(self, list_is_active):
"""Sets the list_is_active of this CampaignReport.
The status of the list used, namely if it's deleted or disabled. # noqa: E501
:param list_is_active: The list_is_active of this CampaignReport. # noqa: E501
:type: bool
"""
self._list_is_active = list_is_active
@property
def list_name(self):
"""Gets the list_name of this CampaignReport. # noqa: E501
The name of the list. # noqa: E501
:return: The list_name of this CampaignReport. # noqa: E501
:rtype: str
"""
return self._list_name
@list_name.setter
def list_name(self, list_name):
"""Sets the list_name of this CampaignReport.
The name of the list. # noqa: E501
:param list_name: The list_name of this CampaignReport. # noqa: E501
:type: str
"""
self._list_name = list_name
@property
def subject_line(self):
"""Gets the subject_line of this CampaignReport. # noqa: E501
The subject line for the campaign. # noqa: E501
:return: The subject_line of this CampaignReport. # noqa: E501
:rtype: str
"""
return self._subject_line
@subject_line.setter
def subject_line(self, subject_line):
"""Sets the subject_line of this CampaignReport.
The subject line for the campaign. # noqa: E501
:param subject_line: The subject_line of this CampaignReport. # noqa: E501
:type: str
"""
self._subject_line = subject_line
@property
def preview_text(self):
"""Gets the preview_text of this CampaignReport. # noqa: E501
The preview text for the campaign. # noqa: E501
:return: The preview_text of this CampaignReport. # noqa: E501
:rtype: str
"""
return self._preview_text
@preview_text.setter
def preview_text(self, preview_text):
"""Sets the preview_text of this CampaignReport.
The preview text for the campaign. # noqa: E501
:param preview_text: The preview_text of this CampaignReport. # noqa: E501
:type: str
"""
self._preview_text = preview_text
@property
def emails_sent(self):
"""Gets the emails_sent of this CampaignReport. # noqa: E501
The total number of emails sent for this campaign. # noqa: E501
:return: The emails_sent of this CampaignReport. # noqa: E501
:rtype: int
"""
return self._emails_sent
@emails_sent.setter
def emails_sent(self, emails_sent):
"""Sets the emails_sent of this CampaignReport.
The total number of emails sent for this campaign. # noqa: E501
:param emails_sent: The emails_sent of this CampaignReport. # noqa: E501
:type: int
"""
self._emails_sent = emails_sent
@property
def abuse_reports(self):
"""Gets the abuse_reports of this CampaignReport. # noqa: E501
The number of abuse reports generated for this campaign. # noqa: E501
:return: The abuse_reports of this CampaignReport. # noqa: E501
:rtype: int
"""
return self._abuse_reports
@abuse_reports.setter
def abuse_reports(self, abuse_reports):
"""Sets the abuse_reports of this CampaignReport.
The number of abuse reports generated for this campaign. # noqa: E501
:param abuse_reports: The abuse_reports of this CampaignReport. # noqa: E501
:type: int
"""
self._abuse_reports = abuse_reports
@property
def unsubscribed(self):
"""Gets the unsubscribed of this CampaignReport. # noqa: E501
The total number of unsubscribed members for this campaign. # noqa: E501
:return: The unsubscribed of this CampaignReport. # noqa: E501
:rtype: int
"""
return self._unsubscribed
@unsubscribed.setter
def unsubscribed(self, unsubscribed):
"""Sets the unsubscribed of this CampaignReport.
The total number of unsubscribed members for this campaign. # noqa: E501
:param unsubscribed: The unsubscribed of this CampaignReport. # noqa: E501
:type: int
"""
self._unsubscribed = unsubscribed
@property
def send_time(self):
"""Gets the send_time of this CampaignReport. # noqa: E501
The date and time a campaign was sent in ISO 8601 format. # noqa: E501
:return: The send_time of this CampaignReport. # noqa: E501
:rtype: datetime
"""
return self._send_time
@send_time.setter
def send_time(self, send_time):
"""Sets the send_time of this CampaignReport.
The date and time a campaign was sent in ISO 8601 format. # noqa: E501
:param send_time: The send_time of this CampaignReport. # noqa: E501
:type: datetime
"""
self._send_time = send_time
@property
def rss_last_send(self):
"""Gets the rss_last_send of this CampaignReport. # noqa: E501
For RSS campaigns, the date and time of the last send in ISO 8601 format. # noqa: E501
:return: The rss_last_send of this CampaignReport. # noqa: E501
:rtype: datetime
"""
return self._rss_last_send
@rss_last_send.setter
def rss_last_send(self, rss_last_send):
"""Sets the rss_last_send of this CampaignReport.
For RSS campaigns, the date and time of the last send in ISO 8601 format. # noqa: E501
:param rss_last_send: The rss_last_send of this CampaignReport. # noqa: E501
:type: datetime
"""
self._rss_last_send = rss_last_send
@property
def bounces(self):
"""Gets the bounces of this CampaignReport. # noqa: E501
:return: The bounces of this CampaignReport. # noqa: E501
:rtype: Bounces
"""
return self._bounces
@bounces.setter
def bounces(self, bounces):
"""Sets the bounces of this CampaignReport.
:param bounces: The bounces of this CampaignReport. # noqa: E501
:type: Bounces
"""
self._bounces = bounces
@property
def forwards(self):
"""Gets the forwards of this CampaignReport. # noqa: E501
:return: The forwards of this CampaignReport. # noqa: E501
:rtype: Forwards
"""
return self._forwards
@forwards.setter
def forwards(self, forwards):
"""Sets the forwards of this CampaignReport.
:param forwards: The forwards of this CampaignReport. # noqa: E501
:type: Forwards
"""
self._forwards = forwards
@property
def opens(self):
"""Gets the opens of this CampaignReport. # noqa: E501
:return: The opens of this CampaignReport. # noqa: E501
:rtype: Opens
"""
return self._opens
@opens.setter
def opens(self, opens):
"""Sets the opens of this CampaignReport.
:param opens: The opens of this CampaignReport. # noqa: E501
:type: Opens
"""
self._opens = opens
@property
def clicks(self):
"""Gets the clicks of this CampaignReport. # noqa: E501
:return: The clicks of this CampaignReport. # noqa: E501
:rtype: Clicks
"""
return self._clicks
@clicks.setter
def clicks(self, clicks):
"""Sets the clicks of this CampaignReport.
:param clicks: The clicks of this CampaignReport. # noqa: E501
:type: Clicks
"""
self._clicks = clicks
@property
def facebook_likes(self):
"""Gets the facebook_likes of this CampaignReport. # noqa: E501
:return: The facebook_likes of this CampaignReport. # noqa: E501
:rtype: FacebookLikes
"""
return self._facebook_likes
@facebook_likes.setter
def facebook_likes(self, facebook_likes):
"""Sets the facebook_likes of this CampaignReport.
:param facebook_likes: The facebook_likes of this CampaignReport. # noqa: E501
:type: FacebookLikes
"""
self._facebook_likes = facebook_likes
@property
def industry_stats(self):
"""Gets the industry_stats of this CampaignReport. # noqa: E501
:return: The industry_stats of this CampaignReport. # noqa: E501
:rtype: IndustryStats1
"""
return self._industry_stats
@industry_stats.setter
def industry_stats(self, industry_stats):
"""Sets the industry_stats of this CampaignReport.
:param industry_stats: The industry_stats of this CampaignReport. # noqa: E501
:type: IndustryStats1
"""
self._industry_stats = industry_stats
@property
def list_stats(self):
"""Gets the list_stats of this CampaignReport. # noqa: E501
:return: The list_stats of this CampaignReport. # noqa: E501
:rtype: ListStats
"""
return self._list_stats
@list_stats.setter
def list_stats(self, list_stats):
"""Sets the list_stats of this CampaignReport.
:param list_stats: The list_stats of this CampaignReport. # noqa: E501
:type: ListStats
"""
self._list_stats = list_stats
@property
def ab_split(self):
"""Gets the ab_split of this CampaignReport. # noqa: E501
:return: The ab_split of this CampaignReport. # noqa: E501
:rtype: ABSplitStats
"""
return self._ab_split
@ab_split.setter
def ab_split(self, ab_split):
"""Sets the ab_split of this CampaignReport.
:param ab_split: The ab_split of this CampaignReport. # noqa: E501
:type: ABSplitStats
"""
self._ab_split = ab_split
@property
def timewarp(self):
"""Gets the timewarp of this CampaignReport. # noqa: E501
An hourly breakdown of sends, opens, and clicks if a campaign is sent using timewarp. # noqa: E501
:return: The timewarp of this CampaignReport. # noqa: E501
:rtype: list[CampaignReports1Timewarp]
"""
return self._timewarp
@timewarp.setter
def timewarp(self, timewarp):
"""Sets the timewarp of this CampaignReport.
An hourly breakdown of sends, opens, and clicks if a campaign is sent using timewarp. # noqa: E501
:param timewarp: The timewarp of this CampaignReport. # noqa: E501
:type: list[CampaignReports1Timewarp]
"""
self._timewarp = timewarp
@property
def timeseries(self):
"""Gets the timeseries of this CampaignReport. # noqa: E501
An hourly breakdown of the performance of the campaign over the first 24 hours. # noqa: E501
:return: The timeseries of this CampaignReport. # noqa: E501
:rtype: list[CampaignReports1Timeseries]
"""
return self._timeseries
@timeseries.setter
def timeseries(self, timeseries):
"""Sets the timeseries of this CampaignReport.
An hourly breakdown of the performance of the campaign over the first 24 hours. # noqa: E501
:param timeseries: The timeseries of this CampaignReport. # noqa: E501
:type: list[CampaignReports1Timeseries]
"""
self._timeseries = timeseries
@property
def share_report(self):
"""Gets the share_report of this CampaignReport. # noqa: E501
:return: The share_report of this CampaignReport. # noqa: E501
:rtype: ShareReport
"""
return self._share_report
@share_report.setter
def share_report(self, share_report):
"""Sets the share_report of this CampaignReport.
:param share_report: The share_report of this CampaignReport. # noqa: E501
:type: ShareReport
"""
self._share_report = share_report
@property
def ecommerce(self):
"""Gets the ecommerce of this CampaignReport. # noqa: E501
:return: The ecommerce of this CampaignReport. # noqa: E501
:rtype: ECommerceReport1
"""
return self._ecommerce
@ecommerce.setter
def ecommerce(self, ecommerce):
"""Sets the ecommerce of this CampaignReport.
:param ecommerce: The ecommerce of this CampaignReport. # noqa: E501
:type: ECommerceReport1
"""
self._ecommerce = ecommerce
@property
def delivery_status(self):
"""Gets the delivery_status of this CampaignReport. # noqa: E501
:return: The delivery_status of this CampaignReport. # noqa: E501
:rtype: CampaignDeliveryStatus
"""
return self._delivery_status
@delivery_status.setter
def delivery_status(self, delivery_status):
"""Sets the delivery_status of this CampaignReport.
:param delivery_status: The delivery_status of this CampaignReport. # noqa: E501
:type: CampaignDeliveryStatus
"""
self._delivery_status = delivery_status
@property
def links(self):
"""Gets the links of this CampaignReport. # noqa: E501
A list of link types and descriptions for the API schema documents. # noqa: E501
:return: The links of this CampaignReport. # noqa: E501
:rtype: list[ResourceLink]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this CampaignReport.
A list of link types and descriptions for the API schema documents. # noqa: E501
:param links: The links of this CampaignReport. # noqa: E501
:type: list[ResourceLink]
"""
self._links = links
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(CampaignReport, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CampaignReport):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
```
#### File: mailchimp_marketing_asyncio/models/campaign_settings4.py
```python
import pprint
import re # noqa: F401
import six
class CampaignSettings4(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'subject_line': 'str',
'preview_text': 'str',
'title': 'str',
'from_name': 'str',
'reply_to': 'str',
'use_conversation': 'bool',
'to_name': 'str',
'folder_id': 'str',
'authenticate': 'bool',
'auto_footer': 'bool',
'inline_css': 'bool',
'auto_tweet': 'bool',
'auto_fb_post': 'list[str]',
'fb_comments': 'bool',
'template_id': 'int'
}
attribute_map = {
'subject_line': 'subject_line',
'preview_text': 'preview_text',
'title': 'title',
'from_name': 'from_name',
'reply_to': 'reply_to',
'use_conversation': 'use_conversation',
'to_name': 'to_name',
'folder_id': 'folder_id',
'authenticate': 'authenticate',
'auto_footer': 'auto_footer',
'inline_css': 'inline_css',
'auto_tweet': 'auto_tweet',
'auto_fb_post': 'auto_fb_post',
'fb_comments': 'fb_comments',
'template_id': 'template_id'
}
def __init__(self, subject_line=None, preview_text=None, title=None, from_name=None, reply_to=None, use_conversation=None, to_name=None, folder_id=None, authenticate=None, auto_footer=None, inline_css=None, auto_tweet=None, auto_fb_post=None, fb_comments=None, template_id=None): # noqa: E501
"""CampaignSettings4 - a model defined in Swagger""" # noqa: E501
self._subject_line = None
self._preview_text = None
self._title = None
self._from_name = None
self._reply_to = None
self._use_conversation = None
self._to_name = None
self._folder_id = None
self._authenticate = None
self._auto_footer = None
self._inline_css = None
self._auto_tweet = None
self._auto_fb_post = None
self._fb_comments = None
self._template_id = None
self.discriminator = None
self.subject_line = subject_line
if preview_text is not None:
self.preview_text = preview_text
if title is not None:
self.title = title
self.from_name = from_name
self.reply_to = reply_to
if use_conversation is not None:
self.use_conversation = use_conversation
if to_name is not None:
self.to_name = to_name
if folder_id is not None:
self.folder_id = folder_id
if authenticate is not None:
self.authenticate = authenticate
if auto_footer is not None:
self.auto_footer = auto_footer
if inline_css is not None:
self.inline_css = inline_css
if auto_tweet is not None:
self.auto_tweet = auto_tweet
if auto_fb_post is not None:
self.auto_fb_post = auto_fb_post
if fb_comments is not None:
self.fb_comments = fb_comments
if template_id is not None:
self.template_id = template_id
@property
def subject_line(self):
"""Gets the subject_line of this CampaignSettings4. # noqa: E501
The subject line for the campaign. # noqa: E501
:return: The subject_line of this CampaignSettings4. # noqa: E501
:rtype: str
"""
return self._subject_line
@subject_line.setter
def subject_line(self, subject_line):
"""Sets the subject_line of this CampaignSettings4.
The subject line for the campaign. # noqa: E501
:param subject_line: The subject_line of this CampaignSettings4. # noqa: E501
:type: str
"""
if subject_line is None:
raise ValueError("Invalid value for `subject_line`, must not be `None`") # noqa: E501
self._subject_line = subject_line
@property
def preview_text(self):
"""Gets the preview_text of this CampaignSettings4. # noqa: E501
The preview text for the campaign. # noqa: E501
:return: The preview_text of this CampaignSettings4. # noqa: E501
:rtype: str
"""
return self._preview_text
@preview_text.setter
def preview_text(self, preview_text):
"""Sets the preview_text of this CampaignSettings4.
The preview text for the campaign. # noqa: E501
:param preview_text: The preview_text of this CampaignSettings4. # noqa: E501
:type: str
"""
self._preview_text = preview_text
@property
def title(self):
"""Gets the title of this CampaignSettings4. # noqa: E501
The title of the campaign. # noqa: E501
:return: The title of this CampaignSettings4. # noqa: E501
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this CampaignSettings4.
The title of the campaign. # noqa: E501
:param title: The title of this CampaignSettings4. # noqa: E501
:type: str
"""
self._title = title
@property
def from_name(self):
"""Gets the from_name of this CampaignSettings4. # noqa: E501
The 'from' name on the campaign (not an email address). # noqa: E501
:return: The from_name of this CampaignSettings4. # noqa: E501
:rtype: str
"""
return self._from_name
@from_name.setter
def from_name(self, from_name):
"""Sets the from_name of this CampaignSettings4.
The 'from' name on the campaign (not an email address). # noqa: E501
:param from_name: The from_name of this CampaignSettings4. # noqa: E501
:type: str
"""
if from_name is None:
raise ValueError("Invalid value for `from_name`, must not be `None`") # noqa: E501
self._from_name = from_name
@property
def reply_to(self):
"""Gets the reply_to of this CampaignSettings4. # noqa: E501
The reply-to email address for the campaign. # noqa: E501
:return: The reply_to of this CampaignSettings4. # noqa: E501
:rtype: str
"""
return self._reply_to
@reply_to.setter
def reply_to(self, reply_to):
"""Sets the reply_to of this CampaignSettings4.
The reply-to email address for the campaign. # noqa: E501
:param reply_to: The reply_to of this CampaignSettings4. # noqa: E501
:type: str
"""
if reply_to is None:
raise ValueError("Invalid value for `reply_to`, must not be `None`") # noqa: E501
self._reply_to = reply_to
@property
def use_conversation(self):
"""Gets the use_conversation of this CampaignSettings4. # noqa: E501
Use Mailchimp Conversation feature to manage out-of-office replies. # noqa: E501
:return: The use_conversation of this CampaignSettings4. # noqa: E501
:rtype: bool
"""
return self._use_conversation
@use_conversation.setter
def use_conversation(self, use_conversation):
"""Sets the use_conversation of this CampaignSettings4.
Use Mailchimp Conversation feature to manage out-of-office replies. # noqa: E501
:param use_conversation: The use_conversation of this CampaignSettings4. # noqa: E501
:type: bool
"""
self._use_conversation = use_conversation
@property
def to_name(self):
"""Gets the to_name of this CampaignSettings4. # noqa: E501
The campaign's custom 'To' name. Typically the first name [audience field](https://mailchimp.com/help/getting-started-with-merge-tags/). # noqa: E501
:return: The to_name of this CampaignSettings4. # noqa: E501
:rtype: str
"""
return self._to_name
@to_name.setter
def to_name(self, to_name):
"""Sets the to_name of this CampaignSettings4.
The campaign's custom 'To' name. Typically the first name [audience field](https://mailchimp.com/help/getting-started-with-merge-tags/). # noqa: E501
:param to_name: The to_name of this CampaignSettings4. # noqa: E501
:type: str
"""
self._to_name = to_name
@property
def folder_id(self):
"""Gets the folder_id of this CampaignSettings4. # noqa: E501
If the campaign is listed in a folder, the id for that folder. # noqa: E501
:return: The folder_id of this CampaignSettings4. # noqa: E501
:rtype: str
"""
return self._folder_id
@folder_id.setter
def folder_id(self, folder_id):
"""Sets the folder_id of this CampaignSettings4.
If the campaign is listed in a folder, the id for that folder. # noqa: E501
:param folder_id: The folder_id of this CampaignSettings4. # noqa: E501
:type: str
"""
self._folder_id = folder_id
@property
def authenticate(self):
"""Gets the authenticate of this CampaignSettings4. # noqa: E501
Whether Mailchimp [authenticated](https://mailchimp.com/help/about-email-authentication/) the campaign. Defaults to `true`. # noqa: E501
:return: The authenticate of this CampaignSettings4. # noqa: E501
:rtype: bool
"""
return self._authenticate
@authenticate.setter
def authenticate(self, authenticate):
"""Sets the authenticate of this CampaignSettings4.
Whether Mailchimp [authenticated](https://mailchimp.com/help/about-email-authentication/) the campaign. Defaults to `true`. # noqa: E501
:param authenticate: The authenticate of this CampaignSettings4. # noqa: E501
:type: bool
"""
self._authenticate = authenticate
@property
def auto_footer(self):
"""Gets the auto_footer of this CampaignSettings4. # noqa: E501
Automatically append Mailchimp's [default footer](https://mailchimp.com/help/about-campaign-footers/) to the campaign. # noqa: E501
:return: The auto_footer of this CampaignSettings4. # noqa: E501
:rtype: bool
"""
return self._auto_footer
@auto_footer.setter
def auto_footer(self, auto_footer):
"""Sets the auto_footer of this CampaignSettings4.
Automatically append Mailchimp's [default footer](https://mailchimp.com/help/about-campaign-footers/) to the campaign. # noqa: E501
:param auto_footer: The auto_footer of this CampaignSettings4. # noqa: E501
:type: bool
"""
self._auto_footer = auto_footer
@property
def inline_css(self):
"""Gets the inline_css of this CampaignSettings4. # noqa: E501
Automatically inline the CSS included with the campaign content. # noqa: E501
:return: The inline_css of this CampaignSettings4. # noqa: E501
:rtype: bool
"""
return self._inline_css
@inline_css.setter
def inline_css(self, inline_css):
"""Sets the inline_css of this CampaignSettings4.
Automatically inline the CSS included with the campaign content. # noqa: E501
:param inline_css: The inline_css of this CampaignSettings4. # noqa: E501
:type: bool
"""
self._inline_css = inline_css
@property
def auto_tweet(self):
"""Gets the auto_tweet of this CampaignSettings4. # noqa: E501
Automatically tweet a link to the [campaign archive](https://mailchimp.com/help/about-email-campaign-archives-and-pages/) page when the campaign is sent. # noqa: E501
:return: The auto_tweet of this CampaignSettings4. # noqa: E501
:rtype: bool
"""
return self._auto_tweet
@auto_tweet.setter
def auto_tweet(self, auto_tweet):
"""Sets the auto_tweet of this CampaignSettings4.
Automatically tweet a link to the [campaign archive](https://mailchimp.com/help/about-email-campaign-archives-and-pages/) page when the campaign is sent. # noqa: E501
:param auto_tweet: The auto_tweet of this CampaignSettings4. # noqa: E501
:type: bool
"""
self._auto_tweet = auto_tweet
@property
def auto_fb_post(self):
"""Gets the auto_fb_post of this CampaignSettings4. # noqa: E501
An array of [Facebook](https://mailchimp.com/help/connect-or-disconnect-the-facebook-integration/) page ids to auto-post to. # noqa: E501
:return: The auto_fb_post of this CampaignSettings4. # noqa: E501
:rtype: list[str]
"""
return self._auto_fb_post
@auto_fb_post.setter
def auto_fb_post(self, auto_fb_post):
"""Sets the auto_fb_post of this CampaignSettings4.
An array of [Facebook](https://mailchimp.com/help/connect-or-disconnect-the-facebook-integration/) page ids to auto-post to. # noqa: E501
:param auto_fb_post: The auto_fb_post of this CampaignSettings4. # noqa: E501
:type: list[str]
"""
self._auto_fb_post = auto_fb_post
@property
def fb_comments(self):
"""Gets the fb_comments of this CampaignSettings4. # noqa: E501
Allows Facebook comments on the campaign (also force-enables the Campaign Archive toolbar). Defaults to `true`. # noqa: E501
:return: The fb_comments of this CampaignSettings4. # noqa: E501
:rtype: bool
"""
return self._fb_comments
@fb_comments.setter
def fb_comments(self, fb_comments):
"""Sets the fb_comments of this CampaignSettings4.
Allows Facebook comments on the campaign (also force-enables the Campaign Archive toolbar). Defaults to `true`. # noqa: E501
:param fb_comments: The fb_comments of this CampaignSettings4. # noqa: E501
:type: bool
"""
self._fb_comments = fb_comments
@property
def template_id(self):
"""Gets the template_id of this CampaignSettings4. # noqa: E501
The id of the template to use. # noqa: E501
:return: The template_id of this CampaignSettings4. # noqa: E501
:rtype: int
"""
return self._template_id
@template_id.setter
def template_id(self, template_id):
"""Sets the template_id of this CampaignSettings4.
The id of the template to use. # noqa: E501
:param template_id: The template_id of this CampaignSettings4. # noqa: E501
:type: int
"""
self._template_id = template_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(CampaignSettings4, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CampaignSettings4):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
```
#### File: mailchimp_marketing_asyncio/models/campaign_tracking_options.py
```python
import pprint
import re # noqa: F401
import six
class CampaignTrackingOptions(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'opens': 'bool',
'html_clicks': 'bool',
'text_clicks': 'bool',
'goal_tracking': 'bool',
'ecomm360': 'bool',
'google_analytics': 'str',
'clicktale': 'str',
'salesforce': 'SalesforceCRMTracking',
'capsule': 'CapsuleCRMTracking1'
}
attribute_map = {
'opens': 'opens',
'html_clicks': 'html_clicks',
'text_clicks': 'text_clicks',
'goal_tracking': 'goal_tracking',
'ecomm360': 'ecomm360',
'google_analytics': 'google_analytics',
'clicktale': 'clicktale',
'salesforce': 'salesforce',
'capsule': 'capsule'
}
def __init__(self, opens=None, html_clicks=None, text_clicks=None, goal_tracking=None, ecomm360=None, google_analytics=None, clicktale=None, salesforce=None, capsule=None): # noqa: E501
"""CampaignTrackingOptions - a model defined in Swagger""" # noqa: E501
self._opens = None
self._html_clicks = None
self._text_clicks = None
self._goal_tracking = None
self._ecomm360 = None
self._google_analytics = None
self._clicktale = None
self._salesforce = None
self._capsule = None
self.discriminator = None
if opens is not None:
self.opens = opens
if html_clicks is not None:
self.html_clicks = html_clicks
if text_clicks is not None:
self.text_clicks = text_clicks
if goal_tracking is not None:
self.goal_tracking = goal_tracking
if ecomm360 is not None:
self.ecomm360 = ecomm360
if google_analytics is not None:
self.google_analytics = google_analytics
if clicktale is not None:
self.clicktale = clicktale
if salesforce is not None:
self.salesforce = salesforce
if capsule is not None:
self.capsule = capsule
@property
def opens(self):
"""Gets the opens of this CampaignTrackingOptions. # noqa: E501
Whether to [track opens](https://mailchimp.com/help/about-open-tracking/). Defaults to `true`. # noqa: E501
:return: The opens of this CampaignTrackingOptions. # noqa: E501
:rtype: bool
"""
return self._opens
@opens.setter
def opens(self, opens):
"""Sets the opens of this CampaignTrackingOptions.
Whether to [track opens](https://mailchimp.com/help/about-open-tracking/). Defaults to `true`. # noqa: E501
:param opens: The opens of this CampaignTrackingOptions. # noqa: E501
:type: bool
"""
self._opens = opens
@property
def html_clicks(self):
"""Gets the html_clicks of this CampaignTrackingOptions. # noqa: E501
Whether to [track clicks](https://mailchimp.com/help/enable-and-view-click-tracking/) in the HTML version of the campaign. Defaults to `true`. # noqa: E501
:return: The html_clicks of this CampaignTrackingOptions. # noqa: E501
:rtype: bool
"""
return self._html_clicks
@html_clicks.setter
def html_clicks(self, html_clicks):
"""Sets the html_clicks of this CampaignTrackingOptions.
Whether to [track clicks](https://mailchimp.com/help/enable-and-view-click-tracking/) in the HTML version of the campaign. Defaults to `true`. # noqa: E501
:param html_clicks: The html_clicks of this CampaignTrackingOptions. # noqa: E501
:type: bool
"""
self._html_clicks = html_clicks
@property
def text_clicks(self):
"""Gets the text_clicks of this CampaignTrackingOptions. # noqa: E501
Whether to [track clicks](https://mailchimp.com/help/enable-and-view-click-tracking/) in the plain-text version of the campaign. Defaults to `true`. # noqa: E501
:return: The text_clicks of this CampaignTrackingOptions. # noqa: E501
:rtype: bool
"""
return self._text_clicks
@text_clicks.setter
def text_clicks(self, text_clicks):
"""Sets the text_clicks of this CampaignTrackingOptions.
Whether to [track clicks](https://mailchimp.com/help/enable-and-view-click-tracking/) in the plain-text version of the campaign. Defaults to `true`. # noqa: E501
:param text_clicks: The text_clicks of this CampaignTrackingOptions. # noqa: E501
:type: bool
"""
self._text_clicks = text_clicks
@property
def goal_tracking(self):
"""Gets the goal_tracking of this CampaignTrackingOptions. # noqa: E501
Deprecated # noqa: E501
:return: The goal_tracking of this CampaignTrackingOptions. # noqa: E501
:rtype: bool
"""
return self._goal_tracking
@goal_tracking.setter
def goal_tracking(self, goal_tracking):
"""Sets the goal_tracking of this CampaignTrackingOptions.
Deprecated # noqa: E501
:param goal_tracking: The goal_tracking of this CampaignTrackingOptions. # noqa: E501
:type: bool
"""
self._goal_tracking = goal_tracking
@property
def ecomm360(self):
"""Gets the ecomm360 of this CampaignTrackingOptions. # noqa: E501
Whether to enable e-commerce tracking. # noqa: E501
:return: The ecomm360 of this CampaignTrackingOptions. # noqa: E501
:rtype: bool
"""
return self._ecomm360
@ecomm360.setter
def ecomm360(self, ecomm360):
"""Sets the ecomm360 of this CampaignTrackingOptions.
Whether to enable e-commerce tracking. # noqa: E501
:param ecomm360: The ecomm360 of this CampaignTrackingOptions. # noqa: E501
:type: bool
"""
self._ecomm360 = ecomm360
@property
def google_analytics(self):
"""Gets the google_analytics of this CampaignTrackingOptions. # noqa: E501
The custom slug for [Google Analytics](https://mailchimp.com/help/integrate-google-analytics-with-mailchimp/) tracking (max of 50 bytes). # noqa: E501
:return: The google_analytics of this CampaignTrackingOptions. # noqa: E501
:rtype: str
"""
return self._google_analytics
@google_analytics.setter
def google_analytics(self, google_analytics):
"""Sets the google_analytics of this CampaignTrackingOptions.
The custom slug for [Google Analytics](https://mailchimp.com/help/integrate-google-analytics-with-mailchimp/) tracking (max of 50 bytes). # noqa: E501
:param google_analytics: The google_analytics of this CampaignTrackingOptions. # noqa: E501
:type: str
"""
self._google_analytics = google_analytics
@property
def clicktale(self):
"""Gets the clicktale of this CampaignTrackingOptions. # noqa: E501
The custom slug for [Click Tale](https://mailchimp.com/help/additional-tracking-options-for-campaigns/) tracking (max of 50 bytes). # noqa: E501
:return: The clicktale of this CampaignTrackingOptions. # noqa: E501
:rtype: str
"""
return self._clicktale
@clicktale.setter
def clicktale(self, clicktale):
"""Sets the clicktale of this CampaignTrackingOptions.
The custom slug for [Click Tale](https://mailchimp.com/help/additional-tracking-options-for-campaigns/) tracking (max of 50 bytes). # noqa: E501
:param clicktale: The clicktale of this CampaignTrackingOptions. # noqa: E501
:type: str
"""
self._clicktale = clicktale
@property
def salesforce(self):
"""Gets the salesforce of this CampaignTrackingOptions. # noqa: E501
:return: The salesforce of this CampaignTrackingOptions. # noqa: E501
:rtype: SalesforceCRMTracking
"""
return self._salesforce
@salesforce.setter
def salesforce(self, salesforce):
"""Sets the salesforce of this CampaignTrackingOptions.
:param salesforce: The salesforce of this CampaignTrackingOptions. # noqa: E501
:type: SalesforceCRMTracking
"""
self._salesforce = salesforce
@property
def capsule(self):
"""Gets the capsule of this CampaignTrackingOptions. # noqa: E501
:return: The capsule of this CampaignTrackingOptions. # noqa: E501
:rtype: CapsuleCRMTracking1
"""
return self._capsule
@capsule.setter
def capsule(self, capsule):
"""Sets the capsule of this CampaignTrackingOptions.
:param capsule: The capsule of this CampaignTrackingOptions. # noqa: E501
:type: CapsuleCRMTracking1
"""
self._capsule = capsule
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(CampaignTrackingOptions, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CampaignTrackingOptions):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
```
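A minimal usage sketch for the CampaignTrackingOptions model above. The package import is assumed to follow the module layout shown in the surrounding file headers (the exact module name for this class is not shown here, so the path is a guess), and every field value is illustrative only.

```python
from mailchimp_marketing_asyncio.models.campaign_tracking_options import CampaignTrackingOptions

# Build tracking options for a campaign; any field left unset stays None
# but is still included by to_dict(), so strip None values before use.
tracking = CampaignTrackingOptions(
    opens=True,
    html_clicks=True,
    text_clicks=False,
    google_analytics="spring_sale",  # hypothetical GA slug, max 50 bytes
)

payload = {k: v for k, v in tracking.to_dict().items() if v is not None}
print(payload)
```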
#### File: mailchimp_marketing_asyncio/models/ecommerce_promo_rule2.py
```python
import pprint
import re # noqa: F401
import six
class EcommercePromoRule2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'title': 'str',
'description': 'str',
'starts_at': 'datetime',
'ends_at': 'str',
'amount': 'float',
'type': 'str',
'target': 'str',
'enabled': 'bool',
'created_at_foreign': 'datetime',
'updated_at_foreign': 'datetime'
}
attribute_map = {
'title': 'title',
'description': 'description',
'starts_at': 'starts_at',
'ends_at': 'ends_at',
'amount': 'amount',
'type': 'type',
'target': 'target',
'enabled': 'enabled',
'created_at_foreign': 'created_at_foreign',
'updated_at_foreign': 'updated_at_foreign'
}
def __init__(self, title=None, description=None, starts_at=None, ends_at=None, amount=None, type=None, target=None, enabled=None, created_at_foreign=None, updated_at_foreign=None): # noqa: E501
"""EcommercePromoRule2 - a model defined in Swagger""" # noqa: E501
self._title = None
self._description = None
self._starts_at = None
self._ends_at = None
self._amount = None
self._type = None
self._target = None
self._enabled = None
self._created_at_foreign = None
self._updated_at_foreign = None
self.discriminator = None
if title is not None:
self.title = title
if description is not None:
self.description = description
if starts_at is not None:
self.starts_at = starts_at
if ends_at is not None:
self.ends_at = ends_at
if amount is not None:
self.amount = amount
if type is not None:
self.type = type
if target is not None:
self.target = target
if enabled is not None:
self.enabled = enabled
if created_at_foreign is not None:
self.created_at_foreign = created_at_foreign
if updated_at_foreign is not None:
self.updated_at_foreign = updated_at_foreign
@property
def title(self):
"""Gets the title of this EcommercePromoRule2. # noqa: E501
The title that will show up in the promotion campaign. Restricted to UTF-8 characters with a max length of 100 bytes. # noqa: E501
:return: The title of this EcommercePromoRule2. # noqa: E501
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this EcommercePromoRule2.
The title that will show up in the promotion campaign. Restricted to UTF-8 characters with a max length of 100 bytes. # noqa: E501
:param title: The title of this EcommercePromoRule2. # noqa: E501
:type: str
"""
self._title = title
@property
def description(self):
"""Gets the description of this EcommercePromoRule2. # noqa: E501
The description of a promotion, restricted to UTF-8 characters with a max length of 255. # noqa: E501
:return: The description of this EcommercePromoRule2. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this EcommercePromoRule2.
The description of a promotion, restricted to UTF-8 characters with a max length of 255. # noqa: E501
:param description: The description of this EcommercePromoRule2. # noqa: E501
:type: str
"""
self._description = description
@property
def starts_at(self):
"""Gets the starts_at of this EcommercePromoRule2. # noqa: E501
The date and time when the promotion is in effect in ISO 8601 format. # noqa: E501
:return: The starts_at of this EcommercePromoRule2. # noqa: E501
:rtype: datetime
"""
return self._starts_at
@starts_at.setter
def starts_at(self, starts_at):
"""Sets the starts_at of this EcommercePromoRule2.
The date and time when the promotion is in effect in ISO 8601 format. # noqa: E501
:param starts_at: The starts_at of this EcommercePromoRule2. # noqa: E501
:type: datetime
"""
self._starts_at = starts_at
@property
def ends_at(self):
"""Gets the ends_at of this EcommercePromoRule2. # noqa: E501
The date and time when the promotion ends. Must be after starts_at and in ISO 8601 format. # noqa: E501
:return: The ends_at of this EcommercePromoRule2. # noqa: E501
:rtype: str
"""
return self._ends_at
@ends_at.setter
def ends_at(self, ends_at):
"""Sets the ends_at of this EcommercePromoRule2.
The date and time when the promotion ends. Must be after starts_at and in ISO 8601 format. # noqa: E501
:param ends_at: The ends_at of this EcommercePromoRule2. # noqa: E501
:type: str
"""
self._ends_at = ends_at
@property
def amount(self):
"""Gets the amount of this EcommercePromoRule2. # noqa: E501
The amount of the promo code discount. If 'type' is 'fixed', the amount is treated as a monetary value. If 'type' is 'percentage', amount must be a decimal value between 0.0 and 1.0, inclusive. # noqa: E501
:return: The amount of this EcommercePromoRule2. # noqa: E501
:rtype: float
"""
return self._amount
@amount.setter
def amount(self, amount):
"""Sets the amount of this EcommercePromoRule2.
The amount of the promo code discount. If 'type' is 'fixed', the amount is treated as a monetary value. If 'type' is 'percentage', amount must be a decimal value between 0.0 and 1.0, inclusive. # noqa: E501
:param amount: The amount of this EcommercePromoRule2. # noqa: E501
:type: float
"""
self._amount = amount
@property
def type(self):
"""Gets the type of this EcommercePromoRule2. # noqa: E501
Type of discount. For free shipping, set type to fixed. # noqa: E501
:return: The type of this EcommercePromoRule2. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this EcommercePromoRule2.
Type of discount. For free shipping, set type to fixed. # noqa: E501
:param type: The type of this EcommercePromoRule2. # noqa: E501
:type: str
"""
allowed_values = ["fixed", "percentage"] # noqa: E501
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
@property
def target(self):
"""Gets the target of this EcommercePromoRule2. # noqa: E501
The target that the discount applies to. # noqa: E501
:return: The target of this EcommercePromoRule2. # noqa: E501
:rtype: str
"""
return self._target
@target.setter
def target(self, target):
"""Sets the target of this EcommercePromoRule2.
The target that the discount applies to. # noqa: E501
:param target: The target of this EcommercePromoRule2. # noqa: E501
:type: str
"""
allowed_values = ["per_item", "total", "shipping"] # noqa: E501
if target not in allowed_values:
raise ValueError(
"Invalid value for `target` ({0}), must be one of {1}" # noqa: E501
.format(target, allowed_values)
)
self._target = target
@property
def enabled(self):
"""Gets the enabled of this EcommercePromoRule2. # noqa: E501
Whether the promo rule is currently enabled. # noqa: E501
:return: The enabled of this EcommercePromoRule2. # noqa: E501
:rtype: bool
"""
return self._enabled
@enabled.setter
def enabled(self, enabled):
"""Sets the enabled of this EcommercePromoRule2.
Whether the promo rule is currently enabled. # noqa: E501
:param enabled: The enabled of this EcommercePromoRule2. # noqa: E501
:type: bool
"""
self._enabled = enabled
@property
def created_at_foreign(self):
"""Gets the created_at_foreign of this EcommercePromoRule2. # noqa: E501
The date and time the promotion was created in ISO 8601 format. # noqa: E501
:return: The created_at_foreign of this EcommercePromoRule2. # noqa: E501
:rtype: datetime
"""
return self._created_at_foreign
@created_at_foreign.setter
def created_at_foreign(self, created_at_foreign):
"""Sets the created_at_foreign of this EcommercePromoRule2.
The date and time the promotion was created in ISO 8601 format. # noqa: E501
:param created_at_foreign: The created_at_foreign of this EcommercePromoRule2. # noqa: E501
:type: datetime
"""
self._created_at_foreign = created_at_foreign
@property
def updated_at_foreign(self):
"""Gets the updated_at_foreign of this EcommercePromoRule2. # noqa: E501
The date and time the promotion was updated in ISO 8601 format. # noqa: E501
:return: The updated_at_foreign of this EcommercePromoRule2. # noqa: E501
:rtype: datetime
"""
return self._updated_at_foreign
@updated_at_foreign.setter
def updated_at_foreign(self, updated_at_foreign):
"""Sets the updated_at_foreign of this EcommercePromoRule2.
The date and time the promotion was updated in ISO 8601 format. # noqa: E501
:param updated_at_foreign: The updated_at_foreign of this EcommercePromoRule2. # noqa: E501
:type: datetime
"""
self._updated_at_foreign = updated_at_foreign
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(EcommercePromoRule2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EcommercePromoRule2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
```
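A short sketch of how the generated setters above enforce their enum fields. The import path follows the file header; the promo values are illustrative.

```python
import datetime

from mailchimp_marketing_asyncio.models.ecommerce_promo_rule2 import EcommercePromoRule2

rule = EcommercePromoRule2(
    title="10% off storewide",                # illustrative promo rule
    amount=0.10,                              # percentage amounts are 0.0-1.0
    type="percentage",
    target="total",
    enabled=True,
    starts_at=datetime.datetime(2024, 6, 1),
)

# The generated setters for `type` and `target` validate against a fixed
# list of allowed values and raise ValueError for anything else.
try:
    rule.type = "bogo"
except ValueError as exc:
    print(exc)
```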
#### File: mailchimp_marketing_asyncio/models/ecommerce_store.py
```python
import pprint
import re # noqa: F401
import six
class EcommerceStore(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'list_id': 'str',
'name': 'str',
'platform': 'str',
'domain': 'str',
'is_syncing': 'bool',
'email_address': 'str',
'currency_code': 'str',
'money_format': 'str',
'primary_locale': 'str',
'timezone': 'str',
'phone': 'str',
'address': 'Address1',
'connected_site': 'ConnectedSite2',
'automations': 'Automations',
'list_is_active': 'bool',
'created_at': 'datetime',
'updated_at': 'datetime',
'links': 'list[ResourceLink]'
}
attribute_map = {
'id': 'id',
'list_id': 'list_id',
'name': 'name',
'platform': 'platform',
'domain': 'domain',
'is_syncing': 'is_syncing',
'email_address': 'email_address',
'currency_code': 'currency_code',
'money_format': 'money_format',
'primary_locale': 'primary_locale',
'timezone': 'timezone',
'phone': 'phone',
'address': 'address',
'connected_site': 'connected_site',
'automations': 'automations',
'list_is_active': 'list_is_active',
'created_at': 'created_at',
'updated_at': 'updated_at',
'links': '_links'
}
def __init__(self, id=None, list_id=None, name=None, platform=None, domain=None, is_syncing=None, email_address=None, currency_code=None, money_format=None, primary_locale=None, timezone=None, phone=None, address=None, connected_site=None, automations=None, list_is_active=None, created_at=None, updated_at=None, links=None): # noqa: E501
"""EcommerceStore - a model defined in Swagger""" # noqa: E501
self._id = None
self._list_id = None
self._name = None
self._platform = None
self._domain = None
self._is_syncing = None
self._email_address = None
self._currency_code = None
self._money_format = None
self._primary_locale = None
self._timezone = None
self._phone = None
self._address = None
self._connected_site = None
self._automations = None
self._list_is_active = None
self._created_at = None
self._updated_at = None
self._links = None
self.discriminator = None
if id is not None:
self.id = id
if list_id is not None:
self.list_id = list_id
if name is not None:
self.name = name
if platform is not None:
self.platform = platform
if domain is not None:
self.domain = domain
if is_syncing is not None:
self.is_syncing = is_syncing
if email_address is not None:
self.email_address = email_address
if currency_code is not None:
self.currency_code = currency_code
if money_format is not None:
self.money_format = money_format
if primary_locale is not None:
self.primary_locale = primary_locale
if timezone is not None:
self.timezone = timezone
if phone is not None:
self.phone = phone
if address is not None:
self.address = address
if connected_site is not None:
self.connected_site = connected_site
if automations is not None:
self.automations = automations
if list_is_active is not None:
self.list_is_active = list_is_active
if created_at is not None:
self.created_at = created_at
if updated_at is not None:
self.updated_at = updated_at
if links is not None:
self.links = links
@property
def id(self):
"""Gets the id of this EcommerceStore. # noqa: E501
The unique identifier for the store. # noqa: E501
:return: The id of this EcommerceStore. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this EcommerceStore.
The unique identifier for the store. # noqa: E501
:param id: The id of this EcommerceStore. # noqa: E501
:type: str
"""
self._id = id
@property
def list_id(self):
"""Gets the list_id of this EcommerceStore. # noqa: E501
The unique identifier for the list that's associated with the store. The `list_id` for a specific store can't change. # noqa: E501
:return: The list_id of this EcommerceStore. # noqa: E501
:rtype: str
"""
return self._list_id
@list_id.setter
def list_id(self, list_id):
"""Sets the list_id of this EcommerceStore.
The unique identifier for the list that's associated with the store. The `list_id` for a specific store can't change. # noqa: E501
:param list_id: The list_id of this EcommerceStore. # noqa: E501
:type: str
"""
self._list_id = list_id
@property
def name(self):
"""Gets the name of this EcommerceStore. # noqa: E501
The name of the store. # noqa: E501
:return: The name of this EcommerceStore. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this EcommerceStore.
The name of the store. # noqa: E501
:param name: The name of this EcommerceStore. # noqa: E501
:type: str
"""
self._name = name
@property
def platform(self):
"""Gets the platform of this EcommerceStore. # noqa: E501
The e-commerce platform of the store. # noqa: E501
:return: The platform of this EcommerceStore. # noqa: E501
:rtype: str
"""
return self._platform
@platform.setter
def platform(self, platform):
"""Sets the platform of this EcommerceStore.
The e-commerce platform of the store. # noqa: E501
:param platform: The platform of this EcommerceStore. # noqa: E501
:type: str
"""
self._platform = platform
@property
def domain(self):
"""Gets the domain of this EcommerceStore. # noqa: E501
The store domain. The store domain must be unique within a user account. # noqa: E501
:return: The domain of this EcommerceStore. # noqa: E501
:rtype: str
"""
return self._domain
@domain.setter
def domain(self, domain):
"""Sets the domain of this EcommerceStore.
The store domain. The store domain must be unique within a user account. # noqa: E501
:param domain: The domain of this EcommerceStore. # noqa: E501
:type: str
"""
self._domain = domain
@property
def is_syncing(self):
"""Gets the is_syncing of this EcommerceStore. # noqa: E501
Whether to disable automations because the store is currently [syncing](https://mailchimp.com/developer/marketing/docs/e-commerce/#pausing-store-automations). # noqa: E501
:return: The is_syncing of this EcommerceStore. # noqa: E501
:rtype: bool
"""
return self._is_syncing
@is_syncing.setter
def is_syncing(self, is_syncing):
"""Sets the is_syncing of this EcommerceStore.
Whether to disable automations because the store is currently [syncing](https://mailchimp.com/developer/marketing/docs/e-commerce/#pausing-store-automations). # noqa: E501
:param is_syncing: The is_syncing of this EcommerceStore. # noqa: E501
:type: bool
"""
self._is_syncing = is_syncing
@property
def email_address(self):
"""Gets the email_address of this EcommerceStore. # noqa: E501
The email address for the store. # noqa: E501
:return: The email_address of this EcommerceStore. # noqa: E501
:rtype: str
"""
return self._email_address
@email_address.setter
def email_address(self, email_address):
"""Sets the email_address of this EcommerceStore.
The email address for the store. # noqa: E501
:param email_address: The email_address of this EcommerceStore. # noqa: E501
:type: str
"""
self._email_address = email_address
@property
def currency_code(self):
"""Gets the currency_code of this EcommerceStore. # noqa: E501
The three-letter ISO 4217 code for the currency that the store accepts. # noqa: E501
:return: The currency_code of this EcommerceStore. # noqa: E501
:rtype: str
"""
return self._currency_code
@currency_code.setter
def currency_code(self, currency_code):
"""Sets the currency_code of this EcommerceStore.
The three-letter ISO 4217 code for the currency that the store accepts. # noqa: E501
:param currency_code: The currency_code of this EcommerceStore. # noqa: E501
:type: str
"""
self._currency_code = currency_code
@property
def money_format(self):
"""Gets the money_format of this EcommerceStore. # noqa: E501
The currency format for the store. For example: `$`, `£`, etc. # noqa: E501
:return: The money_format of this EcommerceStore. # noqa: E501
:rtype: str
"""
return self._money_format
@money_format.setter
def money_format(self, money_format):
"""Sets the money_format of this EcommerceStore.
The currency format for the store. For example: `$`, `£`, etc. # noqa: E501
:param money_format: The money_format of this EcommerceStore. # noqa: E501
:type: str
"""
self._money_format = money_format
@property
def primary_locale(self):
"""Gets the primary_locale of this EcommerceStore. # noqa: E501
The primary locale for the store. For example: `en`, `de`, etc. # noqa: E501
:return: The primary_locale of this EcommerceStore. # noqa: E501
:rtype: str
"""
return self._primary_locale
@primary_locale.setter
def primary_locale(self, primary_locale):
"""Sets the primary_locale of this EcommerceStore.
The primary locale for the store. For example: `en`, `de`, etc. # noqa: E501
:param primary_locale: The primary_locale of this EcommerceStore. # noqa: E501
:type: str
"""
self._primary_locale = primary_locale
@property
def timezone(self):
"""Gets the timezone of this EcommerceStore. # noqa: E501
The timezone for the store. # noqa: E501
:return: The timezone of this EcommerceStore. # noqa: E501
:rtype: str
"""
return self._timezone
@timezone.setter
def timezone(self, timezone):
"""Sets the timezone of this EcommerceStore.
The timezone for the store. # noqa: E501
:param timezone: The timezone of this EcommerceStore. # noqa: E501
:type: str
"""
self._timezone = timezone
@property
def phone(self):
"""Gets the phone of this EcommerceStore. # noqa: E501
The store phone number. # noqa: E501
:return: The phone of this EcommerceStore. # noqa: E501
:rtype: str
"""
return self._phone
@phone.setter
def phone(self, phone):
"""Sets the phone of this EcommerceStore.
The store phone number. # noqa: E501
:param phone: The phone of this EcommerceStore. # noqa: E501
:type: str
"""
self._phone = phone
@property
def address(self):
"""Gets the address of this EcommerceStore. # noqa: E501
:return: The address of this EcommerceStore. # noqa: E501
:rtype: Address1
"""
return self._address
@address.setter
def address(self, address):
"""Sets the address of this EcommerceStore.
:param address: The address of this EcommerceStore. # noqa: E501
:type: Address1
"""
self._address = address
@property
def connected_site(self):
"""Gets the connected_site of this EcommerceStore. # noqa: E501
:return: The connected_site of this EcommerceStore. # noqa: E501
:rtype: ConnectedSite2
"""
return self._connected_site
@connected_site.setter
def connected_site(self, connected_site):
"""Sets the connected_site of this EcommerceStore.
:param connected_site: The connected_site of this EcommerceStore. # noqa: E501
:type: ConnectedSite2
"""
self._connected_site = connected_site
@property
def automations(self):
"""Gets the automations of this EcommerceStore. # noqa: E501
:return: The automations of this EcommerceStore. # noqa: E501
:rtype: Automations
"""
return self._automations
@automations.setter
def automations(self, automations):
"""Sets the automations of this EcommerceStore.
:param automations: The automations of this EcommerceStore. # noqa: E501
:type: Automations
"""
self._automations = automations
@property
def list_is_active(self):
"""Gets the list_is_active of this EcommerceStore. # noqa: E501
The status of the list connected to the store, namely whether it's deleted or disabled. # noqa: E501
:return: The list_is_active of this EcommerceStore. # noqa: E501
:rtype: bool
"""
return self._list_is_active
@list_is_active.setter
def list_is_active(self, list_is_active):
"""Sets the list_is_active of this EcommerceStore.
The status of the list connected to the store, namely whether it's deleted or disabled. # noqa: E501
:param list_is_active: The list_is_active of this EcommerceStore. # noqa: E501
:type: bool
"""
self._list_is_active = list_is_active
@property
def created_at(self):
"""Gets the created_at of this EcommerceStore. # noqa: E501
The date and time the store was created in ISO 8601 format. # noqa: E501
:return: The created_at of this EcommerceStore. # noqa: E501
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this EcommerceStore.
The date and time the store was created in ISO 8601 format. # noqa: E501
:param created_at: The created_at of this EcommerceStore. # noqa: E501
:type: datetime
"""
self._created_at = created_at
@property
def updated_at(self):
"""Gets the updated_at of this EcommerceStore. # noqa: E501
The date and time the store was last updated in ISO 8601 format. # noqa: E501
:return: The updated_at of this EcommerceStore. # noqa: E501
:rtype: datetime
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""Sets the updated_at of this EcommerceStore.
The date and time the store was last updated in ISO 8601 format. # noqa: E501
:param updated_at: The updated_at of this EcommerceStore. # noqa: E501
:type: datetime
"""
self._updated_at = updated_at
@property
def links(self):
"""Gets the links of this EcommerceStore. # noqa: E501
A list of link types and descriptions for the API schema documents. # noqa: E501
:return: The links of this EcommerceStore. # noqa: E501
:rtype: list[ResourceLink]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this EcommerceStore.
A list of link types and descriptions for the API schema documents. # noqa: E501
:param links: The links of this EcommerceStore. # noqa: E501
:type: list[ResourceLink]
"""
self._links = links
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(EcommerceStore, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EcommerceStore):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
```
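A small sketch showing that to_dict() keys follow the Python attribute names from swagger_types rather than the JSON keys in attribute_map (so `links`, not `_links`). The import path follows the file header; the store values are made up.

```python
from mailchimp_marketing_asyncio.models.ecommerce_store import EcommerceStore

store = EcommerceStore(
    id="store_001",              # illustrative identifiers
    list_id="abc123def4",
    name="Example Shop",
    platform="custom",
    currency_code="USD",
    money_format="$",
)

snapshot = store.to_dict()
# to_dict() iterates swagger_types, so keys use the attribute names.
print(snapshot["name"], snapshot["currency_code"])
print("links" in snapshot, "_links" in snapshot)  # True False
```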
#### File: mailchimp_marketing_asyncio/models/events1.py
```python
import pprint
import re # noqa: F401
import six
class Events1(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'subscribe': 'bool',
'unsubscribe': 'bool',
'profile': 'bool',
'cleaned': 'bool',
'upemail': 'bool',
'campaign': 'bool'
}
attribute_map = {
'subscribe': 'subscribe',
'unsubscribe': 'unsubscribe',
'profile': 'profile',
'cleaned': 'cleaned',
'upemail': 'upemail',
'campaign': 'campaign'
}
def __init__(self, subscribe=None, unsubscribe=None, profile=None, cleaned=None, upemail=None, campaign=None): # noqa: E501
"""Events1 - a model defined in Swagger""" # noqa: E501
self._subscribe = None
self._unsubscribe = None
self._profile = None
self._cleaned = None
self._upemail = None
self._campaign = None
self.discriminator = None
if subscribe is not None:
self.subscribe = subscribe
if unsubscribe is not None:
self.unsubscribe = unsubscribe
if profile is not None:
self.profile = profile
if cleaned is not None:
self.cleaned = cleaned
if upemail is not None:
self.upemail = upemail
if campaign is not None:
self.campaign = campaign
@property
def subscribe(self):
"""Gets the subscribe of this Events1. # noqa: E501
Whether the webhook is triggered when a list subscriber is added. # noqa: E501
:return: The subscribe of this Events1. # noqa: E501
:rtype: bool
"""
return self._subscribe
@subscribe.setter
def subscribe(self, subscribe):
"""Sets the subscribe of this Events1.
Whether the webhook is triggered when a list subscriber is added. # noqa: E501
:param subscribe: The subscribe of this Events1. # noqa: E501
:type: bool
"""
self._subscribe = subscribe
@property
def unsubscribe(self):
"""Gets the unsubscribe of this Events1. # noqa: E501
Whether the webhook is triggered when a list member unsubscribes. # noqa: E501
:return: The unsubscribe of this Events1. # noqa: E501
:rtype: bool
"""
return self._unsubscribe
@unsubscribe.setter
def unsubscribe(self, unsubscribe):
"""Sets the unsubscribe of this Events1.
Whether the webhook is triggered when a list member unsubscribes. # noqa: E501
:param unsubscribe: The unsubscribe of this Events1. # noqa: E501
:type: bool
"""
self._unsubscribe = unsubscribe
@property
def profile(self):
"""Gets the profile of this Events1. # noqa: E501
Whether the webhook is triggered when a subscriber's profile is updated. # noqa: E501
:return: The profile of this Events1. # noqa: E501
:rtype: bool
"""
return self._profile
@profile.setter
def profile(self, profile):
"""Sets the profile of this Events1.
Whether the webhook is triggered when a subscriber's profile is updated. # noqa: E501
:param profile: The profile of this Events1. # noqa: E501
:type: bool
"""
self._profile = profile
@property
def cleaned(self):
"""Gets the cleaned of this Events1. # noqa: E501
Whether the webhook is triggered when a subscriber's email address is cleaned from the list. # noqa: E501
:return: The cleaned of this Events1. # noqa: E501
:rtype: bool
"""
return self._cleaned
@cleaned.setter
def cleaned(self, cleaned):
"""Sets the cleaned of this Events1.
Whether the webhook is triggered when a subscriber's email address is cleaned from the list. # noqa: E501
:param cleaned: The cleaned of this Events1. # noqa: E501
:type: bool
"""
self._cleaned = cleaned
@property
def upemail(self):
"""Gets the upemail of this Events1. # noqa: E501
Whether the webhook is triggered when a subscriber's email address is changed. # noqa: E501
:return: The upemail of this Events1. # noqa: E501
:rtype: bool
"""
return self._upemail
@upemail.setter
def upemail(self, upemail):
"""Sets the upemail of this Events1.
Whether the webhook is triggered when a subscriber's email address is changed. # noqa: E501
:param upemail: The upemail of this Events1. # noqa: E501
:type: bool
"""
self._upemail = upemail
@property
def campaign(self):
"""Gets the campaign of this Events1. # noqa: E501
Whether the webhook is triggered when a campaign is sent or cancelled. # noqa: E501
:return: The campaign of this Events1. # noqa: E501
:rtype: bool
"""
return self._campaign
@campaign.setter
def campaign(self, campaign):
"""Sets the campaign of this Events1.
Whether the webhook is triggered when a campaign is sent or cancelled. # noqa: E501
:param campaign: The campaign of this Events1. # noqa: E501
:type: bool
"""
self._campaign = campaign
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Events1, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Events1):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
```
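A brief sketch of configuring webhook event flags with the Events1 model above; import path per the file header, flag choices illustrative.

```python
from mailchimp_marketing_asyncio.models.events1 import Events1

# Choose which list events should trigger the webhook; unset flags stay None.
events = Events1(subscribe=True, unsubscribe=True, cleaned=True, campaign=False)

# __eq__ compares the underlying attribute dicts, so identically built
# instances compare equal.
assert events == Events1(subscribe=True, unsubscribe=True, cleaned=True, campaign=False)
print(events.to_dict())
```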
#### File: mailchimp_marketing_asyncio/models/file_manager.py
```python
import pprint
import re # noqa: F401
import six
class FileManager(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'files': 'list[GalleryFile]',
'total_file_size': 'float',
'total_items': 'int',
'links': 'list[ResourceLink]'
}
attribute_map = {
'files': 'files',
'total_file_size': 'total_file_size',
'total_items': 'total_items',
'links': '_links'
}
def __init__(self, files=None, total_file_size=None, total_items=None, links=None): # noqa: E501
"""FileManager - a model defined in Swagger""" # noqa: E501
self._files = None
self._total_file_size = None
self._total_items = None
self._links = None
self.discriminator = None
if files is not None:
self.files = files
if total_file_size is not None:
self.total_file_size = total_file_size
if total_items is not None:
self.total_items = total_items
if links is not None:
self.links = links
@property
def files(self):
"""Gets the files of this FileManager. # noqa: E501
A list of files and images in an account. # noqa: E501
:return: The files of this FileManager. # noqa: E501
:rtype: list[GalleryFile]
"""
return self._files
@files.setter
def files(self, files):
"""Sets the files of this FileManager.
A list of files and images in an account. # noqa: E501
:param files: The files of this FileManager. # noqa: E501
:type: list[GalleryFile]
"""
self._files = files
@property
def total_file_size(self):
"""Gets the total_file_size of this FileManager. # noqa: E501
The total size of all File Manager files in bytes. # noqa: E501
:return: The total_file_size of this FileManager. # noqa: E501
:rtype: float
"""
return self._total_file_size
@total_file_size.setter
def total_file_size(self, total_file_size):
"""Sets the total_file_size of this FileManager.
The total size of all File Manager files in bytes. # noqa: E501
:param total_file_size: The total_file_size of this FileManager. # noqa: E501
:type: float
"""
self._total_file_size = total_file_size
@property
def total_items(self):
"""Gets the total_items of this FileManager. # noqa: E501
The total number of items matching the query regardless of pagination. # noqa: E501
:return: The total_items of this FileManager. # noqa: E501
:rtype: int
"""
return self._total_items
@total_items.setter
def total_items(self, total_items):
"""Sets the total_items of this FileManager.
The total number of items matching the query regardless of pagination. # noqa: E501
:param total_items: The total_items of this FileManager. # noqa: E501
:type: int
"""
self._total_items = total_items
@property
def links(self):
"""Gets the links of this FileManager. # noqa: E501
A list of link types and descriptions for the API schema documents. # noqa: E501
:return: The links of this FileManager. # noqa: E501
:rtype: list[ResourceLink]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this FileManager.
A list of link types and descriptions for the API schema documents. # noqa: E501
:param links: The links of this FileManager. # noqa: E501
:type: list[ResourceLink]
"""
self._links = links
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(FileManager, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FileManager):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
```
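A sketch of how to_dict() recurses into list-valued fields, using the GalleryFile model defined in the next file; both import paths follow the file headers and the file metadata is invented.

```python
from mailchimp_marketing_asyncio.models.file_manager import FileManager
from mailchimp_marketing_asyncio.models.gallery_file import GalleryFile

logo = GalleryFile(id=101, name="logo.png", type="image", size=20480)
manager = FileManager(files=[logo], total_file_size=20480.0, total_items=1)

# List values are mapped through to_dict() as well, so the result is plain
# dicts and lists that can be passed straight to json.dumps().
as_dict = manager.to_dict()
print(as_dict["total_items"], as_dict["files"][0]["name"])  # 1 logo.png
```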
#### File: mailchimp_marketing_asyncio/models/gallery_file.py
```python
import pprint
import re # noqa: F401
import six
class GalleryFile(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'int',
'folder_id': 'int',
'type': 'str',
'name': 'str',
'full_size_url': 'str',
'thumbnail_url': 'str',
'size': 'int',
'created_at': 'datetime',
'created_by': 'str',
'width': 'int',
'height': 'int',
'links': 'list[ResourceLink]'
}
attribute_map = {
'id': 'id',
'folder_id': 'folder_id',
'type': 'type',
'name': 'name',
'full_size_url': 'full_size_url',
'thumbnail_url': 'thumbnail_url',
'size': 'size',
'created_at': 'created_at',
'created_by': 'created_by',
'width': 'width',
'height': 'height',
'links': '_links'
}
def __init__(self, id=None, folder_id=None, type=None, name=None, full_size_url=None, thumbnail_url=None, size=None, created_at=None, created_by=None, width=None, height=None, links=None): # noqa: E501
"""GalleryFile - a model defined in Swagger""" # noqa: E501
self._id = None
self._folder_id = None
self._type = None
self._name = None
self._full_size_url = None
self._thumbnail_url = None
self._size = None
self._created_at = None
self._created_by = None
self._width = None
self._height = None
self._links = None
self.discriminator = None
if id is not None:
self.id = id
if folder_id is not None:
self.folder_id = folder_id
if type is not None:
self.type = type
if name is not None:
self.name = name
if full_size_url is not None:
self.full_size_url = full_size_url
if thumbnail_url is not None:
self.thumbnail_url = thumbnail_url
if size is not None:
self.size = size
if created_at is not None:
self.created_at = created_at
if created_by is not None:
self.created_by = created_by
if width is not None:
self.width = width
if height is not None:
self.height = height
if links is not None:
self.links = links
@property
def id(self):
"""Gets the id of this GalleryFile. # noqa: E501
The unique id of the file. # noqa: E501
:return: The id of this GalleryFile. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this GalleryFile.
The unique id of the file. # noqa: E501
:param id: The id of this GalleryFile. # noqa: E501
:type: int
"""
self._id = id
@property
def folder_id(self):
"""Gets the folder_id of this GalleryFile. # noqa: E501
The id of the folder. # noqa: E501
:return: The folder_id of this GalleryFile. # noqa: E501
:rtype: int
"""
return self._folder_id
@folder_id.setter
def folder_id(self, folder_id):
"""Sets the folder_id of this GalleryFile.
The id of the folder. # noqa: E501
:param folder_id: The folder_id of this GalleryFile. # noqa: E501
:type: int
"""
self._folder_id = folder_id
@property
def type(self):
"""Gets the type of this GalleryFile. # noqa: E501
The type of file in the File Manager. # noqa: E501
:return: The type of this GalleryFile. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this GalleryFile.
The type of file in the File Manager. # noqa: E501
:param type: The type of this GalleryFile. # noqa: E501
:type: str
"""
allowed_values = ["image", "file"] # noqa: E501
if type not in allowed_values:
raise ValueError(
"Invalid value for `type` ({0}), must be one of {1}" # noqa: E501
.format(type, allowed_values)
)
self._type = type
@property
def name(self):
"""Gets the name of this GalleryFile. # noqa: E501
The name of the file. # noqa: E501
:return: The name of this GalleryFile. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this GalleryFile.
The name of the file. # noqa: E501
:param name: The name of this GalleryFile. # noqa: E501
:type: str
"""
self._name = name
@property
def full_size_url(self):
"""Gets the full_size_url of this GalleryFile. # noqa: E501
The url of the full-size file. # noqa: E501
:return: The full_size_url of this GalleryFile. # noqa: E501
:rtype: str
"""
return self._full_size_url
@full_size_url.setter
def full_size_url(self, full_size_url):
"""Sets the full_size_url of this GalleryFile.
The url of the full-size file. # noqa: E501
:param full_size_url: The full_size_url of this GalleryFile. # noqa: E501
:type: str
"""
self._full_size_url = full_size_url
@property
def thumbnail_url(self):
"""Gets the thumbnail_url of this GalleryFile. # noqa: E501
The url of the thumbnail preview. # noqa: E501
:return: The thumbnail_url of this GalleryFile. # noqa: E501
:rtype: str
"""
return self._thumbnail_url
@thumbnail_url.setter
def thumbnail_url(self, thumbnail_url):
"""Sets the thumbnail_url of this GalleryFile.
The url of the thumbnail preview. # noqa: E501
:param thumbnail_url: The thumbnail_url of this GalleryFile. # noqa: E501
:type: str
"""
self._thumbnail_url = thumbnail_url
@property
def size(self):
"""Gets the size of this GalleryFile. # noqa: E501
The size of the file in bytes. # noqa: E501
:return: The size of this GalleryFile. # noqa: E501
:rtype: int
"""
return self._size
@size.setter
def size(self, size):
"""Sets the size of this GalleryFile.
The size of the file in bytes. # noqa: E501
:param size: The size of this GalleryFile. # noqa: E501
:type: int
"""
self._size = size
@property
def created_at(self):
"""Gets the created_at of this GalleryFile. # noqa: E501
The date and time a file was added to the File Manager in ISO 8601 format. # noqa: E501
:return: The created_at of this GalleryFile. # noqa: E501
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this GalleryFile.
The date and time a file was added to the File Manager in ISO 8601 format. # noqa: E501
:param created_at: The created_at of this GalleryFile. # noqa: E501
:type: datetime
"""
self._created_at = created_at
@property
def created_by(self):
"""Gets the created_by of this GalleryFile. # noqa: E501
The username of the profile that uploaded the file. # noqa: E501
:return: The created_by of this GalleryFile. # noqa: E501
:rtype: str
"""
return self._created_by
@created_by.setter
def created_by(self, created_by):
"""Sets the created_by of this GalleryFile.
The username of the profile that uploaded the file. # noqa: E501
:param created_by: The created_by of this GalleryFile. # noqa: E501
:type: str
"""
self._created_by = created_by
@property
def width(self):
"""Gets the width of this GalleryFile. # noqa: E501
The width of the image. # noqa: E501
:return: The width of this GalleryFile. # noqa: E501
:rtype: int
"""
return self._width
@width.setter
def width(self, width):
"""Sets the width of this GalleryFile.
The width of the image. # noqa: E501
:param width: The width of this GalleryFile. # noqa: E501
:type: int
"""
self._width = width
@property
def height(self):
"""Gets the height of this GalleryFile. # noqa: E501
The height of an image. # noqa: E501
:return: The height of this GalleryFile. # noqa: E501
:rtype: int
"""
return self._height
@height.setter
def height(self, height):
"""Sets the height of this GalleryFile.
The height of an image. # noqa: E501
:param height: The height of this GalleryFile. # noqa: E501
:type: int
"""
self._height = height
@property
def links(self):
"""Gets the links of this GalleryFile. # noqa: E501
A list of link types and descriptions for the API schema documents. # noqa: E501
:return: The links of this GalleryFile. # noqa: E501
:rtype: list[ResourceLink]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this GalleryFile.
A list of link types and descriptions for the API schema documents. # noqa: E501
:param links: The links of this GalleryFile. # noqa: E501
:type: list[ResourceLink]
"""
self._links = links
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(GalleryFile, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, GalleryFile):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
```
#### File: mailchimp_marketing_asyncio/models/list_location.py
```python
import pprint
import re # noqa: F401
import six
class ListLocation(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'country': 'str',
'cc': 'str',
'percent': 'float',
'total': 'int'
}
attribute_map = {
'country': 'country',
'cc': 'cc',
'percent': 'percent',
'total': 'total'
}
def __init__(self, country=None, cc=None, percent=None, total=None): # noqa: E501
"""ListLocation - a model defined in Swagger""" # noqa: E501
self._country = None
self._cc = None
self._percent = None
self._total = None
self.discriminator = None
if country is not None:
self.country = country
if cc is not None:
self.cc = cc
if percent is not None:
self.percent = percent
if total is not None:
self.total = total
@property
def country(self):
"""Gets the country of this ListLocation. # noqa: E501
The name of the country. # noqa: E501
:return: The country of this ListLocation. # noqa: E501
:rtype: str
"""
return self._country
@country.setter
def country(self, country):
"""Sets the country of this ListLocation.
The name of the country. # noqa: E501
:param country: The country of this ListLocation. # noqa: E501
:type: str
"""
self._country = country
@property
def cc(self):
"""Gets the cc of this ListLocation. # noqa: E501
The two-digit ISO 3166 country code. # noqa: E501
:return: The cc of this ListLocation. # noqa: E501
:rtype: str
"""
return self._cc
@cc.setter
def cc(self, cc):
"""Sets the cc of this ListLocation.
The two-digit ISO 3166 country code. # noqa: E501
:param cc: The cc of this ListLocation. # noqa: E501
:type: str
"""
self._cc = cc
@property
def percent(self):
"""Gets the percent of this ListLocation. # noqa: E501
The percent of subscribers in the country. # noqa: E501
:return: The percent of this ListLocation. # noqa: E501
:rtype: float
"""
return self._percent
@percent.setter
def percent(self, percent):
"""Sets the percent of this ListLocation.
The percent of subscribers in the country. # noqa: E501
:param percent: The percent of this ListLocation. # noqa: E501
:type: float
"""
self._percent = percent
@property
def total(self):
"""Gets the total of this ListLocation. # noqa: E501
The total number of subscribers in the country. # noqa: E501
:return: The total of this ListLocation. # noqa: E501
:rtype: int
"""
return self._total
@total.setter
def total(self, total):
"""Sets the total of this ListLocation.
The total number of subscribers in the country. # noqa: E501
:param total: The total of this ListLocation. # noqa: E501
:type: int
"""
self._total = total
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ListLocation, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListLocation):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
```
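A quick sketch aggregating a few ListLocation records; import path per the file header, numbers invented.

```python
from mailchimp_marketing_asyncio.models.list_location import ListLocation

locations = [
    ListLocation(country="United States", cc="US", percent=62.5, total=1250),
    ListLocation(country="Germany", cc="DE", percent=20.0, total=400),
    ListLocation(country="Japan", cc="JP", percent=17.5, total=350),
]

# Each record is a plain data holder; summing totals or exporting via
# to_dict() needs no extra plumbing.
print(sum(loc.total for loc in locations))          # 2000
print([loc.to_dict()["cc"] for loc in locations])   # ['US', 'DE', 'JP']
```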
#### File: mailchimp_marketing_asyncio/models/member_notes.py
```python
import pprint
import re # noqa: F401
import six
class MemberNotes(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'int',
'created_at': 'datetime',
'created_by': 'str',
'updated_at': 'datetime',
'note': 'str',
'list_id': 'str',
'email_id': 'str',
'links': 'list[ResourceLink]'
}
attribute_map = {
'id': 'id',
'created_at': 'created_at',
'created_by': 'created_by',
'updated_at': 'updated_at',
'note': 'note',
'list_id': 'list_id',
'email_id': 'email_id',
'links': '_links'
}
def __init__(self, id=None, created_at=None, created_by=None, updated_at=None, note=None, list_id=None, email_id=None, links=None): # noqa: E501
"""MemberNotes - a model defined in Swagger""" # noqa: E501
self._id = None
self._created_at = None
self._created_by = None
self._updated_at = None
self._note = None
self._list_id = None
self._email_id = None
self._links = None
self.discriminator = None
if id is not None:
self.id = id
if created_at is not None:
self.created_at = created_at
if created_by is not None:
self.created_by = created_by
if updated_at is not None:
self.updated_at = updated_at
if note is not None:
self.note = note
if list_id is not None:
self.list_id = list_id
if email_id is not None:
self.email_id = email_id
if links is not None:
self.links = links
@property
def id(self):
"""Gets the id of this MemberNotes. # noqa: E501
The note id. # noqa: E501
:return: The id of this MemberNotes. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this MemberNotes.
The note id. # noqa: E501
:param id: The id of this MemberNotes. # noqa: E501
:type: int
"""
self._id = id
@property
def created_at(self):
"""Gets the created_at of this MemberNotes. # noqa: E501
The date and time the note was created in ISO 8601 format. # noqa: E501
:return: The created_at of this MemberNotes. # noqa: E501
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this MemberNotes.
The date and time the note was created in ISO 8601 format. # noqa: E501
:param created_at: The created_at of this MemberNotes. # noqa: E501
:type: datetime
"""
self._created_at = created_at
@property
def created_by(self):
"""Gets the created_by of this MemberNotes. # noqa: E501
The author of the note. # noqa: E501
:return: The created_by of this MemberNotes. # noqa: E501
:rtype: str
"""
return self._created_by
@created_by.setter
def created_by(self, created_by):
"""Sets the created_by of this MemberNotes.
The author of the note. # noqa: E501
:param created_by: The created_by of this MemberNotes. # noqa: E501
:type: str
"""
self._created_by = created_by
@property
def updated_at(self):
"""Gets the updated_at of this MemberNotes. # noqa: E501
The date and time the note was last updated in ISO 8601 format. # noqa: E501
:return: The updated_at of this MemberNotes. # noqa: E501
:rtype: datetime
"""
return self._updated_at
@updated_at.setter
def updated_at(self, updated_at):
"""Sets the updated_at of this MemberNotes.
The date and time the note was last updated in ISO 8601 format. # noqa: E501
:param updated_at: The updated_at of this MemberNotes. # noqa: E501
:type: datetime
"""
self._updated_at = updated_at
@property
def note(self):
"""Gets the note of this MemberNotes. # noqa: E501
The content of the note. # noqa: E501
:return: The note of this MemberNotes. # noqa: E501
:rtype: str
"""
return self._note
@note.setter
def note(self, note):
"""Sets the note of this MemberNotes.
The content of the note. # noqa: E501
:param note: The note of this MemberNotes. # noqa: E501
:type: str
"""
self._note = note
@property
def list_id(self):
"""Gets the list_id of this MemberNotes. # noqa: E501
The unique id for the list. # noqa: E501
:return: The list_id of this MemberNotes. # noqa: E501
:rtype: str
"""
return self._list_id
@list_id.setter
def list_id(self, list_id):
"""Sets the list_id of this MemberNotes.
The unique id for the list. # noqa: E501
:param list_id: The list_id of this MemberNotes. # noqa: E501
:type: str
"""
self._list_id = list_id
@property
def email_id(self):
"""Gets the email_id of this MemberNotes. # noqa: E501
The MD5 hash of the lowercase version of the list member's email address. # noqa: E501
:return: The email_id of this MemberNotes. # noqa: E501
:rtype: str
"""
return self._email_id
@email_id.setter
def email_id(self, email_id):
"""Sets the email_id of this MemberNotes.
The MD5 hash of the lowercase version of the list member's email address. # noqa: E501
:param email_id: The email_id of this MemberNotes. # noqa: E501
:type: str
"""
self._email_id = email_id
@property
def links(self):
"""Gets the links of this MemberNotes. # noqa: E501
A list of link types and descriptions for the API schema documents. # noqa: E501
:return: The links of this MemberNotes. # noqa: E501
:rtype: list[ResourceLink]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this MemberNotes.
A list of link types and descriptions for the API schema documents. # noqa: E501
:param links: The links of this MemberNotes. # noqa: E501
:type: list[ResourceLink]
"""
self._links = links
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(MemberNotes, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MemberNotes):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
```
#### File: mailchimp_marketing_asyncio/models/member_tags.py
```python
import pprint
import re # noqa: F401
import six
class MemberTags(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'tags': 'list[MemberTag]',
'is_syncing': 'bool'
}
attribute_map = {
'tags': 'tags',
'is_syncing': 'is_syncing'
}
def __init__(self, tags=None, is_syncing=None): # noqa: E501
"""MemberTags - a model defined in Swagger""" # noqa: E501
self._tags = None
self._is_syncing = None
self.discriminator = None
self.tags = tags
if is_syncing is not None:
self.is_syncing = is_syncing
@property
def tags(self):
"""Gets the tags of this MemberTags. # noqa: E501
A list of tags assigned to the list member. # noqa: E501
:return: The tags of this MemberTags. # noqa: E501
:rtype: list[MemberTag]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""Sets the tags of this MemberTags.
A list of tags assigned to the list member. # noqa: E501
:param tags: The tags of this MemberTags. # noqa: E501
:type: list[MemberTag]
"""
if tags is None:
raise ValueError("Invalid value for `tags`, must not be `None`") # noqa: E501
self._tags = tags
@property
def is_syncing(self):
"""Gets the is_syncing of this MemberTags. # noqa: E501
When is_syncing is true, automations based on the tags in the request will not fire # noqa: E501
:return: The is_syncing of this MemberTags. # noqa: E501
:rtype: bool
"""
return self._is_syncing
@is_syncing.setter
def is_syncing(self, is_syncing):
"""Sets the is_syncing of this MemberTags.
When is_syncing is true, automations based on the tags in the request will not fire # noqa: E501
:param is_syncing: The is_syncing of this MemberTags. # noqa: E501
:type: bool
"""
self._is_syncing = is_syncing
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(MemberTags, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MemberTags):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
```
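A brief usage sketch for the generated model above. The import path and the plain dicts standing in for `MemberTag` instances are assumptions; `to_dict()` passes items without a `to_dict` method through unchanged.

```python
# Hedged sketch; import path and dict stand-ins for MemberTag are assumptions.
from mailchimp_marketing_asyncio.models.member_tags import MemberTags

tags = MemberTags(tags=[{"name": "vip", "status": "active"}], is_syncing=False)
print(tags.to_dict())  # {'tags': [{'name': 'vip', 'status': 'active'}], 'is_syncing': False}

# `tags` is required: the setter raises if it is None.
try:
    MemberTags(tags=None)
except ValueError as e:
    print(e)
```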
#### File: mailchimp_marketing_asyncio/models/opens.py
```python
import pprint
import re # noqa: F401
import six
class Opens(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'opens_total': 'int',
'unique_opens': 'int',
'open_rate': 'float',
'last_open': 'datetime'
}
attribute_map = {
'opens_total': 'opens_total',
'unique_opens': 'unique_opens',
'open_rate': 'open_rate',
'last_open': 'last_open'
}
def __init__(self, opens_total=None, unique_opens=None, open_rate=None, last_open=None): # noqa: E501
"""Opens - a model defined in Swagger""" # noqa: E501
self._opens_total = None
self._unique_opens = None
self._open_rate = None
self._last_open = None
self.discriminator = None
if opens_total is not None:
self.opens_total = opens_total
if unique_opens is not None:
self.unique_opens = unique_opens
if open_rate is not None:
self.open_rate = open_rate
if last_open is not None:
self.last_open = last_open
@property
def opens_total(self):
"""Gets the opens_total of this Opens. # noqa: E501
The total number of opens for a campaign. # noqa: E501
:return: The opens_total of this Opens. # noqa: E501
:rtype: int
"""
return self._opens_total
@opens_total.setter
def opens_total(self, opens_total):
"""Sets the opens_total of this Opens.
The total number of opens for a campaign. # noqa: E501
:param opens_total: The opens_total of this Opens. # noqa: E501
:type: int
"""
self._opens_total = opens_total
@property
def unique_opens(self):
"""Gets the unique_opens of this Opens. # noqa: E501
The total number of unique opens. # noqa: E501
:return: The unique_opens of this Opens. # noqa: E501
:rtype: int
"""
return self._unique_opens
@unique_opens.setter
def unique_opens(self, unique_opens):
"""Sets the unique_opens of this Opens.
The total number of unique opens. # noqa: E501
:param unique_opens: The unique_opens of this Opens. # noqa: E501
:type: int
"""
self._unique_opens = unique_opens
@property
def open_rate(self):
"""Gets the open_rate of this Opens. # noqa: E501
The number of unique opens divided by the total number of successful deliveries. # noqa: E501
:return: The open_rate of this Opens. # noqa: E501
:rtype: float
"""
return self._open_rate
@open_rate.setter
def open_rate(self, open_rate):
"""Sets the open_rate of this Opens.
The number of unique opens divided by the total number of successful deliveries. # noqa: E501
:param open_rate: The open_rate of this Opens. # noqa: E501
:type: float
"""
self._open_rate = open_rate
@property
def last_open(self):
"""Gets the last_open of this Opens. # noqa: E501
The date and time of the last recorded open in ISO 8601 format. # noqa: E501
:return: The last_open of this Opens. # noqa: E501
:rtype: datetime
"""
return self._last_open
@last_open.setter
def last_open(self, last_open):
"""Sets the last_open of this Opens.
The date and time of the last recorded open in ISO 8601 format. # noqa: E501
:param last_open: The last_open of this Opens. # noqa: E501
:type: datetime
"""
self._last_open = last_open
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Opens, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Opens):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
```
#### File: mailchimp_marketing_asyncio/models/problem_detail_document.py
```python
import pprint
import re # noqa: F401
import six
class ProblemDetailDocument(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'type': 'str',
'title': 'str',
'status': 'int',
'detail': 'str',
'instance': 'str'
}
attribute_map = {
'type': 'type',
'title': 'title',
'status': 'status',
'detail': 'detail',
'instance': 'instance'
}
def __init__(self, type=None, title=None, status=None, detail=None, instance=None): # noqa: E501
"""ProblemDetailDocument - a model defined in Swagger""" # noqa: E501
self._type = None
self._title = None
self._status = None
self._detail = None
self._instance = None
self.discriminator = None
self.type = type
self.title = title
self.status = status
self.detail = detail
self.instance = instance
@property
def type(self):
"""Gets the type of this ProblemDetailDocument. # noqa: E501
An absolute URI that identifies the problem type. When dereferenced, it should provide human-readable documentation for the problem type. # noqa: E501
:return: The type of this ProblemDetailDocument. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this ProblemDetailDocument.
An absolute URI that identifies the problem type. When dereferenced, it should provide human-readable documentation for the problem type. # noqa: E501
:param type: The type of this ProblemDetailDocument. # noqa: E501
:type: str
"""
if type is None:
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
@property
def title(self):
"""Gets the title of this ProblemDetailDocument. # noqa: E501
A short, human-readable summary of the problem type. It shouldn't change based on the occurrence of the problem, except for purposes of localization. # noqa: E501
:return: The title of this ProblemDetailDocument. # noqa: E501
:rtype: str
"""
return self._title
@title.setter
def title(self, title):
"""Sets the title of this ProblemDetailDocument.
A short, human-readable summary of the problem type. It shouldn't change based on the occurrence of the problem, except for purposes of localization. # noqa: E501
:param title: The title of this ProblemDetailDocument. # noqa: E501
:type: str
"""
if title is None:
raise ValueError("Invalid value for `title`, must not be `None`") # noqa: E501
self._title = title
@property
def status(self):
"""Gets the status of this ProblemDetailDocument. # noqa: E501
The HTTP status code (RFC2616, Section 6) generated by the origin server for this occurrence of the problem. # noqa: E501
:return: The status of this ProblemDetailDocument. # noqa: E501
:rtype: int
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ProblemDetailDocument.
The HTTP status code (RFC2616, Section 6) generated by the origin server for this occurrence of the problem. # noqa: E501
:param status: The status of this ProblemDetailDocument. # noqa: E501
:type: int
"""
if status is None:
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
self._status = status
@property
def detail(self):
"""Gets the detail of this ProblemDetailDocument. # noqa: E501
A human-readable explanation specific to this occurrence of the problem. [Learn more about errors](/developer/guides/get-started-with-mailchimp-api-3/#Errors). # noqa: E501
:return: The detail of this ProblemDetailDocument. # noqa: E501
:rtype: str
"""
return self._detail
@detail.setter
def detail(self, detail):
"""Sets the detail of this ProblemDetailDocument.
A human-readable explanation specific to this occurrence of the problem. [Learn more about errors](/developer/guides/get-started-with-mailchimp-api-3/#Errors). # noqa: E501
:param detail: The detail of this ProblemDetailDocument. # noqa: E501
:type: str
"""
if detail is None:
raise ValueError("Invalid value for `detail`, must not be `None`") # noqa: E501
self._detail = detail
@property
def instance(self):
"""Gets the instance of this ProblemDetailDocument. # noqa: E501
A string that identifies this specific occurrence of the problem. Please provide this ID when contacting support. # noqa: E501
:return: The instance of this ProblemDetailDocument. # noqa: E501
:rtype: str
"""
return self._instance
@instance.setter
def instance(self, instance):
"""Sets the instance of this ProblemDetailDocument.
A string that identifies this specific occurrence of the problem. Please provide this ID when contacting support. # noqa: E501
:param instance: The instance of this ProblemDetailDocument. # noqa: E501
:type: str
"""
if instance is None:
raise ValueError("Invalid value for `instance`, must not be `None`") # noqa: E501
self._instance = instance
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ProblemDetailDocument, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ProblemDetailDocument):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
```
#### File: mailchimp_marketing_asyncio/models/subscriber_in_automation_queue2.py
```python
import pprint
import re # noqa: F401
import six
class SubscriberInAutomationQueue2(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'id': 'str',
'workflow_id': 'str',
'email_id': 'str',
'list_id': 'str',
'list_is_active': 'bool',
'email_address': 'str',
'next_send': 'datetime',
'links': 'list[ResourceLink]'
}
attribute_map = {
'id': 'id',
'workflow_id': 'workflow_id',
'email_id': 'email_id',
'list_id': 'list_id',
'list_is_active': 'list_is_active',
'email_address': 'email_address',
'next_send': 'next_send',
'links': '_links'
}
def __init__(self, id=None, workflow_id=None, email_id=None, list_id=None, list_is_active=None, email_address=None, next_send=None, links=None): # noqa: E501
"""SubscriberInAutomationQueue2 - a model defined in Swagger""" # noqa: E501
self._id = None
self._workflow_id = None
self._email_id = None
self._list_id = None
self._list_is_active = None
self._email_address = None
self._next_send = None
self._links = None
self.discriminator = None
if id is not None:
self.id = id
if workflow_id is not None:
self.workflow_id = workflow_id
if email_id is not None:
self.email_id = email_id
if list_id is not None:
self.list_id = list_id
if list_is_active is not None:
self.list_is_active = list_is_active
if email_address is not None:
self.email_address = email_address
if next_send is not None:
self.next_send = next_send
if links is not None:
self.links = links
@property
def id(self):
"""Gets the id of this SubscriberInAutomationQueue2. # noqa: E501
The MD5 hash of the lowercase version of the list member's email address. # noqa: E501
:return: The id of this SubscriberInAutomationQueue2. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this SubscriberInAutomationQueue2.
The MD5 hash of the lowercase version of the list member's email address. # noqa: E501
:param id: The id of this SubscriberInAutomationQueue2. # noqa: E501
:type: str
"""
self._id = id
@property
def workflow_id(self):
"""Gets the workflow_id of this SubscriberInAutomationQueue2. # noqa: E501
A string that uniquely identifies an Automation workflow. # noqa: E501
:return: The workflow_id of this SubscriberInAutomationQueue2. # noqa: E501
:rtype: str
"""
return self._workflow_id
@workflow_id.setter
def workflow_id(self, workflow_id):
"""Sets the workflow_id of this SubscriberInAutomationQueue2.
A string that uniquely identifies an Automation workflow. # noqa: E501
:param workflow_id: The workflow_id of this SubscriberInAutomationQueue2. # noqa: E501
:type: str
"""
self._workflow_id = workflow_id
@property
def email_id(self):
"""Gets the email_id of this SubscriberInAutomationQueue2. # noqa: E501
A string that uniquely identifies an email in an Automation workflow. # noqa: E501
:return: The email_id of this SubscriberInAutomationQueue2. # noqa: E501
:rtype: str
"""
return self._email_id
@email_id.setter
def email_id(self, email_id):
"""Sets the email_id of this SubscriberInAutomationQueue2.
A string that uniquely identifies an email in an Automation workflow. # noqa: E501
:param email_id: The email_id of this SubscriberInAutomationQueue2. # noqa: E501
:type: str
"""
self._email_id = email_id
@property
def list_id(self):
"""Gets the list_id of this SubscriberInAutomationQueue2. # noqa: E501
A string that uniquely identifies a list. # noqa: E501
:return: The list_id of this SubscriberInAutomationQueue2. # noqa: E501
:rtype: str
"""
return self._list_id
@list_id.setter
def list_id(self, list_id):
"""Sets the list_id of this SubscriberInAutomationQueue2.
A string that uniquely identifies a list. # noqa: E501
:param list_id: The list_id of this SubscriberInAutomationQueue2. # noqa: E501
:type: str
"""
self._list_id = list_id
@property
def list_is_active(self):
"""Gets the list_is_active of this SubscriberInAutomationQueue2. # noqa: E501
The status of the list used, namely if it's deleted or disabled. # noqa: E501
:return: The list_is_active of this SubscriberInAutomationQueue2. # noqa: E501
:rtype: bool
"""
return self._list_is_active
@list_is_active.setter
def list_is_active(self, list_is_active):
"""Sets the list_is_active of this SubscriberInAutomationQueue2.
The status of the list used, namely if it's deleted or disabled. # noqa: E501
:param list_is_active: The list_is_active of this SubscriberInAutomationQueue2. # noqa: E501
:type: bool
"""
self._list_is_active = list_is_active
@property
def email_address(self):
"""Gets the email_address of this SubscriberInAutomationQueue2. # noqa: E501
The list member's email address. # noqa: E501
:return: The email_address of this SubscriberInAutomationQueue2. # noqa: E501
:rtype: str
"""
return self._email_address
@email_address.setter
def email_address(self, email_address):
"""Sets the email_address of this SubscriberInAutomationQueue2.
The list member's email address. # noqa: E501
:param email_address: The email_address of this SubscriberInAutomationQueue2. # noqa: E501
:type: str
"""
self._email_address = email_address
@property
def next_send(self):
"""Gets the next_send of this SubscriberInAutomationQueue2. # noqa: E501
The date and time of the next send for the workflow email in ISO 8601 format. # noqa: E501
:return: The next_send of this SubscriberInAutomationQueue2. # noqa: E501
:rtype: datetime
"""
return self._next_send
@next_send.setter
def next_send(self, next_send):
"""Sets the next_send of this SubscriberInAutomationQueue2.
The date and time of the next send for the workflow email in ISO 8601 format. # noqa: E501
:param next_send: The next_send of this SubscriberInAutomationQueue2. # noqa: E501
:type: datetime
"""
self._next_send = next_send
@property
def links(self):
"""Gets the links of this SubscriberInAutomationQueue2. # noqa: E501
A list of link types and descriptions for the API schema documents. # noqa: E501
:return: The links of this SubscriberInAutomationQueue2. # noqa: E501
:rtype: list[ResourceLink]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this SubscriberInAutomationQueue2.
A list of link types and descriptions for the API schema documents. # noqa: E501
:param links: The links of this SubscriberInAutomationQueue2. # noqa: E501
:type: list[ResourceLink]
"""
self._links = links
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SubscriberInAutomationQueue2, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SubscriberInAutomationQueue2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
``` |
{
"source": "JohnPaton/airbase",
"score": 3
} |
#### File: airbase/airbase/util.py
```python
import datetime
from .resources import (
LINK_LIST_URL_TEMPLATE,
CURRENT_YEAR,
DATE_FMT,
ALL_SOURCES,
)
def string_safe_list(obj):
"""
Turn an (iterable) object into a list. If it is a string or not
iterable, put the whole object into a list of length 1.
:param obj:
:return list:
"""
if isinstance(obj, str) or not hasattr(obj, "__iter__"):
return [obj]
else:
return list(obj)
def countries_from_summary(summary):
"""
Get the list of unique countries from the summary.
:param list[dict] summary: The E1a summary.
:return list[str]: The available countries.
"""
return list({d["ct"] for d in summary})
def pollutants_from_summary(summary):
"""
Get the list of unique pollutants from the summary.
:param list[dict] summary: The E1a summary.
:return dict: The available pollutants, with name ("pl") as key
and pollutant number ("shortpl") as value.
"""
return {d["pl"]: d["shortpl"] for d in summary}
def pollutants_per_country(summary):
"""
Get the available pollutants per country from the summary.
:param list[dict] summary: The E1a summary.
:return dict[list[dict]]: All available pollutants per country.
"""
output = dict()
for d in summary.copy():
country = d.pop("ct")
if country in output:
output[country].append(d)
else:
output[country] = [d]
return output
def link_list_url(
country,
shortpl=None,
year_from="2013",
year_to=CURRENT_YEAR,
source="All",
update_date=None,
):
"""
Generate the URL where the download links for a query can be found.
:param str country: The 2-letter country code. See
AirbaseClient.countries for options.
:param str shortpl: (optional) The pollutant number. Leave blank to
get all pollutants. See AirbaseClient.pollutants_per_country for
options.
:param str year_from: (optional) The first year of data. Can not be
earlier than 2013. Default 2013.
:param str year_to: (optional) The last year of data. Can not be
later than the current year. Default <current year>.
:param str source: (optional) One of "E1a", "E2a" or "All". E2a
(UTD) data are only available for years where E1a data have not
yet been delivered (this will normally be the most recent year).
Default "All".
:param str|datetime update_date: (optional). Format
"yyyy-mm-dd hh:mm:ss". To be used when only files created or
        updated after a certain date are of interest.
:return str: The URL which will yield the list of relevant CSV
download links.
"""
shortpl = shortpl or ""
if int(year_from) < 2013:
raise ValueError("'year_from' must be at least 2013")
year_from = str(int(year_from))
if int(year_to) > int(CURRENT_YEAR):
raise ValueError("'year_to' must be at most " + str(CURRENT_YEAR))
year_to = str(int(year_to))
if isinstance(update_date, datetime.datetime):
update_date = update_date.strftime(DATE_FMT)
update_date = update_date or ""
if source is not None and source not in ALL_SOURCES:
raise ValueError("'source' must be one of: " + ",".join(ALL_SOURCES))
source = source or ""
return LINK_LIST_URL_TEMPLATE.format(
country=country,
shortpl=shortpl,
year_from=year_from,
year_to=year_to,
source=source,
update_date=update_date,
)
def extract_csv_links(text):
"""Get a list of csv links from the download link response text"""
links = text.replace("\r", "").split("\n")
links.remove("")
return links
``` |
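A hedged example of calling the helpers above; the exact URL returned depends on `LINK_LIST_URL_TEMPLATE` from `resources`, which is not shown here.

```python
# Illustrative only; the rendered URL depends on resources.LINK_LIST_URL_TEMPLATE.
from airbase.util import link_list_url, string_safe_list

url = link_list_url("NL", shortpl="5", year_from="2015", year_to="2017", source="E1a")
print(url)

# string_safe_list wraps strings and scalars, but passes real iterables through:
print(string_safe_list("NL"))          # ['NL']
print(string_safe_list(["NL", "DE"]))  # ['NL', 'DE']
```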
{
"source": "JohnPaton/blackjack",
"score": 4
} |
#### File: python/blackjack/deck.py
```python
import random
class Card():
"""A playing card.
Attributes:
suit (str): the suit of the card
value (int): the value of the card (2-11)
str (str): the card, e.g. CK or H9
"""
def __init__(self, suit, value):
"""Create a playing card.
Args:
suit (str): one of 'S','H','C','D'
value (str or int): one of 2-10 or 'J','Q','K','A'
"""
self.suit = suit
self.str = str(suit) + str(value)
        try:
            val = int(value)
        except (TypeError, ValueError):
            if value in ['J', 'Q', 'K']:
                val = 10
            elif value == 'A':
                val = 11
            else:
                raise ValueError(f"Invalid card value: {value!r}")
        self.value = val
def __str__(self):
return self.str
class Deck():
"""A deck of cards.
Attributes:
cards (list): Cards to be drawn
drawn (list): Cards that have been drawn
Methods:
shuffle: shuffle the cards
draw: draw a card
"""
suits = ['S', 'D', 'C', 'H']
values = list(range(2, 11)) + ['J', 'Q', 'K', 'A']
def __init__(self):
self.cards = [Card(s, v) for v in self.values for s in self.suits]
self.drawn = []
def shuffle(self, reset=False):
"""Shuffle the cards.
Reset all cards to be back in the pile if requested.
"""
if reset:
self.__init__()
random.shuffle(self.cards)
return self
def draw(self):
"""Draw a card."""
card = self.cards.pop(0)
self.drawn.append(card)
return card
```
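A quick sketch of the `Deck` API above, run with the class in scope:

```python
# Minimal sketch using the Deck defined above.
deck = Deck().shuffle()
card = deck.draw()
print(card, card.suit, card.value)       # e.g. "SK S 10"
print(len(deck.cards), len(deck.drawn))  # 51 1

# shuffle(reset=True) rebuilds the full 52-card pile before shuffling
deck.shuffle(reset=True)
print(len(deck.cards))                   # 52
```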
#### File: python/blackjack/player.py
```python
class Player():
"""A blackjack player.
Attributes:
cards (list): The current cards in the player's hand
deck (Deck): The deck in play
standing (bool): Whether the player has stood this round
Methods:
draw: draw a card
reset: empty hand and stop standing
score: score of the player's current cards
turn: take a turn (hit or stand)
"""
def __init__(self, deck):
self.deck = deck
self.cards = []
self.standing = False
def draw(self):
"""Draw a card into the player's hand."""
card = self.deck.draw()
self.cards.append(card)
return card
    def score(self):
        """The score of the player's current hand."""
        if not self.cards:
            return 0
        values = [card.value for card in self.cards]
        total = sum(values)
        for value in values:
            # Aces (value 11) can be worth 1 if the score is above 21
            if value == 11 and total > 21:
                total -= 10
        return total
def turn(self):
"""Take a turn (if not already standing)."""
move = ''
# can't take a turn if standing
while move not in ['hit', 'h', 'stand', 's'] and not self.standing:
move = input('(h)it or (s)tand? ').lower()
if move in ['hit', 'h']:
self.draw()
else:
self.standing = True
def reset(self):
"""Return cards and stop standing."""
self.cards = []
self.standing = False
class Dealer(Player):
"""A blackjack dealer.
Inherits from Player.
Attributes:
standing (bool): Whether the dealer is standing
Methods:
turn: hit if score is <= 16, else stand.
"""
def turn(self):
"""Take a turn."""
if self.score() <= 16 and not self.standing:
self.draw()
else:
self.standing = True
``` |
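A rough sketch of wiring a round together with these classes. Import paths are assumed from the file layout above, and `Player.turn()` reads from stdin, so this is interactive.

```python
# Rough, interactive sketch; import paths are assumed from the file layout above.
from blackjack.deck import Deck
from blackjack.player import Player, Dealer

deck = Deck().shuffle()
player, dealer = Player(deck), Dealer(deck)

for _ in range(2):  # initial deal of two cards each
    player.draw()
    dealer.draw()

while not (player.standing and dealer.standing):
    if player.score() > 21 or dealer.score() > 21:
        break  # someone busted
    player.turn()  # prompts "(h)it or (s)tand?"
    dealer.turn()  # hits on 16 or less

print("player:", player.score(), "dealer:", dealer.score())
```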
{
"source": "JohnPaton/huawei-hg659",
"score": 3
} |
#### File: JohnPaton/huawei-hg659/client.py
```python
import re
import json
import requests
from bs4 import BeautifulSoup
from . import util
class HG659Client:
_response_data_rx = re.compile(r"/\*(.*)\*/$")
def __init__(self, host, username, password):
"""
A client for the Huawei HG659 router.
:param host: The IP of the router, e.g. "192.168.1.1"
:param username: The login username
:param password: The login password
"""
self.host = host
self.username = username
self.password = password
self._csrf_param = None
self._csrf_token = None
# Always use session to maintain cookies
self._session = requests.Session()
# init csrf state
self._refresh_csrf()
def login(self):
"""
Log the client in to the router.
While logged in, the same user cannot log in to the web
interface. Call .logout() to log back out and unblock the web
interface again
:return: The response data from the login attempt
"""
self._refresh_csrf()
data = self._auth_data()
response = self._post("/api/system/user_login", json=data)
output = self._extract_json(response.text)
assert output, f"Error logging in. Response content: {response.text}"
return self._extract_json(response.text)
def logout(self):
"""
Log the client out of the router
:return: The response status of the logout request
"""
data = self._csrf_data()
response = self._post("/api/system/user_logout", json=data)
return response.status_code
def get_devices(self):
"""
List all devices known to the router
:return: A list of dicts containing device info
"""
response = self._get("/api/system/HostInfo")
output = self._extract_json(response.text)
assert output, f"Error getting devices. Response content: {response.text}"
return output
@property
def password(self):
return self._password
@password.setter
def password(self, value):
self._password = util.base64(util.sha256(value))
def _request(self, method, path, **kwargs):
url = f"http://{self.host}/{path.lstrip('/')}"
kwargs.setdefault("timeout", 2)
response = self._session.request(method, url, **kwargs,)
response.raise_for_status()
param, token = self._extract_csrf(response.text)
if param and token:
self._csrf_param = param
self._csrf_token = token
return response
def _get(self, path, **kwargs):
return self._request("GET", path, **kwargs)
def _post(self, path, **kwargs):
return self._request("POST", path, **kwargs)
def _refresh_csrf(self):
self._get("/", timeout=1)
@staticmethod
def _extract_csrf(response_text):
"""Extract the csrf tokens from an HTML response"""
param, token = None, None
soup = BeautifulSoup(response_text, features="html.parser")
param_elem = soup.find("meta", attrs={"name": "csrf_param"})
if param_elem:
param = param_elem.attrs.get("content")
token_elem = soup.find("meta", attrs={"name": "csrf_token"})
if token_elem:
token = token_elem.attrs.get("content")
return param, token
@classmethod
def _extract_json(cls, response_text):
"""Extract the json data from an api response"""
match = cls._response_data_rx.search(response_text)
if not match:
return None
return json.loads(match.group(1))
def _encode_password(self):
return util.sha256(
self.username + self.password + self._csrf_param + self._csrf_token
)
def _csrf_data(self):
return dict(csrf=dict(csrf_param=self._csrf_param, csrf_token=self._csrf_token))
def _auth_data(self):
data = self._csrf_data()
data.update(
dict(data=dict(UserName=self.username, Password=self._encode_password()))
)
return data
def __del__(self):
try:
self.logout()
except requests.exceptions.HTTPError as e:
if str(e).startswith("404"):
# Weren't logged in, no worries
pass
```
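A hedged usage sketch; the import path, host, and credentials are placeholders, and the device dict keys follow those used in `device_tracker.py` below.

```python
# Usage sketch; host and credentials are placeholders.
from client import HG659Client

router = HG659Client("192.168.1.1", username="admin", password="secret")
try:
    router.login()
    for device in router.get_devices():
        print(device["HostName"], device["IPAddress"], device["Active"])
finally:
    # Log out so the router's web UI is not blocked for the real user.
    router.logout()
```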
#### File: JohnPaton/huawei-hg659/device_tracker.py
```python
import logging
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA as PARENT_PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.helpers.device_registry import format_mac
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
import homeassistant.helpers.config_validation as cv
from .client import HG659Client
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PARENT_PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
}
)
def get_scanner(hass, config):
_LOGGER.info("Setting up HG659DeviceScanner")
scanner = HG659DeviceScanner(config[DOMAIN])
_LOGGER.info("HG659DeviceScanner connected")
return scanner
class HG659DeviceScanner(DeviceScanner):
def __init__(self, config):
super().__init__()
_LOGGER.debug("Initiating HG659 client")
self.client = HG659Client(
host=config[CONF_HOST],
username=config[CONF_USERNAME],
password=config[CONF_PASSWORD],
)
_LOGGER.debug("HG659 client initiated")
self._devices = dict()
def scan_devices(self):
try:
_LOGGER.debug("Logging in to router")
output = self.client.login()
_LOGGER.debug(f"Logged in, output: {output}")
_LOGGER.debug(f"Getting devices")
devices_router = self.client.get_devices()
_LOGGER.debug(f"Got {len(devices_router)} active + inactive devices")
finally:
# Don't forget to logout since otherwise the web interface
# will be blocked for the user
status = self.client.logout()
_LOGGER.debug(f"Logged out (status: {status})")
devices_hass = [
dict(
id=format_mac(d["MACAddress"]),
source_type="router",
is_connected=d["Active"],
ip_address=d["IPAddress"],
mac_address=d["MACAddress"],
hostname=d["HostName"],
)
for d in devices_router
if d["Active"] # only include active devices
]
_LOGGER.debug(f"{len(devices_hass)} devices were active")
self._devices = {d["mac_address"]: d for d in devices_hass}
return list(self._devices.keys())
def get_device_name(self, device: str):
d = self._devices.get(device)
if d:
return d["hostname"]
def get_extra_attributes(self, device: str):
return self._devices.get(device)
``` |
{
"source": "JohnPaton/nbviewerbot",
"score": 3
} |
#### File: nbviewerbot/nbviewerbot/resources.py
```python
import os
import re
import logging
import pickle
import dotenv
import praw
# Relevant directories
SRC_DIR = os.path.dirname(__file__)
RESOURCES_DIR = os.path.join(SRC_DIR, "resources.d")
PROJECT_DIR = os.path.realpath(os.path.join(SRC_DIR, ".."))
# Logging
LOGFILE_PATH = os.path.join(PROJECT_DIR, "nbviewerbot.log")
LOGGER = logging.getLogger("nbviewerbot")
# Reddit auth info from PROJECT_DIR/.env
DOTENV_PATH = os.path.join(SRC_DIR, ".env")
dotenv.load_dotenv(DOTENV_PATH)
# Reddit authentication
def get_reddit_auth_kwargs():
"""Get the authentication kwargs for praw.Reddit from the environment.
Requires the following environment variables to be set:
* CLIENT_ID : the ID of your script application
* CLIENT_SECRET : the secret of your script application
* USERNAME : the username of your bot's Reddit account
* PASSWORD : the password of your bot's Reddit account
See https://github.com/reddit-archive/reddit/wiki/OAuth2-Quick-Start-Example
for more details.
"""
kwargs = dict()
kwargs["client_id"] = os.environ.get("CLIENT_ID")
kwargs["client_secret"] = os.environ.get("CLIENT_SECRET")
kwargs["username"] = os.environ.get("USERNAME")
kwargs["password"] = os.environ.get("PASSWORD")
kwargs["user_agent"] = "python:nbviewerbot:v0.1.0 (by /u/jd_paton)"
for key, value in kwargs.items():
if value is None:
raise KeyError(
"{} not found in environment variables. "
"Have you filled in your .env file?".format(key.upper())
)
return kwargs
def load_reddit():
"""
Get the authentication kwargs from the environment and authenticate with
Reddit.
Returns
-------
praw.Reddit : the authenticated Reddit client
See also: utils.get_reddit_auth_kwargs
"""
kwargs = get_reddit_auth_kwargs()
reddit = praw.Reddit(**kwargs)
LOGGER.info(
"Successfully authenticated with Reddit as {}".format(
reddit.user.me().name
)
)
return reddit
# Templates (for use with string.format)
# TODO: Convert these all to string.Template
NBVIEWER_URL_TEMPLATE = "https://nbviewer.jupyter.org/url/{}"
BINDER_URL_TEMPLATE_NO_FILEPATH = "https://mybinder.org/v2/gh/{}/{}"
BINDER_URL_TEMPLATE_WITH_FILEPATH = (
"https://mybinder.org/v2/gh/{}/{}?filepath={}"
)
_comment_footer = """
------
^(I am a bot.)
[^(Feedback)](https://www.reddit.com/message/compose/?to=jd_paton) ^(|)
[^(GitHub)](https://github.com/JohnPaton/nbviewerbot) ^(|)
[^(Author)](https://johnpaton.net/)
"""
COMMENT_TEMPLATE_SINGLE = (
"""
I see you've posted a GitHub link to a Jupyter Notebook! GitHub doesn't
render large Jupyter Notebooks, so just in case, here is an
[nbviewer](https://nbviewer.jupyter.org/) link to the notebook:
{}
Want to run the code yourself? Here is a [binder](https://mybinder.org/)
link to start your own Jupyter server and try it out!
{}
"""
+ _comment_footer
)
COMMENT_TEMPLATE_MULTI = (
"""
I see you've posted GitHub links to Jupyter Notebooks! GitHub doesn't
render large Jupyter Notebooks, so just in case here are
[nbviewer](https://nbviewer.jupyter.org/) links to the notebooks:
{}
Want to run the code yourself? Here are [binder](https://mybinder.org/)
links to start your own Jupyter server!
{}
"""
+ _comment_footer
)
# Regexes
_url_rx = "^http.*"
URL_RX = re.compile(_url_rx)
# Subreddit lists
SUBREDDITS_TEST = [
"testingground4bots",
"bottestingplace",
"bottesting",
"bottest",
]
SUBREDDITS_RELEVANT_PATH = os.path.join(RESOURCES_DIR, "subreddits.txt")
with open(SUBREDDITS_RELEVANT_PATH, "r") as h:
_raw = h.readlines()
# strip whitespace and drop empty lines
SUBREDDITS_RELEVANT = [sub.strip() for sub in _raw]
SUBREDDITS_RELEVANT = [sub for sub in SUBREDDITS_RELEVANT if sub]
SUBREDDITS_RELEVANT += SUBREDDITS_TEST
SUBREDDITS_ALL = ["all"]
```
#### File: nbviewerbot/nbviewerbot/utils.py
```python
import urllib
import logging
import pickle
import threading
from queue import Full
from bs4 import BeautifulSoup
from nbviewerbot import resources
def parse_url_if_not_parsed(url):
"""
Return the urllib.parse.ParseResult for URL if it is not already parsed.
Parameters
----------
url : str or urllib.parse.ParseResult
Returns
-------
urllib.parse.ParseResult : The parsed URL
"""
if type(url) is urllib.parse.ParseResult:
return url
else:
return urllib.parse.urlparse(url)
def is_github_jupyter_url(url):
"""
Test if a url is a github jupyter url
Parameters
----------
url : str or urllib.parse.ParseResult
Returns
-------
True if the host is 'github' and the path contains '.ipynb',
else False
"""
parsed = parse_url_if_not_parsed(url)
return "github" in parsed.netloc.lower() and ".ipynb" in parsed.path.lower()
def get_notebook_path(url):
"""
Convert a full URL into a path. Removes http(s):// and www. if present.
Parameters
----------
url : str or urllib.parse.ParseResult
Returns
-------
str : the path
Examples
--------
>>> nbviewerbot.utils.get_notebook_path(
... 'https://www.github.com/JohnPaton/numpy-neural-networks/blob/'
... 'master/01-single-layer-perceptron.ipynb'
... )
'github.com/JohnPaton/numpy-neural-networks/blob/master/01-single-layer-perceptron.ipynb'
"""
parsed = parse_url_if_not_parsed(url)
return parsed.netloc.replace("www.", "") + parsed.path
def get_github_info(url):
"""
Get the repo, branch and (optional) filepath from a github url
Parameters
----------
url
Returns
-------
repo, branch, filepath (if present)
"""
parsed = parse_url_if_not_parsed(url)
assert "github" in parsed.netloc.lower(), "Must be a github url"
assert len(parsed.path.split("/")) >= 3, "Must be at least a path to a repo"
    path_elements = parsed.path.split("/")  # index 0 is "" because the path starts with "/"
repo = "/".join(path_elements[1:3])
branch = "master"
filepath = None
if len(path_elements) >= 5:
branch = path_elements[4]
if len(path_elements) >= 6:
filepath = "/".join(path_elements[5:])
return repo, branch, filepath
def get_all_links(html):
"""
Parse HTML and extract all http(s) hyperlink destinations
Parameters
----------
html : str
Returns
-------
list[str] : the found URLs (if any)
"""
soup = BeautifulSoup(html, features="html.parser")
links = soup.find_all("a", attrs={"href": resources.URL_RX})
return [link.get("href") for link in links]
def get_github_jupyter_links(html):
"""
    Parse HTML and extract all links to Jupyter Notebooks hosted on GitHub
Parameters
----------
html : str
Returns
-------
list[str] : the found URLs (if any)
See also: utils.is_github_jupyter_url
"""
links = get_all_links(html)
return [link for link in links if is_github_jupyter_url(link)]
def get_comment_jupyter_links(comment):
"""Extract jupyter lins from a comment, if any"""
html = comment.body_html
jupy_links = get_github_jupyter_links(html)
return jupy_links
def get_submission_jupyter_links(submission):
"""Extract jupyer links from a submission, if any"""
jupy_links = []
if submission.selftext_html is not None:
# self post, read html
html = submission.selftext_html
jupy_links += get_github_jupyter_links(html)
if is_github_jupyter_url(submission.url):
jupy_links += [submission.url]
# dedupe
jupy_links = list(dict.fromkeys(jupy_links))
return jupy_links
def setup_logger(console_level=logging.INFO, file_level=logging.DEBUG):
"""
Set up the nbviewerbot with a level for console logging and a level for
file logging. If either level is None, do not log to that destination.
Parameters
----------
console_level : int or None
The log level for the console
file_level : int or None
The log level for the file
Returns
-------
logger
"""
logger = logging.getLogger("nbviewerbot")
logger.setLevel(logging.DEBUG)
fmt = logging.Formatter(
"%(asctime)s %(levelname)s(%(threadName)s) - %(message)s"
)
if console_level is not None:
sh = logging.StreamHandler()
sh.setLevel(console_level)
sh.setFormatter(fmt)
logger.addHandler(sh)
if file_level is not None:
fh = logging.FileHandler(resources.LOGFILE_PATH)
fh.setLevel(file_level)
fh.setFormatter(fmt)
logger.addHandler(fh)
return logger
def praw_object_type(praw_obj):
"""Return the type of the praw object (comment/submission) as a
lowercase string."""
return type(praw_obj).__name__.lower()
def raise_on_exception(e):
"""Raises exception e"""
raise e
def load_queue(queue, iterable, stop_event=None):
    """Put items from iterable into queue as they become available
    Stops when stop_event is set if provided, else continues forever.
    If the item is None, it will be skipped. This can be used to more
    regularly check for stop_event being set (pass None through the
    iterator to check the event and then continue iterating).
    """
    if stop_event is None:
        stop_event = threading.Event()  # never set -> continue forever
    while not stop_event.is_set():
        for i in iterable:
            if i is None or stop_event.is_set():
                break
            while not stop_event.is_set():
                try:
                    queue.put(i, timeout=1.0)
                    resources.LOGGER.debug("Queued item {}".format(i))
                    break
                except Full:
                    resources.LOGGER.warning("Destination queue is full")
    resources.LOGGER.info("Stop signal received, stopping")
```
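A small illustration of the URL helpers above, reusing the example URL from the `get_notebook_path` docstring:

```python
# Illustration of the URL helpers above (example URL taken from the docstring).
from nbviewerbot.utils import (
    is_github_jupyter_url,
    get_notebook_path,
    get_github_info,
)

url = (
    "https://www.github.com/JohnPaton/numpy-neural-networks/blob/"
    "master/01-single-layer-perceptron.ipynb"
)
print(is_github_jupyter_url(url))  # True
print(get_notebook_path(url))
# github.com/JohnPaton/numpy-neural-networks/blob/master/01-single-layer-perceptron.ipynb
print(get_github_info(url))
# ('JohnPaton/numpy-neural-networks', 'master', '01-single-layer-perceptron.ipynb')
```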
#### File: nbviewerbot/tests/test_resources.py
```python
from nbviewerbot import resources
import dotenv
import os
import pytest
TEST_DIR = os.path.join(os.path.dirname(__file__))
DOTENV_PATH = os.path.join(TEST_DIR, ".env_test")
class TestGetRedditAuthKwargs:
def test_reads_environ(self):
dotenv_path = DOTENV_PATH # make local for output
dotenv.load_dotenv(dotenv_path, override=True)
kwargs = resources.get_reddit_auth_kwargs()
assert kwargs["username"] == "username"
assert kwargs["password"] == "password"
assert kwargs["client_id"] == "client_id"
assert kwargs["client_secret"] == "client_secret"
def test_raises_if_missing(self):
dotenv_path = DOTENV_PATH # make local for output
required_env_vars = [
"USERNAME",
"PASSWORD",
"CLIENT_ID",
"CLIENT_SECRET",
]
for key in required_env_vars:
dotenv.load_dotenv(dotenv_path, override=True)
os.environ.pop(key)
with pytest.raises(KeyError):
resources.get_reddit_auth_kwargs()
class TestSubredditsRelevant:
def test_has_subs(self):
assert len(resources.SUBREDDITS_RELEVANT) > 0
def test_no_empty_strings(self):
assert "" not in resources.SUBREDDITS_RELEVANT
``` |
{
"source": "JohnPaton/ratelimitqueue",
"score": 4
} |
#### File: ratelimitqueue/examples/example_workers.py
```python
import ratelimitqueue
import multiprocessing.dummy
import time
import random
def make_call_to_slow_api(url):
time.sleep(random.uniform(1, 2))
print("Calling:", url)
LIST_OF_URLS = ["https://example.com/{}".format(i) for i in range(25)]
rlq = ratelimitqueue.RateLimitQueue(calls=3, per=2)
n_workers = 4
def worker(rlq):
"""Makes API calls on URLs from queue until it is empty."""
while rlq.qsize() > 0:
url = rlq.get()
make_call_to_slow_api(url)
rlq.task_done()
# load up the queue
for url in LIST_OF_URLS:
rlq.put(url)
# make the calls
with multiprocessing.dummy.Pool(n_workers, worker, (rlq,)) as pool:
rlq.join()
```
#### File: ratelimitqueue/ratelimitqueue/utils.py
```python
import time
import queue
def get_time_remaining(start, timeout=None):
    """Return the seconds left before `timeout` expires, measured from `start`."""
    if timeout is None:
        return None
    else:
        time_elapsed = time.time() - start
        return timeout - time_elapsed
```
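A tiny illustration of `get_time_remaining` (timings are approximate):

```python
# Tiny illustration; the printed value is approximate.
import time
from ratelimitqueue.utils import get_time_remaining

start = time.time()
time.sleep(0.2)
print(get_time_remaining(start, timeout=1.0))  # roughly 0.8
print(get_time_remaining(start))               # None when no timeout is given
```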
#### File: ratelimitqueue/tests/test_ratelimitqueue.py
```python
import pytest
from unittest import mock
from .utils import almost
from ratelimitqueue.ratelimitqueue import RateLimitQueue, RateLimitException
from ratelimitqueue.ratelimitqueue import RateLimitLifoQueue, RateLimitGetMixin
from ratelimitqueue.ratelimitqueue import RateLimitPriorityQueue
from queue import Empty
import time
import random
# take randomness out of fuzzing
random.uniform = mock.Mock(side_effect=lambda a, b: a + (b - a) / 2)
class GetMixinTester:
# tests of __init__()
def test_no_calls(self):
rlq = self.QueueClass()
del rlq.calls
rlq.put(1)
with pytest.raises(AttributeError):
rlq.get()
def test_no_per(self):
rlq = self.QueueClass()
del rlq.per
rlq.put(1)
with pytest.raises(AttributeError):
rlq.get()
def test_no_fuzz(self):
rlq = self.QueueClass()
del rlq.fuzz
rlq.put(1)
with pytest.raises(AttributeError):
rlq.get()
def test_no__call_log(self):
rlq = self.QueueClass()
del rlq._call_log
rlq.put(1)
with pytest.raises(AttributeError):
rlq.get()
def test_no_get(self):
class DummyParent:
fuzz = 0
per = 0
calls = 0
_call_log = 0
class DummyChild(RateLimitGetMixin, DummyParent):
pass
dc = DummyChild()
with pytest.raises(AttributeError):
dc.get()
def test_maxsize(self):
rlq = self.QueueClass(0)
assert rlq.maxsize == 0
rlq = self.QueueClass(3)
assert rlq.maxsize == 3
def test_calls_not_less_than_1(self):
with pytest.raises(ValueError):
rlq = self.QueueClass(1, 0, 10)
with pytest.raises(ValueError):
rlq = self.QueueClass(1, -1, 10)
# tests of get()
def test_timeout_not_less_than_0(self):
rlq = self.QueueClass()
rlq.put(1)
with pytest.raises(ValueError):
rlq.get(timeout=-1)
def test_item_in_queue(self):
rlq = self.QueueClass()
rlq.put(1)
assert rlq.get() == 1
def test_first_put_fast(self):
rlq = self.QueueClass()
start = time.time()
rlq.put(1)
rlq.get()
assert almost(0, time.time() - start)
def test_default_rate_limit(self):
rlq = self.QueueClass()
start = time.time()
rlq.put(1)
rlq.put(1)
rlq.get()
rlq.get()
assert almost(1, time.time() - start)
def test_rate_limit_calls(self):
rlq = self.QueueClass(calls=2)
start = time.time()
rlq.put(1)
rlq.put(1)
rlq.put(1)
rlq.put(1)
rlq.get()
rlq.get()
rlq.get()
assert almost(1, time.time() - start)
rlq.get()
assert almost(1, time.time() - start)
def test_rate_limit_per(self):
rlq = self.QueueClass(per=0.5)
start = time.time()
rlq.put(1)
rlq.put(1)
rlq.put(1)
rlq.get()
rlq.get()
rlq.get()
assert almost(1, time.time() - start)
def test_rate_limit_calls_per(self):
rlq = self.QueueClass(calls=2, per=0.5)
start = time.time()
rlq.put(1)
rlq.put(1)
rlq.put(1)
rlq.put(1)
rlq.get()
rlq.get()
rlq.get()
assert almost(0.5, time.time() - start)
rlq.get()
assert almost(0.5, time.time() - start)
def test_not_block_raises_rate_limit(self):
rlq = self.QueueClass(calls=1, per=3)
rlq.put(1)
rlq.put(1)
rlq.get()
with pytest.raises(RateLimitException):
rlq.get(block=False)
def test_not_block_raises_empty(self):
rlq = self.QueueClass(calls=1, per=0)
with pytest.raises(Empty):
rlq.get(block=False)
def test_timeout_on_rate_limit_raises_rate_limit(self):
rlq = self.QueueClass(per=10)
rlq.put(1)
rlq.put(1)
rlq.get()
with pytest.raises(RateLimitException):
rlq.get(timeout=1)
def test_timeout_on_queue_size_raises_empty(self):
rlq = self.QueueClass(maxsize=1, per=0)
with pytest.raises(Empty):
rlq.get(timeout=0.001)
def test_timeout_on_queue_size_timing(self):
rlq = self.QueueClass(maxsize=1, per=0)
with pytest.raises(Empty):
start = time.time()
rlq.get(timeout=0.5)
assert almost(0.5, time.time() - start)
def test_no_fuzz_when_at_rate_limit(self):
rlq = self.QueueClass(per=0.5)
rlq.put(1)
rlq.get()
rlq.fuzz = 1000
start = time.time()
rlq.put(1)
rlq.get()
assert almost(0.5, time.time() - start)
def test_fuzz(self):
rlq = self.QueueClass(per=0.5, fuzz=1)
start = time.time()
rlq.put(1)
rlq.get()
end = time.time()
assert almost(0.5, end - start)
def test_fuzz_less_than_timeout(self):
rlq = self.QueueClass(fuzz=10000)
start = time.time()
rlq.put(1)
rlq.get(timeout=0.5)
end = time.time()
elapsed = end - start
assert almost(0.5, elapsed)
class TestRateLimitQueue(GetMixinTester):
QueueClass = RateLimitQueue
def test_fifo(self):
rlq = self.QueueClass(per=0)
rlq.put(1)
rlq.put(2)
assert rlq.get() == 1
assert rlq.get() == 2
class TestRateLimitLifoQueue(GetMixinTester):
QueueClass = RateLimitLifoQueue
def test_lifo(self):
rlq = self.QueueClass(per=0)
rlq.put(1)
rlq.put(2)
assert rlq.get() == 2
assert rlq.get() == 1
class TestRateLimitPriorityQueue(GetMixinTester):
QueueClass = RateLimitPriorityQueue
def test_priority(self):
rlq = self.QueueClass(per=0)
rlq.put((4, "fourth"))
rlq.put((2, "second"))
rlq.put((1, "first"))
rlq.put((3, "third"))
assert rlq.get() == (1, "first")
assert rlq.get() == (2, "second")
assert rlq.get() == (3, "third")
assert rlq.get() == (4, "fourth")
``` |
{
"source": "john-patterson/wavvy",
"score": 3
} |
#### File: wavvy/datalayer/models.py
```python
from wavvy import app
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy(app)
__all__ = ['User', 'Adjustment']
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
username = db.Column(db.String(80), unique=True, nullable=False)
password = db.Column(db.String(100), nullable=False)
team = db.Column(db.String(25), nullable=True)
def __init__(self, *, username, password, team=None):
self.username = username
self.password = password
self.team = team
def __repr__(self):
return '<User %r>' % self.username
class Adjustment(db.Model):
id = db.Column(db.Integer, primary_key=True)
old_temp = db.Column(db.Float)
new_temp = db.Column(db.Float)
outside_temp = db.Column(db.Float)
room_temp = db.Column(db.Float)
timestamp = db.Column(db.DateTime)
adjuster_id = db.Column(db.Integer, db.ForeignKey('user.id'))
adjuster = db.relationship('User',
backref=db.backref('accounts', lazy='dynamic'))
def __init__(self, *, old_temp, new_temp, outside_temp, room_temp,
timestamp, adjuster):
self.old_temp = old_temp
self.new_temp = new_temp
self.outside_temp = outside_temp
self.timestamp = timestamp
self.adjuster = adjuster
self.room_temp = room_temp
def __repr__(self):
return '<Adjustment {}>'.format(repr(self.id))
```
#### File: wavvy/datalayer/weather.py
```python
import requests
__all__ = ['OpenWeather']
class OpenWeather:
base_url = 'http://api.openweathermap.org/data/2.5/'
def __init__(self, key):
self.key = key
def kelvin_by_zip(self, zipcode):
payload = {
'zip': zipcode,
'APPID': self.key
}
return requests.get(self.base_url + '/weather', params=payload).json()['main']['temp']
```
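A hedged usage sketch; the API key and ZIP code are placeholders, and a real OpenWeatherMap account is required.

```python
# Usage sketch; the key and ZIP code are placeholders.
weather = OpenWeather(key="YOUR_OPENWEATHERMAP_API_KEY")
kelvin = weather.kelvin_by_zip("98101")
print(f"{kelvin:.1f} K ({kelvin - 273.15:.1f} °C)")
```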
#### File: wavvy/datalayer/wrapper.py
```python
from wavvy.datalayer.models import db
__all__ = ['DB']
class DB:
def __enter__(self):
return db.session
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
db.session.rollback()
return False
db.session.commit()
return True
``` |
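A sketch of the context manager in use: the session commits when the block exits cleanly and rolls back if an exception is raised inside it. Import paths follow the file layout above; the password value is a placeholder.

```python
# Sketch; commits on clean exit, rolls back if the block raises.
from wavvy.datalayer.wrapper import DB
from wavvy.datalayer.models import User

with DB() as session:
    session.add(User(username="alice", password="hashed-password", team="blue"))
# leaving the block commits; an exception inside would trigger a rollback instead
```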
{
"source": "johnpaulada/face-recognition-graphql-server",
"score": 3
} |
#### File: johnpaulada/face-recognition-graphql-server/add_face.py
```python
import os
import numpy as np
from image_tools import base64_to_embedding
def add_face(name, image_data):
embedding = base64_to_embedding(image_data)
add_name(name)
add_embedding(embedding)
def add_name(name):
with open('names.txt', 'a') as names_file:
names_file.write(name)
names_file.write('\n')
def add_embedding(embedding):
if (not os.path.exists('embeddings.npy')):
np.save('embeddings.npy', [embedding])
else:
embeddings = np.load('embeddings.npy')
embeddings = np.concatenate((embeddings, [embedding]))
np.save('embeddings.npy', embeddings)
``` |
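A hypothetical call; the exact string format `base64_to_embedding` expects (raw base64 vs. a data URI) is not shown in this file.

```python
# Hypothetical call; the base64 format expected by base64_to_embedding is assumed.
import base64
from add_face import add_face

with open("face.jpg", "rb") as f:
    image_b64 = base64.b64encode(f.read()).decode()

add_face("Ada Lovelace", image_b64)  # appends to names.txt and embeddings.npy
```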
{
"source": "johnpaulbin/banana",
"score": 2
} |
#### File: banana/banana_dev/package.py
```python
from .generics import run_main, start_main, check_main
# Generics
def run(api_key, model_key, model_inputs, strategy = {}):
out = run_main(
api_key = api_key,
model_key = model_key,
model_inputs = model_inputs,
strategy = strategy,
)
return out
def start(api_key, model_key, model_inputs, strategy = {}):
out = start_main(
api_key = api_key,
model_key = model_key,
model_inputs = model_inputs,
strategy = strategy,
)
return out
def check(api_key, task_id):
out_dict = check_main(
api_key = api_key,
task_id = task_id
)
return out_dict
``` |
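A hedged sketch of the thin wrappers above. The `banana_dev` import path is assumed from the file location, keys and inputs are placeholders, and whether `start` returns a task id that `check` accepts depends on the `generics` module, which is not shown.

```python
# Hedged sketch; keys/inputs are placeholders and the generics module is not shown.
import banana_dev as banana

api_key = "YOUR_API_KEY"
model_key = "YOUR_MODEL_KEY"
model_inputs = {"prompt": "Hello, world"}

result = banana.run(api_key, model_key, model_inputs)     # blocking call
task_id = banana.start(api_key, model_key, model_inputs)  # assuming this returns a task id
status = banana.check(api_key, task_id)                   # poll the task by id
```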
{
"source": "johnpaulbin/LibreASR",
"score": 2
} |
#### File: johnpaulbin/LibreASR/api-client.py
```python
import argparse
import random
import logging
import sys
import grpc
import torchaudio
import numpy as np
import interfaces.libreasr_pb2 as ap
import interfaces.libreasr_pb2_grpc as apg
DEMO_AUDIO = "./demo/3729-6852-0035.flac"
CHUNK_DURATION = 0.08 # secs
PORT = 50052
def load_demo():
data, sr = torchaudio.load(DEMO_AUDIO)
data = data[0][None]
data = torchaudio.transforms.Resample(sr, 16000)(data)
sr = 16000
data = data.numpy().astype(np.float32).tobytes()
return data, sr
def grab_audio():
data, sr = load_demo()
return ap.Audio(data=data, sr=sr)
def grab_audio_stream(secs):
data, sr = load_demo()
slice_sz = int(secs * sr) * 4
l = len(data) // slice_sz
# [start] zero
yield ap.Audio(data=bytes([0] * slice_sz), sr=sr)
for i in range(l):
chunk = data[i * slice_sz : (i + 1) * slice_sz]
# pad with zeros
chunk = chunk + bytes([0] * (slice_sz - len(chunk)))
assert len(chunk) % 4 == 0
# [mid]
yield ap.Audio(data=chunk, sr=sr)
# [end] zero frames mark end
for _ in range(10):
yield ap.Audio(data=bytes([0] * slice_sz), sr=sr)
def test_asr(stub):
print("Transcribe:")
audio = grab_audio()
print("-", stub.Transcribe(audio).data)
print("TranscribeStream:\n- ", end="")
audio_stream = grab_audio_stream(secs=CHUNK_DURATION)
for transcript in stub.TranscribeStream(audio_stream):
print(transcript.data, end="")
print()
def run(args):
with grpc.insecure_channel(f"localhost:{PORT}") as channel:
stub = apg.ASRStub(channel)
test_asr(stub)
if __name__ == "__main__":
logging.basicConfig()
# parse args
    parser = argparse.ArgumentParser(description="LibreASR gRPC API client example")
parser.add_argument(
"--source",
choices=["file", "stream"],
type=str,
default="file",
help="audio source",
)
parser.add_argument(
"--mode",
choices=["all", "chunks"],
default="all",
type=str,
nargs="+",
help="transcription mode (stream implies chunks)",
)
parser.add_argument(
"--file",
type=str,
default=DEMO_AUDIO,
help="if mode==file: what file to transcribe",
)
args = parser.parse_args()
run(args)
```
#### File: libreasr/lib/config.py
```python
import os
from functools import partial
from importlib import import_module
import collections.abc
from pathlib import Path
import torch
import yaml
from libreasr.lib.utils import n_params, what, wrap_transform
from libreasr.lib.language import get_language
from libreasr.lib.builder import ASRDatabunchBuilder
from libreasr.lib.data import ASRDatabunch
from libreasr.lib.models import Transducer, CTCModel
from libreasr.lib.learner import ASRLearner
from libreasr.lib.lm import load_lm
def update(d, u):
"from: https://stackoverflow.com/questions/3232943/update-value-of-a-nested-dictionary-of-varying-depth"
for k, v in u.items():
if isinstance(v, collections.abc.Mapping):
d[k] = update(d.get(k, {}), v)
else:
d[k] = v
return d
def open_config(*args, path="./config/testing.yaml", **kwargs):
path = Path(path)
if not os.path.exists(path):
path = ".." / path
with open(path, "r") as stream:
try:
obj = yaml.safe_load(stream)
return obj
except yaml.YAMLError as exc:
print(exc)
def parse_transforms(conf, inference):
mod = import_module("libreasr.lib.transforms")
tfms = []
if inference:
conf_tfms = [
conf["transforms"]["x"],
conf["transforms"]["stream"],
conf["transforms"]["y"],
]
else:
conf_tfms = [conf["transforms"]["x"], conf["transforms"]["y"]]
for i, conf_one_var in enumerate(conf_tfms):
tfms_one_var = []
for j, conf_one_tfm in enumerate(conf_one_var):
args = conf_one_tfm.get("args", {})
is_partial = conf_one_tfm.get("partial", False)
is_wrap = conf_one_tfm.get("wrap", False)
func = getattr(mod, conf_one_tfm["name"])
if is_partial:
func = partial(func, **args)
if is_wrap:
func = wrap_transform(func)
tfms_one_var.append(func)
tfms.append(tfms_one_var)
return tfms
def apply_cuda_stuff(conf):
if conf["cuda"]["enable"]:
if torch.cuda.is_available():
torch.cuda.set_device(int(conf["cuda"]["device"].split(":")[1]))
torch.backends.cudnn.benchmark = conf["cuda"]["benchmark"]
else:
raise Exception("cuda not available")
def check_vocab_sz(conf):
a = conf["model"]["vocab_sz"]
b = conf["wanted_vocab_sz"]
if a != b:
raise Exception(f"vocab sizes don't match: wanted={b}, current={a}")
def check_db(db):
tpl = db.one_batch()
X, Ym, _, _ = tpl[0]
Y, Y_lens, X_lens = tpl[1]
what(X), what(X_lens), what(Y), what(Y_lens)
assert X_lens.size(0) == Y_lens.size(0)
def parse_and_apply_config(*args, inference=False, **kwargs):
# open config
conf = open_config(*args, **kwargs)
# override config for inference + language
overrides = []
if inference:
overrides.append("inference")
lang = kwargs.get("lang", "")
lang_name = lang
if len(lang) > 0:
overrides.append(lang)
for override in overrides:
update(conf, conf["overrides"][override])
# torch-specific cuda settings
apply_cuda_stuff(conf)
# grab transforms
tfms = parse_transforms(conf, inference=inference)
if not inference:
# grab builder
builder_train = ASRDatabunchBuilder.from_config(conf, mode="train")
builder_valid = ASRDatabunchBuilder.from_config(conf, mode="valid")
# grab language + sanity check
try:
lang, _ = get_language(model_file=conf["tokenizer"]["model_file"])
        except Exception:
builder_train.train_tokenizer(
model_file=conf["tokenizer"]["model_file"],
vocab_sz=conf["model"]["vocab_sz"],
)
lang, _ = get_language(model_file=conf["tokenizer"]["model_file"])
check_vocab_sz(conf)
if not inference:
# grab databunch + sanity check
db = ASRDatabunch.from_config(conf, lang, builder_train, builder_valid, tfms)
check_db(db)
# load lm
lm = None
if inference and conf["lm"]["enable"]:
try:
lm = load_lm(conf, lang_name)
print("[LM] loaded.")
        except Exception:
print("[LM] Failed to load.")
# grab model
m = Transducer.from_config(conf, lang)
# print(n_params(m))
if inference:
# load weights
from libreasr.lib.model_utils import load_asr_model
load_asr_model(m, lang_name, lang, conf["cuda"]["device"], lm=lm)
m.lm = lm
m.lang = lang
# eval mode
m.eval()
return conf, lang, m, tfms
else:
# grab learner
learn = ASRLearner.from_config(conf, db, m)
return conf, lang, builder_train, builder_valid, db, m, learn
```
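As a rough illustration with made-up keys (not taken from the config files), the recursive `update` helper above merges an override block into the base config while keeping untouched sibling keys, which is how the `overrides` section in `parse_and_apply_config` is applied:
```python
# Hypothetical sketch of the nested-override behaviour of update(d, u).
import collections.abc

def update(d, u):
    for k, v in u.items():
        if isinstance(v, collections.abc.Mapping):
            d[k] = update(d.get(k, {}), v)
        else:
            d[k] = v
    return d

base = {"model": {"vocab_sz": 4096, "hidden_sz": 1024}, "cuda": {"enable": True}}
override = {"model": {"hidden_sz": 512}}
print(update(base, override))
# {'model': {'vocab_sz': 4096, 'hidden_sz': 512}, 'cuda': {'enable': True}}
```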
#### File: libreasr/lib/decoders.py
```python
from itertools import groupby
from operator import itemgetter
import torch
import torch.nn as nn
import torch.nn.functional as F
def remove_duplicates(l):
return list(map(itemgetter(0), groupby(l)))
def remove_blanks(l, blank=0):
return list(filter(lambda x: x != blank, l))
def ctc_decode_greedy(acts, denumericalize_func, blank=0):
"""
    acts: output activations of the model (shape [N x T x V] or [T x V])
    denumericalize_func: callable mapping a list of token ids back to tokens/text
    blank: the blank symbol
returns: a list of denumericalized items
"""
if len(acts.shape) == 2:
acts = acts[None]
results = []
for batch in acts:
# batch is of shape [T x V]
# greedy
idxes = batch.argmax(dim=-1).cpu().numpy().tolist()
# decode
idxes = remove_duplicates(idxes)
idxes = remove_blanks(idxes, blank=blank)
# denumericalize
results.append(denumericalize_func(idxes))
if len(results) == 1:
return results[0]
return results
if __name__ == "__main__":
print("ctc:")
l = [0, 1, 1, 1, 2, 2, 1, 0, 3, 0, 3]
print(l)
l = remove_duplicates(l)
print(l)
l = remove_blanks(l)
print(l)
```
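A small usage sketch of `ctc_decode_greedy` (fake activations, identity denumericalizer, import path assumed); the argmax path 1,1,0,2,2 collapses to `[1, 2]` after duplicate and blank removal:
```python
import torch
from libreasr.lib.decoders import ctc_decode_greedy  # import path assumed

# Hypothetical 1 x T x V activations whose argmax sequence is 1,1,0,2,2
acts = torch.tensor([[[0.1, 0.8, 0.1],
                      [0.1, 0.8, 0.1],
                      [0.9, 0.05, 0.05],
                      [0.1, 0.1, 0.8],
                      [0.1, 0.1, 0.8]]])
# with an identity "denumericalizer" the cleaned id list [1, 2] is returned
print(ctc_decode_greedy(acts, lambda ids: ids, blank=0))
```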
#### File: lib/layers/custom_rnn.py
```python
import math
import random
import torch
from torch.nn import Parameter, ParameterList
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from IPython.core.debugger import set_trace
ZONEOUT = 0.01
DEVICES = ["CPU", "GPU"]
RNN_TYPES = ["LSTM", "GRU", "NBRC"]
USE_PYTORCH = True
def get_rnn_impl(device, rnn_type, layer_norm=False):
assert device in DEVICES
assert rnn_type in RNN_TYPES
if device == "GPU":
if rnn_type == "LSTM":
if layer_norm:
# from haste_pytorch import LayerNormLSTM as RNN
from torch.nn import LSTM as RNN
else:
# from haste_pytorch import LSTM as RNN
from torch.nn import LSTM as RNN
if rnn_type == "GRU":
# from haste_pytorch import GRU as RNN
from torch.nn import GRU as RNN
if rnn_type == "NBRC":
raise Exception("NBRC GPU not available")
if device == "CPU":
if rnn_type == "LSTM":
if layer_norm:
# from .haste import LayerNormLSTM as RNN
from torch.nn import LSTM as RNN
else:
# from .haste import LSTM as RNN
from torch.nn import LSTM as RNN
if rnn_type == "GRU":
# from .haste import GRU as RNN
from torch.nn import GRU as RNN
if rnn_type == "NBRC":
from .haste import NBRC as RNN
return RNN
def get_weight_attrs(rnn_type, layer_norm):
attrs = [
"kernel",
"recurrent_kernel",
"bias",
]
if rnn_type == "GRU" or rnn_type == "NBRC":
attrs += [
"recurrent_bias",
]
if layer_norm:
attrs += [
"gamma",
"gamma_h",
"beta_h",
]
return attrs
def copy_weights(_from, _to, attrs):
for attr in attrs:
setattr(_to, attr, getattr(_from, attr))
def get_initial_state(rnn_type, hidden_size, init=torch.zeros):
if rnn_type == "LSTM":
h = nn.Parameter(init(2, 1, 1, hidden_size))
tmp = init(2, 1, 1, hidden_size)
else:
h = nn.Parameter(init(1, 1, 1, hidden_size))
tmp = init(1, 1, 1, hidden_size)
return h, tmp
class CustomRNN(nn.Module):
def __init__(
self,
input_size,
hidden_size,
num_layers=1,
batch_first=True,
rnn_type="LSTM",
reduction_indices=[],
reduction_factors=[],
reduction_drop=True,
rezero=False,
layer_norm=False,
utsp=0.9,
):
super().__init__()
self.batch_first = batch_first
self.hidden_size = hidden_size
self._is = [input_size] + [hidden_size] * (num_layers - 1)
self._os = [hidden_size] * num_layers
self.rnn_type = rnn_type
# reduction
assert len(reduction_indices) == len(reduction_factors)
self.reduction_indices = reduction_indices
self.reduction_factors = reduction_factors
# learnable state & temporary state
self.hs = nn.ParameterList()
for hidden_size in self._os:
h, tmp = get_initial_state(rnn_type, hidden_size)
self.hs.append(h)
# state cache (key: bs, value: state)
self.cache = {}
# norm (BN or LN)
self.bns = nn.ModuleList()
for i, o in zip(self._is, self._os):
norm = nn.BatchNorm1d(o)
# norm = nn.LayerNorm(o)
self.bns.append(norm)
# rezero
self.rezero = rezero
# percentage of carrying over last state
self.utsp = utsp
def convert_to_cpu(self):
return self
def convert_to_gpu(self):
return self
def forward_one_rnn(
self, x, i, state=None, should_use_tmp_state=False, lengths=None
):
bs = x.size(0)
if state is None:
s = self.cache[bs][i] if self.cache.get(bs) is not None else None
is_tmp_state_possible = self.training and s is not None
if is_tmp_state_possible and should_use_tmp_state:
# temporary state
pass
else:
# learnable state
if self.hs[i].size(0) == 2:
s = []
for h in self.hs[i]:
s.append(h.expand(1, bs, self._os[i]).contiguous())
s = tuple(s)
else:
s = self.hs[i][0].expand(1, bs, self._os[i]).contiguous()
else:
s = state[i]
if self.rnn_type == "LSTM" or self.rnn_type == "GRU":
# PyTorch
if lengths is not None:
seq = pack_padded_sequence(
x, lengths, batch_first=True, enforce_sorted=False
)
seq, new_state = self.rnns[i](seq, s)
x, _ = pad_packed_sequence(seq, batch_first=True)
return (x, new_state)
else:
return self.rnns[i](x, s)
else:
# haste
            return self.rnns[i](x, s, lengths=lengths)
def forward(self, x, state=None, lengths=None):
bs = x.size(0)
residual = 0.0
new_states = []
suts = random.random() > (1.0 - self.utsp)
for i, rnn in enumerate(self.rnns):
# reduce if necessary
if i in self.reduction_indices:
idx = self.reduction_indices.index(i)
r_f = self.reduction_factors[idx]
# to [N, H, T]
x = x.permute(0, 2, 1)
x = x.unfold(-1, r_f, r_f)
x = x.permute(0, 2, 1, 3)
# keep last
# x = x[:,:,:,-1]
# or take the mean
x = x.mean(-1)
# also reduce lengths
if lengths is not None:
lengths = lengths // r_f
# apply rnn
inp = x
x, new_state = self.forward_one_rnn(
inp, i, state=state, should_use_tmp_state=suts, lengths=lengths,
)
# apply norm
x = x.permute(0, 2, 1)
x = self.bns[i](x)
x = x.permute(0, 2, 1)
# apply residual
if self.rezero:
if torch.is_tensor(residual) and residual.shape == x.shape:
x = x + residual
# store new residual
residual = inp
new_states.append(new_state)
if self.training:
if len(new_states[0]) == 2:
self.cache[bs] = [
(h.detach().contiguous(), c.detach().contiguous())
for (h, c) in new_states
]
else:
self.cache[bs] = [h.detach() for h in new_states]
return x, new_states
class CustomGPURNN(CustomRNN):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._args = args
self._kwargs = kwargs
RNN = get_rnn_impl("GPU", self.rnn_type, kwargs["layer_norm"])
self.rnns = nn.ModuleList()
for i, o in zip(self._is, self._os):
# r = RNN(i, o, batch_first=self.batch_first, zoneout=ZONEOUT)
r = RNN(i, o, batch_first=self.batch_first)
self.rnns.append(r)
def convert_to_cpu(self):
if USE_PYTORCH:
return self.to("cpu")
dev = next(self.parameters()).device
inst = CustomCPURNN(*self._args, **self._kwargs)
attrs = get_weight_attrs(self.rnn_type, self._kwargs["layer_norm"])
for i, rnn in enumerate(self.rnns):
grabbed_rnn = inst.rnns[i]
copy_weights(rnn, grabbed_rnn, attrs)
return inst.to("cpu")
class CustomCPURNN(CustomRNN):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._args = args
self._kwargs = kwargs
RNN = get_rnn_impl("CPU", self.rnn_type, kwargs["layer_norm"])
self.rnns = nn.ModuleList()
for i, o in zip(self._is, self._os):
# r = RNN(i, o, batch_first=self.batch_first, zoneout=ZONEOUT)
r = RNN(i, o, batch_first=self.batch_first)
self.rnns.append(r)
def convert_to_gpu(self):
dev = next(self.parameters()).device
if USE_PYTORCH or self.rnn_type == "NBRC":
return self.to(dev)
inst = CustomGPURNN(*self._args, **self._kwargs)
attrs = get_weight_attrs(self.rnn_type, self._kwargs["layer_norm"])
for i, rnn in enumerate(self.rnns):
grabbed_rnn = inst.rnns[i]
copy_weights(rnn, grabbed_rnn, attrs)
return inst.to(dev)
```
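The time reduction inside `CustomRNN.forward` groups consecutive frames with `unfold` and averages them; a standalone sketch of that reshaping with arbitrary shapes (not taken from the repo):
```python
import torch

# Hypothetical sketch of the unfold-based time reduction in CustomRNN.forward.
N, T, H, r_f = 2, 8, 4, 2          # batch, time, features, reduction factor
x = torch.randn(N, T, H)
x = x.permute(0, 2, 1)             # [N, H, T]
x = x.unfold(-1, r_f, r_f)         # [N, H, T//r_f, r_f]
x = x.permute(0, 2, 1, 3)          # [N, T//r_f, H, r_f]
x = x.mean(-1)                     # average each group of r_f frames
print(x.shape)                     # torch.Size([2, 4, 4]): time halved
```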
#### File: layers/haste/base_rnn.py
```python
import torch
import torch.nn as nn
__all__ = ["BaseRNN"]
class BaseRNN(nn.Module):
def __init__(
self, input_size, hidden_size, batch_first, zoneout, return_state_sequence
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.batch_first = batch_first
self.zoneout = zoneout
self.return_state_sequence = return_state_sequence
def _permute(self, x):
if self.batch_first:
return x.permute(1, 0, 2)
return x
def _get_state(self, input, state, state_shape):
if state is None:
state = _zero_state(input, state_shape)
else:
_validate_state(state, state_shape)
return state
def _get_final_state(self, state, lengths):
if isinstance(state, tuple):
return tuple(self._get_final_state(s, lengths) for s in state)
if isinstance(state, list):
return [self._get_final_state(s, lengths) for s in state]
if self.return_state_sequence:
return self._permute(state[1:]).unsqueeze(0)
if lengths is not None:
cols = range(state.size(1))
return state[[lengths, cols]].unsqueeze(0)
return state[-1].unsqueeze(0)
def _get_zoneout_mask(self, input):
if self.zoneout:
zoneout_mask = input.new_empty(
input.shape[0], input.shape[1], self.hidden_size
)
zoneout_mask.bernoulli_(1.0 - self.zoneout)
else:
zoneout_mask = input.new_empty(0, 0, 0)
return zoneout_mask
def _is_cuda(self):
is_cuda = [tensor.is_cuda for tensor in list(self.parameters())]
if any(is_cuda) and not all(is_cuda):
raise ValueError(
"RNN tensors should all be CUDA tensors or none should be CUDA tensors"
)
return any(is_cuda)
def _validate_state(state, state_shape):
"""
Checks to make sure that `state` has the same nested structure and dimensions
as `state_shape`. `None` values in `state_shape` are a wildcard and are not
checked.
Arguments:
state: a nested structure of Tensors.
state_shape: a nested structure of integers or None.
Raises:
ValueError: if the structure and/or shapes don't match.
"""
if isinstance(state, (tuple, list)):
# Handle nested structure.
if not isinstance(state_shape, (tuple, list)):
raise ValueError(
"RNN state has invalid structure; expected {}".format(state_shape)
)
for s, ss in zip(state, state_shape):
_validate_state(s, ss)
else:
shape = list(state.size())
if len(shape) != len(state_shape):
raise ValueError(
"RNN state dimension mismatch; expected {} got {}".format(
len(state_shape), len(shape)
)
)
for i, (d1, d2) in enumerate(zip(list(state.size()), state_shape)):
if d2 is not None and d1 != d2:
raise ValueError(
"RNN state size mismatch on dim {}; expected {} got {}".format(
i, d2, d1
)
)
def _zero_state(input, state_shape):
"""
Returns a nested structure of zero Tensors with the same structure and shape
as `state_shape`. The returned Tensors will have the same dtype and be on the
same device as `input`.
Arguments:
input: Tensor, to specify the device and dtype of the returned tensors.
    state_shape: nested structure of integers.
Returns:
zero_state: a nested structure of zero Tensors.
Raises:
ValueError: if `state_shape` has non-integer values.
"""
if isinstance(state_shape, (tuple, list)) and isinstance(state_shape[0], int):
state = input.new_zeros(*state_shape)
elif isinstance(state_shape, tuple):
state = tuple(_zero_state(input, s) for s in state_shape)
elif isinstance(state_shape, list):
state = [_zero_state(input, s) for s in state_shape]
else:
raise ValueError("RNN state_shape is invalid")
return state
```
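For clarity (hypothetical shapes, loosely mirroring `_zero_state` above), the initial state for an LSTM-style layer is a nested `(h, c)` pair of zero tensors built with the input's dtype and device:
```python
import torch

# Hypothetical sketch of the nested zero state produced by _zero_state.
def zero_state(input, state_shape):
    if isinstance(state_shape, (tuple, list)) and isinstance(state_shape[0], int):
        return input.new_zeros(*state_shape)
    return tuple(zero_state(input, s) for s in state_shape)

x = torch.randn(5, 3, 10)                       # [T, N, input_size]
h0, c0 = zero_state(x, ([1, 3, 8], [1, 3, 8]))  # hidden_size = 8
print(h0.shape, c0.shape)                       # torch.Size([1, 3, 8]) twice
```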
#### File: layers/haste/lstm.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from .base_rnn import BaseRNN
__all__ = ["LSTM"]
# @torch.jit.script
def LSTMScript(
training: bool,
zoneout_prob: float,
input,
h0,
c0,
kernel,
recurrent_kernel,
bias,
zoneout_mask,
):
time_steps = input.shape[0]
batch_size = input.shape[1]
hidden_size = recurrent_kernel.shape[0]
h = [h0]
c = [c0]
Wx = input @ kernel
for t in range(time_steps):
v = h[t] @ recurrent_kernel + Wx[t] + bias
i, g, f, o = torch.chunk(v, 4, 1)
i = torch.sigmoid(i)
g = torch.tanh(g)
f = torch.sigmoid(f)
o = torch.sigmoid(o)
c.append(f * c[t] + i * g)
h.append(o * torch.tanh(c[-1]))
if zoneout_prob:
if training:
h[-1] = (h[-1] - h[-2]) * zoneout_mask[t] + h[-2]
else:
h[-1] = zoneout_prob * h[-2] + (1 - zoneout_prob) * h[-1]
h = torch.stack(h)
c = torch.stack(c)
return h, c
class LSTM(BaseRNN):
"""
Long Short-Term Memory layer.
This LSTM layer offers a fused, GPU-accelerated PyTorch op for inference
and training. Although this implementation is comparable in performance to
cuDNN's LSTM, it offers additional options not typically found in other
high-performance implementations. DropConnect and Zoneout regularization are
built-in, and this layer allows setting a non-zero initial forget gate bias.
See [\_\_init\_\_](#__init__) and [forward](#forward) for general usage.
See [from_native_weights](#from_native_weights) and
[to_native_weights](#to_native_weights) for compatibility with PyTorch LSTMs.
"""
def __init__(
self,
input_size,
hidden_size,
batch_first=False,
forget_bias=1.0,
dropout=0.0,
zoneout=0.0,
return_state_sequence=False,
):
"""
Initialize the parameters of the LSTM layer.
Arguments:
input_size: int, the feature dimension of the input.
hidden_size: int, the feature dimension of the output.
batch_first: (optional) bool, if `True`, then the input and output
tensors are provided as `(batch, seq, feature)`.
forget_bias: (optional) float, sets the initial bias of the forget gate
for this LSTM cell.
dropout: (optional) float, sets the dropout rate for DropConnect
regularization on the recurrent matrix.
zoneout: (optional) float, sets the zoneout rate for Zoneout
regularization.
return_state_sequence: (optional) bool, if `True`, the forward pass will
return the entire state sequence instead of just the final state. Note
that if the input is a padded sequence, the returned state will also
be a padded sequence.
Variables:
kernel: the input projection weight matrix. Dimensions
(input_size, hidden_size * 4) with `i,g,f,o` gate layout. Initialized
with Xavier uniform initialization.
recurrent_kernel: the recurrent projection weight matrix. Dimensions
(hidden_size, hidden_size * 4) with `i,g,f,o` gate layout. Initialized
with orthogonal initialization.
bias: the projection bias vector. Dimensions (hidden_size * 4) with
`i,g,f,o` gate layout. The forget gate biases are initialized to
`forget_bias` and the rest are zeros.
"""
super().__init__(
input_size, hidden_size, batch_first, zoneout, return_state_sequence
)
if dropout < 0 or dropout > 1:
raise ValueError("LSTM: dropout must be in [0.0, 1.0]")
if zoneout < 0 or zoneout > 1:
raise ValueError("LSTM: zoneout must be in [0.0, 1.0]")
self.forget_bias = forget_bias
self.dropout = dropout
self.kernel = nn.Parameter(torch.empty(input_size, hidden_size * 4))
self.recurrent_kernel = nn.Parameter(torch.empty(hidden_size, hidden_size * 4))
self.bias = nn.Parameter(torch.empty(hidden_size * 4))
self.reset_parameters()
def to_native_weights(self):
"""
Converts Haste LSTM weights to native PyTorch LSTM weights.
Returns:
weight_ih_l0: Parameter, the input-hidden weights of the LSTM layer.
weight_hh_l0: Parameter, the hidden-hidden weights of the LSTM layer.
bias_ih_l0: Parameter, the input-hidden bias of the LSTM layer.
bias_hh_l0: Parameter, the hidden-hidden bias of the LSTM layer.
"""
def reorder_weights(w):
i, g, f, o = torch.chunk(w, 4, dim=-1)
return torch.cat([i, f, g, o], dim=-1)
kernel = reorder_weights(self.kernel).permute(1, 0).contiguous()
recurrent_kernel = (
reorder_weights(self.recurrent_kernel).permute(1, 0).contiguous()
)
half_bias = reorder_weights(self.bias) / 2.0
kernel = torch.nn.Parameter(kernel)
recurrent_kernel = torch.nn.Parameter(recurrent_kernel)
bias1 = torch.nn.Parameter(half_bias)
bias2 = torch.nn.Parameter(half_bias.clone())
return kernel, recurrent_kernel, bias1, bias2
def from_native_weights(self, weight_ih_l0, weight_hh_l0, bias_ih_l0, bias_hh_l0):
"""
Copies and converts the provided PyTorch LSTM weights into this layer.
Arguments:
weight_ih_l0: Parameter, the input-hidden weights of the PyTorch LSTM layer.
weight_hh_l0: Parameter, the hidden-hidden weights of the PyTorch LSTM layer.
bias_ih_l0: Parameter, the input-hidden bias of the PyTorch LSTM layer.
bias_hh_l0: Parameter, the hidden-hidden bias of the PyTorch LSTM layer.
"""
def reorder_weights(w):
i, f, g, o = torch.chunk(w, 4, dim=-1)
return torch.cat([i, g, f, o], dim=-1)
kernel = reorder_weights(weight_ih_l0.permute(1, 0)).contiguous()
recurrent_kernel = reorder_weights(weight_hh_l0.permute(1, 0)).contiguous()
bias = reorder_weights(bias_ih_l0 + bias_hh_l0).contiguous()
self.kernel = nn.Parameter(kernel)
self.recurrent_kernel = nn.Parameter(recurrent_kernel)
self.bias = nn.Parameter(bias)
def reset_parameters(self):
"""Resets this layer's parameters to their initial values."""
hidden_size = self.hidden_size
for i in range(4):
nn.init.xavier_uniform_(
self.kernel[:, i * hidden_size : (i + 1) * hidden_size]
)
nn.init.orthogonal_(
self.recurrent_kernel[:, i * hidden_size : (i + 1) * hidden_size]
)
nn.init.zeros_(self.bias)
nn.init.constant_(
self.bias[hidden_size * 2 : hidden_size * 3], self.forget_bias
)
def forward(self, input, state=None, lengths=None):
"""
Runs a forward pass of the LSTM layer.
Arguments:
input: Tensor, a batch of input sequences to pass through the LSTM.
Dimensions (seq_len, batch_size, input_size) if `batch_first` is
`False`, otherwise (batch_size, seq_len, input_size).
lengths: (optional) Tensor, list of sequence lengths for each batch
element. Dimension (batch_size). This argument may be omitted if
all batch elements are unpadded and have the same sequence length.
Returns:
output: Tensor, the output of the LSTM layer. Dimensions
(seq_len, batch_size, hidden_size) if `batch_first` is `False` (default)
or (batch_size, seq_len, hidden_size) if `batch_first` is `True`. Note
that if `lengths` was specified, the `output` tensor will not be
masked. It's the caller's responsibility to either not use the invalid
entries or to mask them out before using them.
(h_n, c_n): the hidden and cell states, respectively, for the last
sequence item. Dimensions (1, batch_size, hidden_size).
"""
input = self._permute(input)
state_shape = [1, input.shape[1], self.hidden_size]
state_shape = (state_shape, state_shape)
h0, c0 = self._get_state(input, state, state_shape)
h, c = self._impl(input, (h0[0], c0[0]), self._get_zoneout_mask(input))
state = self._get_final_state((h, c), lengths)
output = self._permute(h[1:])
return output, state
def _impl(self, input, state, zoneout_mask):
return LSTMScript(
self.training,
self.zoneout,
input.contiguous(),
state[0].contiguous(),
state[1].contiguous(),
self.kernel.contiguous(),
F.dropout(self.recurrent_kernel, self.dropout, self.training).contiguous(),
self.bias.contiguous(),
zoneout_mask.contiguous(),
)
```
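A minimal, shapes-only usage sketch of this LSTM layer (import path assumed; not from the repository). With `batch_first=True` the input is `[N, T, input_size]` and the returned state is the final `(h_n, c_n)` pair:
```python
import torch
from libreasr.lib.layers.haste.lstm import LSTM  # import path assumed

lstm = LSTM(input_size=40, hidden_size=64, batch_first=True, zoneout=0.1)
x = torch.randn(8, 100, 40)                      # [N, T, input_size]
out, (h_n, c_n) = lstm(x)
print(out.shape, h_n.shape, c_n.shape)
# torch.Size([8, 100, 64]) torch.Size([1, 8, 64]) torch.Size([1, 8, 64])
```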
#### File: libreasr/lib/lm.py
```python
import torch
import torch.quantization
import torch.nn as nn
import torch.nn.functional as F
from libreasr.lib.utils import standardize, maybe_quantize
ALPHA = 0.1
THETA = 1.0
MIN_VAL = -10.0
DEBUG = False
class LM(nn.Module):
def __init__(self, vocab_sz, embed_sz, hidden_sz, num_layers, p=0.2, **kwargs):
super(LM, self).__init__()
self.embed = nn.Embedding(vocab_sz, embed_sz, padding_idx=0)
self.rnn = nn.LSTM(embed_sz, hidden_sz, batch_first=True, num_layers=num_layers)
self.drop = nn.Dropout(p)
self.linear = nn.Linear(hidden_sz, vocab_sz)
if embed_sz == hidden_sz:
# tie weights
self.linear.weight = self.embed.weight
def forward(self, x, state=None):
x = self.embed(x)
if state:
x, state = self.rnn(x, state)
else:
x, state = self.rnn(x)
x = self.drop(x)
x = self.linear(x)
x = F.log_softmax(x, dim=-1)
return x, state
class LMFuser:
def __init__(self, lm):
self.lm = lm
self.lm_logits = None
self.lm_state = None
self.has_lm = self.lm is not None
def advance(self, y_one_char):
if self.has_lm:
self.lm_logits, self.lm_state = self.lm(y_one_char, self.lm_state)
standardize(self.lm_logits)
self.lm_logits[:, :, 0] = MIN_VAL
def fuse(self, joint_out, prob, pred, alpha=ALPHA, theta=THETA):
lm_logits = self.lm_logits
if self.has_lm and torch.is_tensor(lm_logits):
standardize(joint_out)
joint_out[:, :, :, 0] = MIN_VAL
if DEBUG:
print(
"lm:",
lm_logits.shape,
lm_logits.mean(),
lm_logits.std(),
lm_logits.max(),
)
print(
"joint:",
joint_out.shape,
joint_out.mean(),
joint_out.std(),
joint_out.max(),
)
fused = alpha * lm_logits + theta * joint_out
prob, pred = fused.max(-1)
return fused, prob, pred
return joint_out, prob, pred
def reset(self):
self.lm_logits = None
self.lm_state = None
def load_lm(conf, lang):
# create model
lm = LM(**conf["lm"])
lm.eval()
# load model
lm.load_state_dict(torch.load(conf["lm"]["path"]))
lm.eval()
# quantize
lm = maybe_quantize(lm)
lm.eval()
return lm
```
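The fusion step in `LMFuser.fuse` is plain shallow fusion: a weighted sum of LM and joint-network log-probabilities followed by an argmax. A toy sketch with made-up numbers, where the LM flips the winning token:
```python
import torch

# Hypothetical sketch of the shallow fusion in LMFuser.fuse:
# fused = ALPHA * lm_logits + THETA * joint_out, then take the argmax.
ALPHA, THETA = 0.1, 1.0
lm_logits = torch.log_softmax(torch.tensor([[[[0.2, 2.0, 0.5]]]]), dim=-1)
joint_out = torch.log_softmax(torch.tensor([[[[0.3, 1.0, 1.1]]]]), dim=-1)

fused = ALPHA * lm_logits + THETA * joint_out
prob, pred = fused.max(-1)
# the joint network alone prefers token 2; the LM nudges the fused pick to token 1
print(int(pred))  # 1
```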
#### File: libreasr/lib/models.py
```python
import operator
import time
import random
from queue import PriorityQueue
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
from fastai2.vision.models.xresnet import xresnet18
from fastai2.layers import Debugger, ResBlock
from fastai2.torch_core import Module
from fastai2.learner import CancelBatchException
from IPython.core.debugger import set_trace
from libreasr.lib.utils import *
from libreasr.lib.layers import *
from libreasr.lib.lm import LMFuser
class ResidualAdapter(Module):
"""
ResidualAdapter according to
https://ai.googleblog.com/2019/09/large-scale-multilingual-speech.html?m=1
"""
def __init__(
self, hidden_sz, projection="fcnet", projection_factor=3.2, activation=F.relu
):
self.hidden_sz = hidden_sz
        # activation is a callable such as F.relu; calling it with no args would fail
        self.activation = activation
self.layer_norm = nn.LayerNorm(hidden_sz)
if projection == "conv":
pass
else:
bottleneck_sz = int(hidden_sz / projection_factor)
# find next multiple of 8 for performance
bottleneck_sz = bottleneck_sz + (8 - bottleneck_sz % 8)
self.down = nn.Linear(hidden_sz, bottleneck_sz)
self.up = nn.Linear(bottleneck_sz, hidden_sz)
def forward(self, x):
inp = x
# layer norm
x = self.layer_norm(x)
# down projection
x = self.down(x)
x = self.activation(x)
# up projection
x = self.up(x)
# residual connection
return x + inp
class Encoder(Module):
def __init__(
self,
feature_sz,
hidden_sz,
out_sz,
dropout=0.01,
num_layers=2,
trace=True,
device="cuda:0",
layer_norm=False,
rnn_type="LSTM",
use_tmp_state_pcent=0.9,
**kwargs,
):
self.num_layers = num_layers
self.input_norm = nn.LayerNorm(feature_sz)
self.rnn_stack = CustomCPURNN(
feature_sz,
hidden_sz,
num_layers,
rnn_type=rnn_type,
reduction_indices=[], # 1
reduction_factors=[], # 2
layer_norm=layer_norm,
rezero=False,
utsp=use_tmp_state_pcent,
)
self.drop = nn.Dropout(dropout)
if not hidden_sz == out_sz:
self.linear = nn.Linear(hidden_sz, out_sz)
else:
self.linear = nn.Sequential()
def param_groups(self):
return [p for p in self.parameters() if p.requires_grad]
def forward(self, x, state=None, lengths=None, return_state=False):
x = x.reshape((x.size(0), x.size(1), -1))
x = self.input_norm(x)
x, state = self.rnn_stack(x, state=state, lengths=lengths)
x = self.drop(x)
x = self.linear(x)
if return_state:
return x, state
return x
class Joint(Module):
def __init__(self, out_sz, joint_sz, vocab_sz, joint_method):
self.joint_method = joint_method
if joint_method == "add":
input_sz = out_sz
elif joint_method == "concat":
input_sz = 2 * out_sz
else:
raise Exception("No such joint_method")
self.joint = nn.Sequential(
nn.Linear(input_sz, joint_sz), nn.Tanh(), nn.Linear(joint_sz, vocab_sz),
)
def param_groups(self):
return [p for p in self.parameters() if p.requires_grad]
def forward(self, h_pred, h_enc):
if self.joint_method == "add":
x = h_pred + h_enc
elif self.joint_method == "concat":
x = torch.cat((h_pred, h_enc), dim=-1)
else:
raise Exception("No such joint_method")
x = self.joint(x)
return x
class Predictor(Module):
def __init__(
self,
vocab_sz,
embed_sz,
hidden_sz,
out_sz,
dropout=0.01,
num_layers=2,
blank=0,
layer_norm=False,
rnn_type="NBRC",
use_tmp_state_pcent=0.9,
):
self.vocab_sz = vocab_sz
self.num_layers = num_layers
self.embed = nn.Embedding(vocab_sz, embed_sz, padding_idx=blank)
if not embed_sz == hidden_sz:
self.ffn = nn.Linear(embed_sz, hidden_sz)
else:
self.ffn = nn.Sequential()
self.rnn_stack = CustomCPURNN(
hidden_sz,
hidden_sz,
num_layers,
rnn_type=rnn_type,
layer_norm=layer_norm,
utsp=use_tmp_state_pcent,
)
self.drop = nn.Dropout(dropout)
if not hidden_sz == out_sz:
self.linear = nn.Linear(hidden_sz, out_sz)
else:
self.linear = nn.Sequential()
def param_groups(self):
return [p for p in self.parameters() if p.requires_grad]
def forward(self, x, state=None, lengths=None):
x = self.embed(x)
x = self.ffn(x)
x, state = self.rnn_stack(x, state=state, lengths=lengths)
x = self.drop(x)
x = self.linear(x)
return x, state
class Transducer(Module):
def __init__(
self,
feature_sz,
embed_sz,
vocab_sz,
hidden_sz,
out_sz,
joint_sz,
lang,
l_e=6,
l_p=2,
p_j=0.0,
blank=0,
joint_method="concat",
perf=False,
act=F.relu,
use_tmp_bos=True,
use_tmp_bos_pcent=0.99,
encoder_kwargs={},
predictor_kwargs={},
**kwargs,
):
self.encoder = Encoder(
feature_sz, hidden_sz=hidden_sz, out_sz=out_sz, **encoder_kwargs,
)
self.predictor = Predictor(
vocab_sz,
embed_sz=embed_sz,
hidden_sz=hidden_sz,
out_sz=out_sz,
**predictor_kwargs,
)
self.joint = Joint(out_sz, joint_sz, vocab_sz, joint_method)
self.lang = lang
self.blank = blank
# TODO: dont hardcode
self.bos = 2
self.perf = perf
self.mp = False
self.bos_cache = {}
self.use_tmp_bos = use_tmp_bos
self.use_tmp_bos_pcent = use_tmp_bos_pcent
self.vocab_sz = vocab_sz
self.lm = None
@staticmethod
def from_config(conf, lang, lm=None):
m = Transducer(
conf["model"]["feature_sz"],
conf["model"]["embed_sz"],
conf["model"]["vocab_sz"],
conf["model"]["hidden_sz"],
conf["model"]["out_sz"],
conf["model"]["joint_sz"],
lang,
p_e=conf["model"]["encoder"]["dropout"],
p_p=conf["model"]["predictor"]["dropout"],
p_j=conf["model"]["joint"]["dropout"],
joint_method=conf["model"]["joint"]["method"],
perf=False,
bs=conf["bs"],
raw_audio=False,
use_tmp_bos=conf["model"]["use_tmp_bos"],
use_tmp_bos_pcent=conf["model"]["use_tmp_bos_pcent"],
encoder_kwargs=conf["model"]["encoder"],
predictor_kwargs=conf["model"]["predictor"],
).to(conf["cuda"]["device"])
m.mp = conf["mp"]
return m
def param_groups(self):
return [
self.encoder.param_groups(),
self.predictor.param_groups(),
self.joint.param_groups(),
]
def convert_to_cpu(self):
self.encoder.rnn_stack = self.encoder.rnn_stack.convert_to_cpu()
self.predictor.rnn_stack = self.predictor.rnn_stack.convert_to_cpu()
return self
def convert_to_gpu(self):
self.encoder.rnn_stack = self.encoder.rnn_stack.convert_to_gpu()
self.predictor.rnn_stack = self.predictor.rnn_stack.convert_to_gpu()
return self
def start_perf(self):
if self.perf:
self.t = time.time()
def stop_perf(self, name="unknown"):
if self.perf:
t = (time.time() - self.t) * 1000.0
print(f"{name.ljust(10, ' ')} | {t:4.2f}ms")
def grab_bos(self, y, yl, bs, device):
if self.training and self.use_tmp_bos:
r = random.random()
thresh = 1.0 - self.use_tmp_bos_pcent
cached_bos = self.bos_cache.get(bs)
if torch.is_tensor(cached_bos) and r > thresh:
# use end of last batch as bos
bos = cached_bos
return bos
# store for next batch
# is -1 acceptable here?
try:
q = torch.clamp(yl[:, None] - 1, min=0)
self.bos_cache[bs] = y.gather(1, q).detach()
            except Exception:
pass
# use regular bos
bos = torch.zeros((bs, 1), device=device).long()
bos = bos.fill_(self.bos)
return bos
def forward(self, tpl):
"""
(x, y)
x: N tuples (audios of shape [N, n_chans, seq_len, H], x_lens)
y: N tuples (y_padded, y_lens)
"""
# unpack
x, y, xl, yl = tpl
if self.mp:
x = x.half()
# encoder
self.start_perf()
x = x.reshape(x.size(0), x.size(1), -1)
encoder_out = self.encoder(x, lengths=xl)
self.stop_perf("encoder")
# N: batch size
# T: n frames (time)
# H: hidden features
N, T, H = encoder_out.size()
# predictor
# concat first bos (yconcat is y shifted right by 1)
bos = self.grab_bos(y, yl, bs=N, device=encoder_out.device)
yconcat = torch.cat((bos, y), dim=1)
self.start_perf()
# yl here because we want to omit the last label
# in the resulting state (we had (yl + 1))
predictor_out, _ = self.predictor(yconcat, lengths=yl)
self.stop_perf("predictor")
U = predictor_out.size(1)
# expand:
# we need to pass [N, T, U, H] to the joint network
# NOTE: we might want to not include padding here?
M = max(T, U)
sz = (N, T, U, H)
encoder_out = encoder_out.unsqueeze(2).expand(sz).contiguous()
predictor_out = predictor_out.unsqueeze(1).expand(sz).contiguous()
# print(encoder_out.shape, predictor_out.shape)
# joint & project
self.start_perf()
joint_out = self.joint(predictor_out, encoder_out)
self.stop_perf("joint")
        # log_softmax only when using the RNN-T loss from 1ytic (warp-rnnt)
joint_out = F.log_softmax(joint_out, -1)
return joint_out
def decode(self, *args, **kwargs):
res, log_p, _ = self.decode_greedy(*args, **kwargs)
return res, log_p
def transcribe(self, *args, **kwargs):
res, _, metrics, _ = self.decode_greedy(*args, **kwargs)
return res, metrics
def decode_greedy(self, x, max_iters=3, alpha=0.005, theta=1.0):
"x must be of shape [C, T, H]"
# keep stats
metrics = {}
extra = {
"iters": [],
"outs": [],
}
# put model into evaluation mode
self.eval()
self.encoder.eval()
self.predictor.eval()
self.joint.eval()
# check shape of x
if len(x.shape) == 2:
# add channel dimension
x = x[None]
# reshape x to (1, C, T, H...)
x = x[None]
# encode full spectrogram (all timesteps)
encoder_out = self.encoder(x)[0]
# predictor: BOS goes first
y_one_char = torch.LongTensor([[self.bos]]).to(encoder_out.device)
h_t_pred, pred_state = self.predictor(y_one_char)
# lm
fuser = LMFuser(self.lm)
# iterate through all timesteps
y_seq, log_p = [], 0.0
for h_t_enc in encoder_out:
iters = 0
while iters < max_iters:
iters += 1
# h_t_enc is of shape [H]
# go through the joint network
_h_t_pred = h_t_pred[None]
_h_t_enc = h_t_enc[None, None, None]
joint_out = self.joint(_h_t_pred, _h_t_enc)
# decode one character
joint_out = F.log_softmax(joint_out, dim=-1)
extra["outs"].append(joint_out.clone())
prob, pred = joint_out.max(-1)
pred = int(pred)
log_p += float(prob)
# if blank, advance encoder state
# if not blank, add to the decoded sequence so far
# and advance predictor state
if pred == self.blank:
break
else:
# fuse with lm
joint_out, prob, pred = fuser.fuse(joint_out, prob, pred)
y_seq.append(pred)
y_one_char[0][0] = pred
# advance predictor
h_t_pred, pred_state = self.predictor(y_one_char, state=pred_state)
# advance lm
fuser.advance(y_one_char)
# record how many iters we had
extra["iters"].append(iters)
# compute alignment score
# better if distributed along the sequence
align = np.array(extra["iters"])
_sum = align.sum()
val, cnt = np.unique(align, return_counts=True)
d = {v: c for v, c in zip(val, cnt)}
_ones = d.get(1, 0)
alignment_score = (_sum - _ones) / (_sum + 1e-4)
metrics["alignment_score"] = alignment_score
return self.lang.denumericalize(y_seq), -log_p, metrics, extra
def transcribe_stream(
self, stream, denumericalizer, max_iters=10, alpha=0.3, theta=1.0
):
"""
stream is expected to yield chunks of shape (NCHANS, CHUNKSIZE)
"""
# put model into evaluation mode
self.eval()
# state to hold while transcribing
encoder_state = None
predictor_state = None
# current token
y_one_char = torch.LongTensor([[self.bos]])
h_t_pred = None
        # sequence of the whole stream
y = []
# lm
fuser = LMFuser(self.lm)
def reset_encoder():
nonlocal encoder_state
encoder_state = None
def reset_predictor():
nonlocal y_one_char, h_t_pred, predictor_state
# initialize predictor
# blank goes first
y_one_char = torch.LongTensor([[self.bos]])
h_t_pred, predictor_state = self.predictor(y_one_char)
def reset_lm():
fuser.reset()
def reset():
reset_encoder()
reset_predictor()
reset_lm()
# reset at start
reset()
# iterate through time
# T > 1 is possible
blanks = 0
nonblanks = 0
for chunk in stream:
# in case we get a None, just continue
if chunk is None:
continue
# -> [1, T, H, W]
chunk = chunk[None]
# forward pass encoder
self.start_perf()
if encoder_state is None:
encoder_out, encoder_state = self.encoder(chunk, return_state=True)
else:
encoder_out, encoder_state = self.encoder(
chunk, state=encoder_state, return_state=True
)
h_t_enc = encoder_out[0]
self.stop_perf("encoder")
self.start_perf()
# loop over encoder states (t)
y_seq = []
for i in range(h_t_enc.size(-2)):
h_enc = h_t_enc[..., i, :]
iters = 0
while iters < max_iters:
iters += 1
# h_enc is of shape [H]
# go through the joint network
_h_t_pred = h_t_pred[None]
_h_t_enc = h_enc[None, None, None]
# print(_h_t_pred.shape)
# print(_h_t_enc.shape)
joint_out = self.joint(_h_t_pred, _h_t_enc)
# decode one character
joint_out = F.log_softmax(joint_out, dim=-1)
prob, pred = joint_out.max(-1)
pred = int(pred)
# if blank, advance encoder state
# if not blank, add to the decoded sequence so far
# and advance predictor state
if pred == self.blank:
blanks += 1
break
else:
# fuse with lm
joint_out, prob, pred = fuser.fuse(joint_out, prob, pred)
y_seq.append(pred)
y_one_char[0][0] = pred
# advance predictor
h_t_pred, predictor_state = self.predictor(
y_one_char, state=predictor_state
)
# advance lm
fuser.advance(y_one_char)
nonblanks += 1
# add to y
y = y + y_seq
yield y, denumericalizer(y_seq), reset
self.stop_perf("joint + predictor")
class CTCModel(Module):
def __init__(self):
layer = nn.TransformerEncoderLayer(128, 8)
self.encoder = nn.TransformerEncoder(layer, 8)
self.linear = nn.Linear(128, 2048)
def convert_to_gpu(self):
pass
def param_groups(self):
return [p for p in self.parameters() if p.requires_grad]
@staticmethod
def from_config(conf, lang):
return CTCModel()
def forward(self, tpl):
x, y, xl, yl = tpl
        # transpose to [T, N, H] for the transformer encoder (view would scramble the data)
        x = x.reshape(x.size(0), x.size(1), -1).permute(1, 0, 2).contiguous()
x = self.encoder(x)
x = self.linear(x)
x = F.log_softmax(x, -1)
return x
```
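The `alignment_score` computed at the end of `decode_greedy` measures what fraction of joint-network iterations came from frames that emitted at least one token (a frame that immediately emits blank takes exactly one iteration). A small sketch of the same arithmetic on a made-up iteration trace:
```python
import numpy as np

# Hypothetical sketch of the alignment score from Transducer.decode_greedy.
iters = [1, 1, 2, 1, 3, 1, 1, 1]       # made-up per-frame iteration counts
align = np.array(iters)
_sum = align.sum()                     # 11 total joint evaluations
val, cnt = np.unique(align, return_counts=True)
_ones = dict(zip(val, cnt)).get(1, 0)  # 6 frames emitted nothing
score = (_sum - _ones) / (_sum + 1e-4)
print(round(float(score), 3))          # 0.455
```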
#### File: libreasr/lib/model_utils.py
```python
import tarfile
from pathlib import Path
import os
import hashlib
import glob
from IPython.core.debugger import set_trace
import torch
from fastai2.learner import load_model
from libreasr.lib.utils import maybe_quantize
_PATH_ARCHIVE = Path("libreasr-model.tar.gz")
_PATH_TOKENIZER = Path("tokenizer.yttm-model")
_PATH_MODEL = Path("model.pth")
_PATH_DEST = Path("./tmp")
def add(tar, p_archive, p_real):
    # use tar.add so the file contents are actually written into the archive
    tar.add(str(p_real), arcname=str(p_archive))
def save_asr_model(
lang,
path_tokenizer=_PATH_TOKENIZER,
path_model=_PATH_MODEL,
path_archive=_PATH_ARCHIVE,
):
"""
Bundles
- tokenizer.yttm-model (tokenizer model)
- model.pth (PyTorch model)
into a single .tar.gz :path_archive:
"""
p_base_real = _PATH_DEST / Path(lang)
p_base_arc = Path(lang)
tar = tarfile.open(path_archive, mode="w:gz")
add(tar, p_base_arc / path_tokenizer, p_base_real / path_tokenizer)
add(tar, p_base_arc / path_model, p_base_real / path_model)
tar.close()
def extract_tars(paths_archive=None, path_dest=_PATH_DEST):
"""
extract .tar.gz file
"""
if paths_archive is None:
paths_archive = glob.glob("./libreasr-model-*.tar.gz")
for arc in paths_archive:
tar = tarfile.open(arc)
tar.extractall(path=path_dest)
def load_asr_model(
model,
lang_name,
lang,
device="cpu",
lm=None,
path_tokenizer=_PATH_TOKENIZER,
path_archive=_PATH_ARCHIVE,
):
"""
Loads an asr model from a given .tar.gz file
"""
# delete attrs
model.lang = None
model.lm = None
# model
try:
load_model(
str(Path(_PATH_DEST / Path(lang_name) / "model.pth")),
model,
None,
with_opt=False,
device=device,
)
except Exception as e:
print("Unable to load_model(...)")
raise e
# quantize model
    # we need to do this before loading our model
    # as the saved model is quantized
model = maybe_quantize(model)
return model
``` |