metadata (dict) | text (string, lengths 60 to 3.49M)
---|---
{
"source": "JN513/youtube_downloader",
"score": 3
} |
#### File: JN513/youtube_downloader/main.py
```python
from PyQt5.QtCore import Qt, QThread
from PyQt5.QtWidgets import (
QVBoxLayout,
QWidget,
QPushButton,
QLineEdit,
QApplication,
QLabel,
QFileDialog,
QFormLayout,
QRadioButton,
QButtonGroup,
QMessageBox,
QHBoxLayout,
QTabWidget,
)
from core.thread import YoutubeDownloadThread, InstagramDownloadThread
import sys
import os
VERSION = "1.0.1-beta"
GITHUB = "https://github.com/JN513"
REPOSITORY = "https://github.com/JN513/youtube_downloader"
class Window(QWidget):
def __init__(self):
super().__init__()
self.path_to_save = None
self.type = 0
self.ittype = 1
self.content = 0
self.basedir = os.path.dirname(os.path.realpath(__file__))
self.initUI()
def initUI(self):
# self.setGeometry(300, 300, 290, 150)
self.setWindowTitle("Youtube download")
tabs = QTabWidget()
tabs.addTab(self.youtube_UI(), "Youtube")
tabs.addTab(self.instagram_UI(), "Instagram")
tabs.addTab(self.info_UI(), "Info")
main_layout = QVBoxLayout()
main_layout.addWidget(tabs)
self.setLayout(main_layout)
self.show()
def youtube_UI(self):
youtubeTab = QWidget()
self.yt_top_label = QLabel()
self.yt_top_label.setText("Insira um ou mais links separados por ';'.")
self.yt_input = QLineEdit()
self.type_label = QLabel()
self.type_label.setText("Tipo")
self.rbty1 = QRadioButton("Playlist")
self.rbty2 = QRadioButton("Video")
self.rbty2.setChecked(True)
self.rbty1.toggled.connect(self.onClicked_type)
self.rbty2.toggled.connect(self.onClicked_type)
self.content_label = QLabel()
self.content_label.setText("Conteudo")
self.rbtn1 = QRadioButton("MP4")
self.rbtn2 = QRadioButton("MP3")
self.rbtn2.setChecked(True)
self.rbtn1.toggled.connect(self.onClicked_content)
self.rbtn2.toggled.connect(self.onClicked_content)
self.btngroup1 = QButtonGroup()
self.btngroup2 = QButtonGroup()
self.btngroup1.addButton(self.rbtn1)
self.btngroup1.addButton(self.rbtn2)
self.btngroup2.addButton(self.rbty1)
self.btngroup2.addButton(self.rbty2)
self.label_title_for_label_path = QLabel("Diretorio atual: ")
self.label_path = QLabel()
if self.path_to_save == None:
self.label_path.setText(self.basedir)
self.label_status = QLabel()
self.label_status.setText("")
btn_opendir = QPushButton("Escolher diretorio", self)
btn_opendir.clicked.connect(self.select_save_dir)
self.btn_download = QPushButton("Baixar", self)
self.btn_download.clicked.connect(self.download_yt)
input_form = QFormLayout()
input_form.addRow("Links:", self.yt_input)
layout = QVBoxLayout()
layout_type = QVBoxLayout()
layout_content = QVBoxLayout()
layout_dir = QHBoxLayout()
layout_status = QHBoxLayout()
layout.addWidget(btn_opendir)
layout.addWidget(self.btn_download)
layout_type.addWidget(self.type_label)
layout_type.addWidget(self.rbty1)
layout_type.addWidget(self.rbty2)
layout_content.addWidget(self.content_label)
layout_content.addWidget(self.rbtn1)
layout_content.addWidget(self.rbtn2)
layout_dir.addWidget(self.label_title_for_label_path)
layout_dir.addWidget(self.label_path)
layout_status.addWidget(self.label_status)
main_layout = QVBoxLayout()
main_layout.addWidget(self.yt_top_label)
main_layout.addLayout(input_form)
main_layout.addLayout(layout_type)
main_layout.addLayout(layout_content)
main_layout.addLayout(layout_dir)
main_layout.addLayout(layout_status)
main_layout.addLayout(layout)
youtubeTab.setLayout(main_layout)
return youtubeTab
def instagram_UI(self):
instagramTab = QWidget()
self.it_top_label = QLabel()
self.it_top_label.setText(
"Insira um ou mais links ou codigos separados por ';'."
)
self.it_input = QLineEdit()
self.label_title_for_label_path = QLabel("Diretorio atual: ")
self.label_path = QLabel()
if self.path_to_save == None:
self.label_path.setText(self.basedir)
btn_opendir = QPushButton("Escolher diretorio", self)
btn_opendir.clicked.connect(self.select_save_dir)
self.it_btn_download = QPushButton("Baixar", self)
self.it_btn_download.clicked.connect(self.download_insta)
self.ittype_label = QLabel("Tipo:")
self.btngroup3 = QButtonGroup()
self.itbtn1 = QRadioButton("Link")
self.itbtn2 = QRadioButton("Codigo")
self.itbtn2.setChecked(True)
self.btngroup3.addButton(self.itbtn1)
self.btngroup3.addButton(self.itbtn2)
self.itbtn1.toggled.connect(self.onClicked_type)
self.itbtn2.toggled.connect(self.onClicked_type)
layout = QVBoxLayout()
layout_type = QVBoxLayout()
layout_dir = QHBoxLayout()
layout_status = QHBoxLayout()
layout_type.addWidget(self.ittype_label)
layout_type.addWidget(self.itbtn1)
layout_type.addWidget(self.itbtn2)
layout_dir.addWidget(self.label_title_for_label_path)
layout_dir.addWidget(self.label_path)
layout.addWidget(btn_opendir)
layout.addWidget(self.it_btn_download)
layout_status.addWidget(self.label_status)
input_form = QFormLayout()
input_form.addRow("Links:", self.it_input)
main_layout = QVBoxLayout()
main_layout.addWidget(self.it_top_label)
main_layout.addLayout(input_form)
main_layout.addLayout(layout_type)
main_layout.addLayout(layout_dir)
main_layout.addLayout(layout_status)
main_layout.addLayout(layout)
instagramTab.setLayout(main_layout)
return instagramTab
def info_UI(self):
infoTab = QWidget()
linkTemplate = "<a href={0}>{1}</a>"
autor_label = QLabel("Criado por: <NAME>")
github_label = QLabel("Github: ")
github_link = QLabel(linkTemplate.format(GITHUB, GITHUB))
github_link.setOpenExternalLinks(True)
repositorio_label = QLabel("Repositorio: ")
repositorio_link = QLabel(linkTemplate.format(REPOSITORY, REPOSITORY))
repositorio_link.setOpenExternalLinks(True)
sobre_label = QLabel(
"Sobre: Este programa foi criado para facilitar o download de vídeos do youtube e instagram."
)
version_label = QLabel(f"Versao: {VERSION}")
github_layout = QHBoxLayout()
github_layout.addWidget(github_label)
github_layout.addWidget(github_link)
repositorio_layout = QHBoxLayout()
repositorio_layout.addWidget(repositorio_label)
repositorio_layout.addWidget(repositorio_link)
main_layout = QVBoxLayout()
main_layout.addWidget(autor_label)
main_layout.addLayout(github_layout)
main_layout.addLayout(repositorio_layout)
main_layout.addWidget(sobre_label)
main_layout.addWidget(version_label)
infoTab.setLayout(main_layout)
return infoTab
def select_save_dir(self):
dir_ = QFileDialog.getExistingDirectory(
None, "Select a folder:", "~/", QFileDialog.ShowDirsOnly
)
self.path_to_save = dir_
self.label_path.setText(dir_)
def download_yt(self):
self.label_status.setText("Fazendo Download ...")
links = self.yt_input.text()
if self.path_to_save == None:
self.path_to_save = self.basedir
self.thread = QThread()
self.worker = YoutubeDownloadThread(
self.path_to_save, links, self.type, self.content
)
self.worker.moveToThread(self.thread)
self.thread.started.connect(self.worker.run)
self.worker.finished.connect(self.thread.quit)
self.worker.finished.connect(self.thread.deleteLater)
self.worker.alert.connect(self.alert)
print(f"Thread criada")
self.thread.start()
print(f"Thread iniciada")
self.btn_download.setEnabled(False)
self.thread.finished.connect(lambda: self.btn_download.setEnabled(True))
self.thread.finished.connect(
lambda: self.label_status.setText("Download(s) Concluidos")
)
def download_insta(self):
self.label_status.setText("Fazendo Download ...")
links = self.it_input.text()
if self.path_to_save == None:
self.path_to_save = self.basedir
self.thread_it = QThread()
self.worker_it = InstagramDownloadThread(links, self.path_to_save, self.ittype)
self.worker_it.moveToThread(self.thread_it)
self.thread_it.started.connect(self.worker_it.run)
self.worker_it.finished.connect(self.thread_it.quit)
self.worker_it.finished.connect(self.thread_it.deleteLater)
self.worker_it.alert.connect(self.alert)
print(f"Thread criada")
self.thread_it.start()
print(f"Thread iniciada")
self.it_btn_download.setEnabled(False)
self.thread_it.finished.connect(lambda: self.it_btn_download.setEnabled(True))
self.thread_it.finished.connect(
lambda: self.label_status.setText("Download(s) Concluidos")
)
def onClicked_type(self):
btn = self.sender()
if btn.isChecked():
if btn.text() == "Video":
self.type = 0
elif btn.text() == "Playlist":
self.type = 1
elif btn.text() == "Link":
self.ittype = 0
elif btn.text() == "Codigo":
self.ittype = 1
def onClicked_content(self):
btn = self.sender()
if btn.isChecked():
if btn.text() == "MP3":
self.content = 0
elif btn.text() == "MP4":
self.content = 1
def alert(self, content, body):
msg = QMessageBox()
msg.setIcon(QMessageBox.Information)
msg.setText(content)
msg.setInformativeText(body)
msg.setWindowTitle("Info")
msg.exec_()
if __name__ == "__main__":
app = QApplication(sys.argv)
window = Window()
sys.exit(app.exec_())
``` |
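The worker classes imported from `core.thread` are not included in this sample. Based purely on how `main.py` uses them (the constructor arguments, a `run` slot, and `finished`/`alert` signals connected above), a minimal sketch of what they might look like is shown below; the signal names and constructor signatures are assumptions inferred from the calls in `main.py`, not the repository's actual code.

```python
# Hypothetical sketch of core/thread.py, inferred from main.py above; the real project may differ.
from PyQt5.QtCore import QObject, pyqtSignal


class YoutubeDownloadThread(QObject):
    finished = pyqtSignal()          # connected to thread.quit / deleteLater in main.py
    alert = pyqtSignal(str, str)     # connected to Window.alert(content, body)

    def __init__(self, path_to_save, links, type_, content):
        super().__init__()
        self.path_to_save = path_to_save
        self.links = links.split(";")
        self.type_ = type_      # 0 = video, 1 = playlist
        self.content = content  # 0 = MP3, 1 = MP4

    def run(self):
        for link in self.links:
            # the actual download logic (e.g. via pytube) lives here in the real project
            pass
        self.finished.emit()


class InstagramDownloadThread(QObject):
    finished = pyqtSignal()
    alert = pyqtSignal(str, str)

    def __init__(self, links, path_to_save, ittype):
        super().__init__()
        self.links = links.split(";")
        self.path_to_save = path_to_save
        self.ittype = ittype    # 0 = link, 1 = shortcode

    def run(self):
        for link in self.links:
            # the actual download logic (e.g. via instaloader) lives here in the real project
            pass
        self.finished.emit()
```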
{
"source": "jn89b/hierarchy_astar_uavs",
"score": 3
} |
#### File: hierarchy_astar_uavs/hierarchy_astar_uavs/monte_carlo.py
```python
from __future__ import print_function
from bson.objectid import ObjectId
import math
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
from scipy import spatial
from queue import PriorityQueue
from datetime import *
"""
To do:
get information of all open available landing zones in the grid
get information of drones who are flying in and requesting service
get information of geofencing area in grid locations
plan flight for uav based on cost heuristic of minimal distance
and no collisions
guide UAV to landing zone and set the open landing zone to closed
set UAV state to 1
listen for incoming UAVs and check if landing not avail
"""
class Node():
"""
parent = parent of current node
position = position of the node; right now it will be x,y coordinates
g = cost from start to the current node
h = heuristic
f = total cost (g + h)
"""
def __init__(self, parent, position):
self.parent = parent
self.position = position
self.g = 0
self.h = 0
self.f = 0
def __lt__(self, other):
return self.f < other.f
# Compare nodes
def __eq__(self, other):
return self.position == other.position
# Print node
def __repr__(self):
return ('({0},{1})'.format(self.position, self.f))
class Astar():
"""Astar"""
def __init__(self, grid, obs_list,start, goal):
self.grid = grid
self.start = [int(i) for i in start]
print("start is", start)
self.goal = goal
print("goal is", goal)
self.collision_bubble = 3.5
self.height_boundary = 20
self.ground_boundary = 5
self.obstacle_list = obs_list
self.openset = PriorityQueue() # priority queue
self.closedset = {}
#self.openset = []
def is_collision(self,distance):
"""check if there is a collision if so return True"""
if distance <= self.collision_bubble:
return True
def find_closest_obstacle(self, obstacles, current_position):
"""find closest obstacle from obstacle list, wrt current position"""
tree = spatial.KDTree(obstacles)
dist, obst_index = tree.query(current_position)
return dist, obst_index
def init_node(self):
start_node = Node(None,tuple(self.start))
start_node.g = start_node.h = start_node.f = 0
self.openset.put((start_node.f, start_node))
#self.openset.append(start_node)
self.end_node = Node(None, tuple(self.goal))
self.end_node.g = self.end_node.h = self.end_node.f = 0
def is_move_valid(self, node_position):
"""check if move made is valid if so then return True"""
if (node_position[0] > (len(self.grid) - 1) or
node_position[0] < 0 or
node_position[1] > (len(self.grid)-1) or
node_position[1] < 0 or
node_position[2] > self.height_boundary or
node_position[2] < self.ground_boundary ):
return False
def is_target_close(self, position, goal):
"""refactor this, just have distance as input"""
"""check if we are close to target if so we remove the penalty heuristic for
flying high or low"""
distance = self.compute_euclidean(position,goal)
if distance <= 1.5:
return True
def compute_euclidean(self,position, goal):
"""compute euclidiean with position and goal as 3 vector component"""
distance = math.sqrt(((position[0] - goal.position[0]) ** 2) +
((position[1] - goal.position[1]) ** 2) +
((position[2] - goal.position[2]) ** 2))
return distance
#This function return the path of the search
def return_path(self, current_node, grid):
path = []
no_rows = len(grid)
no_columns = len(grid)
# here we create the initialized result maze with -1 in every position
result = [[-1 for i in range(no_columns)] for j in range(no_rows)]
current = current_node
while current is not None:
path.append(current.position)
current = current.parent
# Return reversed path as we need to show from start to end path
path = path[::-1]
start_value = 0
# we update the path of start to end found by A-star search with every step incremented by 1
for i in range(len(path)):
result[path[i][0]][path[i][1]] = start_value
start_value += 1
return path
def main(self):
ss = 1
move = [[ss, 0, 0 ], # go forward
[ 0, -ss, 0], # go left
[ -ss, 0 , 0], # go backward
[ 0, ss, 0 ], #go right
[ss, ss, 0 ], #go forward right
[ss, -ss, 0], #go forward left
[-ss, ss, 0 ], #go back right
[-ss, -ss, 0], #go back left
[ 0, ss , ss], #go up z
[ 0, ss, -ss]] # go down z
self.init_node()
count = 0
"""main implementation"""
while not self.openset.empty():
#while len(self.openset) > 0:
count = count + 1
#print(count)
if count >= 4000:
print("iterations too much")
return self.closedset
if self.openset.empty():
print("No more moves")
#pop node off from priority queue and add into closedset
cost,current_node = self.openset.get()
self.closedset[current_node.position] = current_node
#check if we hit the goal
if current_node.position == self.end_node.position:
#print("Goal reached", current_node.position)
path = self.return_path(current_node, self.grid)
print("success!", count)
return path
#move generation
children = []
for new_position in move:
node_position = (current_node.position[0] + new_position[0],\
current_node.position[1] + new_position[1],\
current_node.position[2] + new_position[2])
# Make sure within range (check if within maze boundary)
if self.is_move_valid(node_position) == False:
#print("move is invalid")
continue
# Make sure walkable terrain
#print("checking node", node_position)
if self.grid[node_position] != 0:
#print("not walkable")
continue
#check collision bubble here
dist, obst_index = self.find_closest_obstacle(self.obstacle_list, node_position)
#print("checking", self.obstacle_list[obst_index])
if self.is_collision(dist):
#print("collision")
continue
#create new node
new_node = Node(current_node, node_position)
# put to possible paths
children.append(new_node)
#check each children
for child in children:
#check if children is already visited
if child.position in self.closedset:
#print("Exists", child.position)
continue
if abs(current_node.position[2] - child.position[2]) == 1:
penalty = 1.25
#print("penalty", penalty)
else:
penalty = 1
"""Heuristic costs calculated here, this is using eucledian distance"""
#print("child.position", child.position)
if self.is_target_close(current_node.position, self.end_node):
#print("current_node", current_node.position)
child.g = current_node.g + 1
child.h = self.compute_euclidean(child.position, self.end_node)
dynamic_weight = 0.5
child.f = child.g + (child.h *penalty*dynamic_weight)
#print(child.f)
else:
#print(current_node.g)
child.g = current_node.g + 1
dynamic_weight = 1.5
child.h = self.compute_euclidean(child.position, self.end_node)
child.f = child.g + (child.h *penalty*dynamic_weight)
#add to open set
#print("putting in", child)
self.openset.put((child.f, child))
class PreLandingService():
"""
Pre Landing Service:
Assigns landing zones and flight waypoints to incoming UAVs.
Should probably be renamed Pre Flight Planner.
"""
ip_address = "127.0.0.1"
port_num = 27017
poolsize = 100
database_name = "message_store"
main_col_name = "data_service"
landing_srv_col_name = "landing_service_db"
landing_zone_col_name = "landing_zones"
geofencing_col = None #need to figure out how to set up geofencing in the area
def __init__(self):
#access database
self.dbInfo = Database.AbstractDatabaseInfo(self.ip_address, self.port_num, self.poolsize)
self.mainDB = self.dbInfo.access_database(self.database_name)
#collections
self.main_collection = self.mainDB[self.main_col_name]
self.landing_service_col = self.mainDB[self.landing_srv_col_name]
self.landing_zone_col = self.mainDB[self.landing_zone_col_name]
#ros service proxies with mongodb
self.data_srv_col_prox = MessageStoreProxy(collection=self.main_col_name)
self.landing_srv_col_prox = MessageStoreProxy(collection=self.landing_srv_col_name)
self.landing_zone_col_prox = MessageStoreProxy(collection=self.landing_zone_col_name)
def check_open_zones(self):
myquery = {"Vacant": True}
cursor = self.landing_zone_col.find(myquery)
if cursor.count() == 0:
return False
def find_open_zones(self):
"""requests query for open landing zones"""
myquery = {"Vacant": True}
open_zone_names = []
open_zone_coordinates= []
cursor = self.landing_zone_col.find(myquery)
for document in cursor:
open_zone_names.append(document['Zone Number'])
open_zone_coordinates.append(tuple(document['location']))
return open_zone_names, open_zone_coordinates
def get_uavs(self):
myquery = {"landing_service_status": 0}
uav_names = []
uav_battery = []
cursor = self.landing_service_col.find(myquery)
for document in cursor:
uav_names.append(document['uav_name'])
uav_battery.append(document['battery'])
return uav_names, uav_battery
def get_uav_info(self, field_name):
"""return field name info where landing service status is at 0
field_name must be type str"""
myquery = {"landing_service_status": 0}
uav_info_list = []
cursor = self.landing_service_col.find(myquery)
for document in cursor:
uav_info_list.append(document[field_name])
return uav_info_list
def find_closest_zone(self, uav_loc, landing_zones):
"""find closest zone location to uav location"""
print("finding closest zone in", landing_zones)
tree = spatial.KDTree(landing_zones)
dist,zone_index = tree.query(uav_loc)
return dist, zone_index
def assign_uav_zone(self,uav_name, zone_name, uav_home_list):
"""assigns uav to zone and sets the landing zone as false, so no longer vacant"""
self.landing_zone_col.update({"Zone Number": zone_name},
{ "$set": {
"Occupied by": uav_name,
"Vacant": False }})
self.landing_service_col.update({"_id": uav_name},
{ "$set": {
"Zone Assignment": zone_name,
"Home Position": uav_home_list}})
print("Assigned", uav_name + " to landing zone ", zone_name)
def find_waypoints(self,grid_space, obstacles, uav_loc, goal_point):
"""sends the actual location in Unreal Engine coordiante axis
so 5.0 is the actual 5.0 of the Unreal coordinate frame"""
astar = Astar(grid_space, obstacles, uav_loc, goal_point)
uav_wp = astar.main()
return uav_wp
def insert_waypoints(self,uav_name, uav_waypoint_list):
self.landing_service_col.update({"_id": uav_name},
{ "$set": {
"Waypoint": uav_waypoint_list}})
def get_offset_wp(self, uav_path, home_base_loc):
"""might not need this offset"""
array = np.array(uav_path)
result = (array[:,0] - home_base_loc[0], array[:,1] - home_base_loc[1], array[:,2])
x = result[0]
y = result[1]
z = result[2]
offset_wp = [list(coords) for coords in zip(x,y,z) ]
return offset_wp
def return_unassigned_list(self,some_list, index):
"""return all other zones or uavs not assigned to uav to make as a no fly zone"""
copy = some_list
copy.pop(index)
print("copy", copy)
return copy
def add_obstacles(self,grid, obstacle_list):
""""add obstacles to grid location"""
for obstacle in obstacle_list:
#print(obstacle)
(grid[obstacle[2],obstacle[0], obstacle[1]]) = 1
return obstacle_list
def get_dynamic_obstacles(self, idx, uav_path_obs):
"""generate dynamic obstacles from uav waypoints"""
#should be a function to make dynamic obstacles
if idx == 0:
new_obstacle = obstacle_list + \
self.return_unassigned_list(zone_locations[:], zone_idx)
else:
uav_path_obs.append(path_list[idx-1])
flat_list = [item for sublist in uav_path_obs for item in sublist]
new_obstacle = obstacle_list + \
self.return_unassigned_list(zone_locations[:], zone_idx) + \
self.return_unassigned_list(uav_loc_list[:], idx) + flat_list
grid_copy = grid.copy()
new_obstacle = self.add_obstacles(grid_copy, new_obstacle)
return grid_copy, new_obstacle
def compute_vectors(self,point_1, point_2, point_3):
vector_start = np.array(point_2)- np.array(point_1)
vector_end = np.array(point_3) - np.array(point_2)
return vector_start, vector_end
def compute_cross_product(self,vector_1, vector_2):
return np.cross(vector_1, vector_2)
def reduce_waypoints(self,waypoint_list):
print(waypoint_list)
filtered_waypoints = []
for i, waypoint in enumerate(waypoint_list):
if i+2 - len(waypoint_list) == 0:
filtered_waypoints.append(waypoint_list[i+1])
"""might want to append last waypoint value to new list"""
return filtered_waypoints
vec_start, vec_end = self.compute_vectors(waypoint, waypoint_list[i+1], waypoint_list[i+2])
cross_product = self.compute_cross_product(vec_start, vec_end)
if (cross_product[0] == 0 and cross_product[1] == 0
and cross_product[2] == 0):
print("collinear")
else:
print("not collinear")
filtered_waypoints.append(waypoint)
filtered_waypoints.append(waypoint_list[i+2])
return filtered_waypoints
def generate_grid(grid_row, grid_col, grid_height):
grid = []
grid = np.zeros((grid_height, grid_row, grid_col))
return grid
def plot_path(grid_z, grid_x, grid_y, waypoint_list, obstacles, goal):
"""plot pathway -> using this for flight trajectory"""
fig = plt.figure()
ax = Axes3D(fig)
ax.set_xlim([-1, grid_x])
ax.set_ylim([-1, grid_y])
ax.set_zlim([-1, 30])
for obstacle in obstacles:
ax.scatter(obstacle[0],obstacle[1], obstacle[2], color='red')
#plot waypoints
x = [x[0] for x in waypoint_list]
y = [y[1] for y in waypoint_list]
z = [z[2] for z in waypoint_list]
ax.plot3D(x,y,z, 'bo', linestyle="-")
ax.scatter(goal[0], goal[1], goal[2], color='purple', marker="+")
plt.grid()
plt.show()
if __name__ == '__main__':
preLandingService = PreLandingService()
"""this is the homebase need to refactor this"""
grid_z = 50 # this is probably the z axis
grid_x = 50 # this is x
grid_y = 50 # this is y
grid = generate_grid(grid_z, grid_x,grid_y)
static_obstacle_list = [(30,10)]
obstacle_list = []
for static_obstacle in static_obstacle_list:
x = static_obstacle[0]
y = static_obstacle[1]
for z in range(25):
obstacle_list.append((x,y,z))
""""""
obstacle_list = preLandingService.add_obstacles(grid, obstacle_list)
"""this is very bloated need to refactor"""
if preLandingService.check_open_zones() == False:
print("No open zones")
else:
print("assigning")
uav_path_obs = []
path_list = []
"""probably better to refactor the information as a dictionary and
delete after its done doing its job"""
uav_names = preLandingService.get_uav_info("uav_name")
uav_loc_list = preLandingService.get_uav_info("uav_location")
uav_home_list = preLandingService.get_uav_info("uav_home")
"""assigning locations"""
for idx, uav_loc in enumerate(uav_loc_list):
zone_names, zone_locations = preLandingService.find_open_zones()
dist, zone_idx = preLandingService.find_closest_zone(uav_loc, zone_locations)
preLandingService.assign_uav_zone(uav_names[idx], zone_names[zone_idx], uav_home_list[idx])
"""generating obstacles"""
grid_copy, new_obstacle = preLandingService.get_dynamic_obstacles(idx, uav_path_obs)
"""apply astar algorithim here"""
uav_wp = preLandingService.find_waypoints(grid_copy, new_obstacle, \
uav_loc, zone_locations[zone_idx])
path_list.append(uav_wp)
"""reduce the amount of waypoints we need to send"""
filter_wp = preLandingService.reduce_waypoints(uav_wp)
preLandingService.insert_waypoints(uav_names[idx], filter_wp)
"""this plot is for debugging"""
#plot_path(grid_z, grid_x, grid_y, uav_wp, new_obstacle, zone_locations[zone_idx])
```
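`reduce_waypoints` thins the A* output by dropping intermediate waypoints that are collinear with their neighbours: the cross product of the two consecutive segment vectors is the zero vector exactly when the three points lie on a line. A small self-contained illustration of that test (the helper name below is illustrative, not from the repository):

```python
import numpy as np

def is_collinear(p1, p2, p3):
    """True when p2 lies on the line through p1 and p3, i.e. the cross product
    of the segment vectors (p2 - p1) and (p3 - p2) is the zero vector."""
    v_start = np.array(p2) - np.array(p1)
    v_end = np.array(p3) - np.array(p2)
    return not np.any(np.cross(v_start, v_end))

print(is_collinear((0, 0, 5), (1, 1, 5), (2, 2, 5)))  # True  -> middle waypoint can be dropped
print(is_collinear((0, 0, 5), (1, 1, 5), (2, 1, 5)))  # False -> keep the turn point
```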
#### File: hierarchy_astar_uavs/hierarchy_astar_uavs/plot_animation.py
```python
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.animation as animation
from IPython.display import HTML # Animation on jupyter lab
from matplotlib.animation import PillowWriter # For GIF animation
#####Data Generation####
import pandas as pd
import pickle
x = []
y = []
z = []
def load_pickle(pkl_file):
"""load pkl file"""
with open(pkl_file, 'rb') as f:
path = pickle.load(f)
return path
class AnimateSearchSpace():
"""
needs the following inputs:
self.method_name = (str) the name of the path-finding algorithm being used
self.path_method = list of 3 items consisting of [path, iterations, dictionary of visited nodes]
"""
def __init__(self, method_name, path_method, method_position, start_pos, goal_pos):
self.x = []
self.y = []
self.z = []
self.start_pos = start_pos
self.goal_pos = goal_pos
self.method_name = method_name
self.path_method = path_method
self.method_position = method_position
def __set_size_params(self,x_bounds, y_bounds, z_bounds):
self.fig = plt.figure(figsize=(8, 8))
self.ax = self.fig.add_subplot(111, projection="3d")
self.ax.set_xlim3d(x_bounds[0], x_bounds[1])
self.ax.set_ylim3d(y_bounds[0], y_bounds[1])
self.ax.set_zlim3d(z_bounds[0], z_bounds[1])
self.title = self.ax.set_title(self.method_name)
def update_graph(self,num):
if num == len(self.method_position)-1:
print("yes")
x_list = [position[0] for position in self.path_method[0]]
y_list = [position[1] for position in self.path_method[0]]
z_list = [position[2] for position in self.path_method[0]]
for path in self.path_method[0]:
self.ax.plot(x_list, y_list, z_list, color='blue',linewidth=5)
self.title.set_text('{}, Number of Visited Nodes={}, Total Number of Iterations {}'.format(
self.method_name, num, self.path_method[1]))
else:
data = self.method_position[num]
print("num", num)
self.x.append(data[0])
self.y.append(data[1])
self.z.append(data[2])
self.graph._offsets3d = (self.x, self.y, self.z)
self.title.set_text('{}, Number of Visited Nodes={}'.format(self.method_name, num))
return self.graph,
def plot_start_goal(self):
self.ax.scatter(self.start_pos[0], self.start_pos[1], self.start_pos[2], color='green', s=60, marker='x')
self.ax.scatter(self.goal_pos[0], self.goal_pos[1], self.goal_pos[2], color='red', s=60, marker='x')
def animate_plot(self, x_bounds, y_bounds, z_bounds):
"""animate the 3d plot with specificiations of the bounds of the grid to plot"""
marker_size = 80
self.__set_size_params(x_bounds, y_bounds, z_bounds)
self.graph = self.ax.scatter(self.method_position[0][0], self.method_position[0][1], self.method_position[0][2], color='orange')
self.plot_start_goal()
self.ani = animation.FuncAnimation(self.fig, self.update_graph,frames=len(self.method_position),
interval=10, blit=False, repeat=False)
plt.show()
start_pos = [0,0,50]
goal_pos = [6,0,50]
plt.close('all')
x_bounds = [0,8]
y_bounds = [0,8]
z_bounds = [45,55]
djikstra = load_pickle('djikstra_path.pkl')
djikstra_position = [list(v.position) for (k,v) in djikstra[2].items()]
djikstra_search = AnimateSearchSpace(start_pos=start_pos, goal_pos=goal_pos, method_name='Djikstra', path_method=djikstra, method_position=djikstra_position)
djikstra_search.animate_plot(x_bounds=x_bounds, y_bounds=y_bounds, z_bounds=z_bounds)
astarreg = load_pickle('astarreg.pkl')
astarreg_position = [list(v.position) for (k,v) in astarreg[2].items()]
astarreg_search = AnimateSearchSpace(start_pos=start_pos, goal_pos=goal_pos, method_name='Astar', path_method=astarreg, method_position=astarreg_position)
astarreg_search.animate_plot(x_bounds, y_bounds, z_bounds)
```
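The script expects pickle files whose contents match the indexing in `update_graph`: `path_method[0]` is the final path, `path_method[1]` the iteration count, and `path_method[2]` a dictionary of visited nodes whose values expose a `.position` attribute. A sketch of how a compatible file could be produced; the `Node` stand-in and the dummy data are hypothetical, not the planner's real node class:

```python
import pickle
from collections import namedtuple

Node = namedtuple("Node", "position")            # hypothetical stand-in for the planner's node class
path = [(0, 0, 50), (1, 0, 50), (2, 0, 50), (3, 0, 50)]
visited = {p: Node(p) for p in path}             # dict of visited nodes keyed by position
iterations = len(visited)                        # placeholder for the real iteration count

with open("astarreg.pkl", "wb") as f:
    pickle.dump([path, iterations, visited], f)
```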
#### File: hierarchy_astar_uavs/hierarchy_astar_uavs/plot_situation.py
```python
from tracemalloc import start
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
import seaborn as sns
def save_image(image_name, fig):
"""saves image"""
image_format = 'svg' # e.g .png, .svg, etc.
# image_name = 'myimage.svfg'
fig.savefig('images/'+image_name+'.svg', format=image_format, dpi=1200)
class PlotSituation():
"""
sanity check plots
"""
def __init__(self, grid, obstacle_list):
self.grid = grid
self.grid_size = [grid.x_size+1, grid.y_size+1, grid.z_size]
self.obstacle_list = obstacle_list
## COLOR PARAMS
self.color_list = ['g', 'c', 'm', 'b']
self.color_map = {str([0,0]): self.color_list[0],
str([0,1]): self.color_list[1],
str([1,0]): self.color_list[2],
str([1,1]): self.color_list[3]
}
def __set_axis(self):
fig = plt.figure()
ax = plt.axes(projection="3d")
#add offsets
ax.set_xlim([-1,100])
ax.set_ylim([-1,100])
ax.set_zlim([0,50])
# ax.set_xlim([-1, self.grid_size[0]+1])
# ax.set_ylim([-1, self.grid_size[1]+1])
# ax.set_zlim([-1, self.grid_size[2]+1])
return fig,ax
def __set_2d_axis(self):
fig = plt.figure()
ax = plt.axes()
ax.set_xlim([-1,100])
ax.set_ylim([-1,100])
return fig,ax
def __plot_static_obstacles(self, ax):
for obstacle in self.obstacle_list:
#obstacle_z_list = obstacle.z_list
#for obstacle_z in obstacle_z_list:
ax.scatter(obstacle[0], obstacle[1], obstacle[2], color='red')
def plot_config_space(self):
"""just plotting the configuration space"""
fig, ax = self.__set_axis()
#self.__plot_static_obstacles(ax)
#lets plot regions in here too
color_list = ['g', 'c', 'm', 'b']
line_styles = ['-', '--', '-.', ':']
cluster_dict = self.grid.cluster_dict
for i, (cluster_key, cluster) in enumerate(cluster_dict.items()):
for j, (entrance_key, entrance) in enumerate(cluster.mapped_entrances.items()):
if entrance:
x_val = [x[0] for x in entrance]
y_val = [y[1] for y in entrance]
z_val = [z[2] for z in entrance]
ax.plot(x_val, y_val, z_val[0], color = color_list[i], linestyle=line_styles[j],
label=(cluster_key,entrance_key))
#ax.scatter(x_val, y_val, z_val, color=color_list[i], marker='x')
#xticks = np.arange(0,self.grid[0])
#ax.legend()
plt.grid()
plt.show()
save_image('Regions', fig)
def __extract_keys(self, node_keys):
node_locations = []
for node_key in node_keys:
node_locations.append(eval(node_key))
return node_locations
def plot_reservation(self,reservation_table):
fig, ax = self.__set_axis()
for val in reservation_table:
ax.scatter3D(val[0], val[1], val[2])
def plot_nodes(self, graph):
"""plot connection nodes"""
fig, ax = self.__set_axis()
self.__plot_static_obstacles(ax)
node_dict = graph.graph
node_keys = node_dict.keys()
node_locations = self.__extract_keys(node_keys)
#lets plot regions in here too
color_list = ['g', 'c', 'm', 'b']
line_styles = ['-', '--', '-.', ':']
cluster_dict = self.grid.cluster_dict
color_map = {str([0,0]): color_list[0],
str([0,1]): color_list[1],
str([1,0]): color_list[2],
str([1,1]): color_list[3]
}
for node_key, node_list in node_dict.items():
node_coords = []
inter_lines = []
for node in node_list:
node_coords.append(node.location)
if node.node_type == "INTER":
node_marker = "x"
inter_lines.append(node.location)
ax.plot(node.location[0], node.location[1], node.location[2],
marker=node_marker, color=color_map[str(node.cluster_coord)])
# x_val = [x[0] for x in node_coords]
# y_val = [y[1] for y in node_coords]
# z_val = [z[2] for z in node_coords]
# ax.plot(x_val, y_val, z_val, color=color_map[str(node.cluster_coord)])
#ax.plot(x_val, y_val, z_val, color=color_list[0])
# ax.set_xlabel("x position")
def plot_start_end(self, graph, start_position):
fig, ax = self.__set_axis()
self.__plot_static_obstacles(ax)
node_dict = graph.graph
node_keys = node_dict.keys()
node_locations = self.__extract_keys(node_keys)
#lets plot regions in here too
color_list = ['g', 'c', 'm', 'b']
line_styles = ['-', '--', '-.', ':']
cluster_dict = self.annotated_map.cluster_dict
color_map = {str([0,0]): color_list[0],
str([0,1]): color_list[1],
str([1,0]): color_list[2],
str([1,1]): color_list[3]
}
for node_key, node_list in node_dict.items():
node_coords = []
inter_lines = []
for node in node_list:
node_coords.append(node.location)
if node.node_type == "INTER":
node_marker = "x"
inter_lines.append(node.location)
ax.plot(node.location[0], node.location[1], node.location[2],
marker=node_marker, color=color_map[str(node.cluster_coord)])
def plot_quadrant(self, graph, coordinates):
"""plot the quadrant"""
color_list = ['g', 'c', 'm', 'b']
fig,ax = self.__set_axis()
self.__plot_static_obstacles(ax)
x_val = [x[0] for x in coordinates]
y_val = [y[1] for y in coordinates]
z_val = [z[2] for z in coordinates]
ax.plot(x_val, y_val, z_val, color=color_list[0])
def plot_inter_nodes(self,graph, fig=None, ax=None):
if fig == None and ax == None:
fig, ax = self.__set_axis()
else:
node_dict = graph.graph
node_keys = node_dict.keys()
node_locations = self.__extract_keys(node_keys)
for node_key, node_list in node_dict.items():
node_coords = []
inter_lines = []
for node in node_list:
node_coords.append(node.location)
if node.node_type == "INTER":
node_marker = "x"
inter_lines.append(node.location)
ax.plot(node.location[0], node.location[1], node.location[2],
marker=node_marker, color=self.color_map[str(node.cluster_coord)])
save_image('Inter Nodes', fig)
# def save_image(image_name, fig):
# """saves image"""
# image_format = 'svg' # e.g .png, .svg, etc.
# # image_name = 'myimage.svfg'
# fig.savefig('images/'+image_name+'.svg', format=image_format, dpi=1200)
def plot_abstract_path(self, path_list, graph, color, filename):
"""plots the abstract from the waypoints assigned
show the regions and entryways I have to go through"""
fig ,ax = self.__set_axis()
#self.plot_inter_nodes(graph, ax)
self.__plot_static_obstacles(ax)
#plot start and stop points
start_end_size = 50
start_points = path_list[0]
end_points = path_list[-1]
ax.scatter3D(start_points[0], start_points[1], start_points[2], color="cyan", marker='o',
s=start_end_size)
ax.scatter3D(end_points[0], end_points[1], end_points[2], color="green", marker='^',
s=start_end_size)
path_coords = []
for path in path_list:
path_coords.append(path)
x_val = [x[0] for x in path_coords]
y_val = [y[1] for y in path_coords]
z_val = [z[2] for z in path_coords]
ax.plot(x_val, y_val, z_val, color=str(color))
save_image(filename, fig)
def plot_overall_paths(self, overall_path, graph, color,filename, uav_key=None):
"""plots the abstract from the waypoints assigned
show the regions and entryways I have to go through"""
fig ,ax = self.__set_axis()
#self.plot_inter_nodes(graph, ax)
# self.__plot_static_obstacles(ax)
#self.plot_inter_nodes(graph, fig=fig, ax=ax)
cluster_dict = self.grid.cluster_dict
#lets plot regions in here too
line_styles = ['-', '--', '-.', ':']
if uav_key !=None:
color_list = uav_key
else:
color_list = ['g', 'c', 'm', 'b']
#plot start and stop points
for i,path_list in enumerate(overall_path):
path_coords = []
if isinstance(path_list, int):
continue
for path in path_list:
start_end_size = 50
start_points = path_list[0]
end_points = path_list[-1]
ax.scatter3D(start_points[0], start_points[1], start_points[2], color="cyan", marker='o',
s=start_end_size)
ax.scatter3D(end_points[0], end_points[1], end_points[2], color="green", marker='^',
s=start_end_size)
path_coords.append(path)
x_val = [x[0] for x in path_coords]
y_val = [y[1] for y in path_coords]
z_val = [z[2] for z in path_coords]
ax.plot(x_val, y_val, z_val, color=color_list[i],fillstyle='none')
node_dict = graph.graph
node_keys = node_dict.keys()
node_locations = self.__extract_keys(node_keys)
#lets plot regions in here too
color_list = ['g', 'c', 'm', 'b']
line_styles = ['-', '--', '-.', ':']
cluster_dict = self.grid.cluster_dict
color_map = {str([0,0]): color_list[0],
str([0,1]): color_list[1],
str([1,0]): color_list[2],
str([1,1]): color_list[3]
}
# for node_key, node_list in node_dict.items():
# node_coords = []
# inter_lines = []
# for node in node_list:
# node_coords.append(node.location)
# if node.node_type == "INTER":
# node_marker = "x"
# inter_lines.append(node.location)
# ax.plot(node.location[0], node.location[1], node.location[2],
# marker=node_marker, color=color_map[str(node.cluster_coord)])
save_image(filename, fig)
def plot_2d_paths(self, overall_path, graph, color,filename, uav_key=None):
""""""
fig,ax = self.__set_2d_axis()
#self.plot_inter_nodes(graph, ax)
# self.__plot_static_obstacles(ax)
#self.plot_inter_nodes(graph, fig=fig, ax=ax)
cluster_dict = self.grid.cluster_dict
#lets plot regions in here too
line_styles = ['-', '--', '-.', ':']
if uav_key !=None:
color_list = uav_key
else:
color_list = ['g', 'c', 'm', 'b']
#plot start and stop points
for i,path_list in enumerate(overall_path):
path_coords = []
if isinstance(path_list, int):
continue
for path in path_list:
start_end_size = 50
start_points = path_list[0]
end_points = path_list[-1]
ax.scatter(start_points[0], start_points[1], color=color_list[i], marker='o',
s=start_end_size)
ax.scatter(end_points[0], end_points[1], color=color_list[i], marker='^',
s=start_end_size)
path_coords.append(path)
x_val = [x[0] for x in path_coords]
y_val = [y[1] for y in path_coords]
ax.plot(x_val, y_val, color=color_list[i],marker='o',fillstyle='none')
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
def plot_start_goal(self, overall_path, graph, color,filename):
"""plots the abstract from the waypoints assigned
show the regions and entryways I have to go through"""
fig ,ax = self.__set_axis()
#self.plot_inter_nodes(graph, ax)
# self.__plot_static_obstacles(ax)
#self.plot_inter_nodes(graph, fig=fig, ax=ax)
cluster_dict = self.grid.cluster_dict
#lets plot regions in here too
color_list = ['g', 'c', 'm', 'b']
line_styles = ['-', '--', '-.', ':']
#plot start and stop points
for path_list in overall_path:
path_coords = []
if isinstance(path_list, int):
continue
for path in path_list:
start_end_size = 50
start_points = path_list[0]
end_points = path_list[-1]
ax.scatter3D(start_points[0], start_points[1], start_points[2], color="cyan", marker='o',
s=start_end_size)
ax.scatter3D(end_points[0], end_points[1], end_points[2], color="green", marker='^',
s=start_end_size)
path_coords.append(path)
x_val = [x[0] for x in path_coords]
y_val = [y[1] for y in path_coords]
z_val = [z[2] for z in path_coords]
ax.plot(x_val, y_val, z_val, color=str(color), fillstyle='none')
node_dict = graph.graph
node_keys = node_dict.keys()
node_locations = self.__extract_keys(node_keys)
#lets plot regions in here too
color_list = ['g', 'c', 'm', 'b']
line_styles = ['-', '--', '-.', ':']
cluster_dict = self.grid.cluster_dict
color_map = {str([0,0]): color_list[0],
str([0,1]): color_list[1],
str([1,0]): color_list[2],
str([1,1]): color_list[3]
}
for node_key, node_list in node_dict.items():
node_coords = []
inter_lines = []
for node in node_list:
node_coords.append(node.location)
if node.node_type == "INTER":
node_marker = "x"
inter_lines.append(node.location)
ax.plot(node.location[0], node.location[1], node.location[2],
marker=node_marker, color=color_map[str(node.cluster_coord)])
save_image(filename, fig)
class Plotter():
"""generic class to plot stuff as I go on this semester"""
def __init__(self):
"""styles can be dark,etc"""
self.style = sns.set_style("darkgrid")
self.fontsize = 16
def plot_basic_line(self, x_vals, y_vals, title_name, x_label, y_label):
"""plot axis labels"""
fig = plt.figure()
plt.plot(x_vals, y_vals)
plt.title(title_name, fontsize=self.fontsize)
plt.xlabel(x_label, fontsize=self.fontsize)
plt.ylabel(y_label, fontsize=self.fontsize)
plt.tight_layout()
def plot_multiple_lines(self, x_list, y_list, line_labels, title_name, x_label, y_label):
"""plot multiple from x list and y list has line labels to refer to the line
this assumes that you have the same x axis, which you probably should have as well
as the same units for comparison for your y axis"""
fig = plt.figure()
for i,y_vals in enumerate(y_list):
plt.plot(x_list, y_vals, label=line_labels[i])
plt.title(title_name, fontsize=self.fontsize)
plt.xlabel(x_label, fontsize=self.fontsize)
plt.ylabel(y_label, fontsize=self.fontsize)
plt.legend()
plt.tight_layout()
def plot_multiple_response(self, x_list, y_list, line_labels, title_name, x_label, y_label):
"""plot multiple from x list and y list has line labels to refer to the line
this assumes that you have the same x axis, which you probably should have as well
as the same units for comparison for your y axis"""
fig = plt.figure()
color_pallete = sns.color_palette("rocket", n_colors=len(y_list))
print("color is ", color_pallete)
for i, (x_vals,y_vals,line_names) in enumerate(zip(x_list, y_list, line_labels)):
plt.plot(x_vals, y_vals, label=line_names, color = color_pallete[i])
plt.title(title_name, fontsize=self.fontsize)
plt.xlabel(x_label, fontsize=self.fontsize)
plt.ylabel(y_label, fontsize=self.fontsize)
plt.legend()
plt.tight_layout()
def plot_subplots(self, num_subplots, num_cols, x_list, y_list):
"""plot a bunch of subplots to the system """
# https://stackoverflow.com/questions/12319796/dynamically-add-create-subplots-in-matplotlib
# Subplots are organized in a Rows x Cols Grid
# Tot and Cols are known
Tot = num_subplots
Cols = num_cols
# Compute Rows required
Rows = Tot // Cols
Rows += Tot % Cols
# Create a Position index
Position = range(1,Tot + 1)
# Create main figure
fig = plt.figure()
for k in range(Tot):
# add every single subplot to the figure with a for loop
for x_vals,y_vals in zip(x_list, y_list):
ax = fig.add_subplot(Rows,Cols,Position[k])
for x, y in zip(x_vals, y_vals):
ax.plot(x, y)
plt.show()
``` |
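A minimal usage sketch for the generic `Plotter` helper above, assuming the file is importable as `plot_situation` (the data here is synthetic and purely illustrative):

```python
import numpy as np
import matplotlib.pyplot as plt
from plot_situation import Plotter   # assumes this file is saved as plot_situation.py

x = np.linspace(0, 10, 100)
plotter = Plotter()
plotter.plot_multiple_lines(x, [np.sin(x), np.cos(x)],
                            line_labels=["sin", "cos"],
                            title_name="Example comparison",
                            x_label="time (s)", y_label="value")
plt.show()
```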
{
"source": "jn89b/utm_monte_carlo_simulation",
"score": 4
} |
#### File: utm_monte_carlo_simulation/scripts/PathFinding.py
```python
import sys
import numpy as np
import math
from scipy import spatial
from queue import PriorityQueue
class Node():
"""
parent = parent of current node
position = position of the node; right now it will be x,y coordinates
g = cost from start to the current node
h = heuristic
f = total cost (g + h)
"""
def __init__(self, parent, position):
self.parent = parent
self.position = position
self.g = 0
self.h = 0
self.f = 0
def __lt__(self, other):
return self.f < other.f
# Compare nodes
def __eq__(self, other):
return self.position == other.position
# Print node
def __repr__(self):
return ('({0},{1})'.format(self.position, self.f))
class Astar():
"""Astar"""
def __init__(self, grid, obs_list,start, goal, min_h,max_h):
self.grid = grid
self.start = [int(i) for i in start]
#print("start is", start)
self.goal = goal
#print("goal is", goal)
self.collision_bubble = 4.0
self.height_boundary = 20
self.ground_boundary = 5
self.obstacle_list = obs_list
self.openset = PriorityQueue() # priority queue
self.closedset = {}
self.min_h = min_h
self.max_h = max_h
def is_collision(self,distance):
"""check if there is a collision if so return True"""
if distance <= self.collision_bubble:
return True
def find_closest_obstacle(self, obstacles, current_position):
"""find closest obstacle from obstacle list, wrt current position"""
tree = spatial.KDTree(obstacles)
dist, obst_index = tree.query(current_position)
return dist, obst_index
def init_node(self):
start_node = Node(None,tuple(self.start))
start_node.g = start_node.h = start_node.f = 0
self.openset.put((start_node.f, start_node))
#self.openset.append(start_node)
self.end_node = Node(None, tuple(self.goal))
self.end_node.g = self.end_node.h = self.end_node.f = 0
def is_move_valid(self, node_position):
"""check if move made is valid if so then return True"""
if (node_position[0] > (len(self.grid) - 1) or
node_position[0] < 0 or
node_position[1] > (len(self.grid)-1) or
node_position[1] < 0 or
node_position[2] > self.height_boundary or
node_position[2] < self.ground_boundary ):
return False
def is_target_close(self, position, goal):
"""refactor this, just have distance as input"""
"""check if we are close to target if so we remove the penalty heuristic for
flying high or low"""
distance = self.compute_euclidean(position,goal)
if distance <= 1.5:
return True
def compute_euclidean(self,position, goal):
"""compute euclidiean with position and goal as 3 vector component"""
distance = math.sqrt(((position[0] - goal.position[0]) ** 2) +
((position[1] - goal.position[1]) ** 2) +
((position[2] - goal.position[2]) ** 2))
return distance
#This function return the path of the search
def return_path(self, current_node, grid):
"""return path if there is one"""
path = []
no_rows = len(grid)
no_columns = len(grid)
# here we create the initialized result maze with -1 in every position
result = [[-1 for i in range(no_columns)] for j in range(no_rows)]
current = current_node
while current is not None:
path.append(current.position)
current = current.parent
# Return reversed path as we need to show from start to end path
path = path[::-1]
start_value = 0
# we update the path of start to end found by A-star search with every step incremented by 1
for i in range(len(path)):
result[path[i][0]][path[i][1]] = start_value
start_value += 1
return path
def main(self):
"""main method implementation of Astar"""
ss = 1
move = [[ss, 0, 0 ], # go forward
[ 0, -ss, 0], # go left
[ -ss, 0 , 0], # go backward
[ 0, ss, 0 ], #go right
[ss, ss, 0 ], #go forward right
[ss, -ss, 0], #go forward left
[-ss, ss, 0 ], #go back right
[-ss, -ss, 0], #go back left
[ 0, ss , ss], #go up z
[ 0, ss, -ss]] # go down z
self.init_node()
count = 0
"""main implementation"""
while not self.openset.empty():
count = count + 1
#print(count)
if count >= 2000:
print("iterations too much")
return None
if self.openset.empty():
print("No more moves")
return None
#pop node off from priority queue and add into closedset
cost,current_node = self.openset.get()
self.closedset[current_node.position] = current_node
#check if we hit the goal
if current_node.position == self.end_node.position:
#print("Goal reached", current_node.position)
path = self.return_path(current_node, self.grid)
print("success!", count)
return path
#move generation
children = []
for new_position in move:
node_position = (current_node.position[0] + new_position[0], current_node.position[1] + new_position[1], current_node.position[2] + new_position[2])
# Make sure within range (check if within maze boundary)
if self.is_move_valid(node_position) == False:
#print("move is invalid")
continue
# Make sure walkable terrain
#print("checking node", node_position)
if self.grid[node_position] != 0:
#print("not walkable")
continue
#check collision bubble here
dist, obst_index = self.find_closest_obstacle(self.obstacle_list, node_position)
#print("checking", self.obstacle_list[obst_index])
if self.is_collision(dist):
#print("collision")
continue
#create new node
new_node = Node(current_node, node_position)
# put to possible paths
children.append(new_node)
#check each children
for child in children:
#check if children is already visited
if child.position in self.closedset:
#print("Exists", child.position)
continue
if abs(current_node.position[2] - child.position[2]) == 1:
penalty = 1.25
#print("penalty", penalty)
else:
penalty = 1
"""Heuristic costs and determines if we want to be greedy or become more like Djikstras"""
#print("child.position", child.position)
if self.is_target_close(current_node.position, self.end_node):
child.g = current_node.g + 1
child.h = self.compute_euclidean(child.position, self.end_node)
dynamic_weight = self.min_h
child.f = child.g + (child.h *penalty*dynamic_weight)
else:
child.g = current_node.g + 1
dynamic_weight = self.max_h
child.h = self.compute_euclidean(child.position, self.end_node)
child.f = child.g + (child.h *penalty*dynamic_weight)
#add to open set
#print("putting in", child)
self.openset.put((child.f, child))
``` |
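This version of `Astar` differs from the one in `monte_carlo.py` mainly in that the heuristic weights are injected: far from the goal the heuristic is scaled by `max_h` (greedier, closer to best-first search), and within 1.5 cells of the goal by `min_h` (closer to Dijkstra). A minimal invocation sketch, assuming the file is importable as `PathFinding` and using the same grid conventions as the scripts above; grid size, obstacle column, and weight values are illustrative:

```python
import numpy as np
from PathFinding import Astar   # assumes this file is saved as PathFinding.py

grid = np.zeros((25, 25, 25))                    # all free space
obstacles = [(10, 10, z) for z in range(5, 21)]  # one vertical obstacle column
start, goal = (0, 0, 6), (20, 20, 6)

# min_h < 1 relaxes the heuristic near the goal; max_h > 1 makes the search
# greedier while it is still far away.
planner = Astar(grid, obstacles, start, goal, min_h=0.5, max_h=1.5)
waypoints = planner.main()
print(waypoints)
```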
{
"source": "jn89b/UTM",
"score": 2
} |
#### File: utm/scripts/apriltag_norm.py
```python
import roslib
import rospy
import tf
from nav_msgs.msg import Odometry
from std_msgs.msg import Header
from geometry_msgs.msg import PoseWithCovarianceStamped
from geometry_msgs.msg import PoseWithCovariance
from geometry_msgs.msg import Vector3, Point, PoseStamped, TwistStamped, PointStamped
from apriltag_ros.msg import AprilTagDetectionArray
class ApriltagNorm():
def __init__(self):
self.tag_id_ = rospy.get_param("~tag_id",0)
self.new_tf = rospy.get_param("~norm_tag_tf", "/tag_0")
self.source_tf = rospy.get_param("~cam_tf", "front_center_custom_optical")
#self.drone_frame_id_ = rospy.get_param("~quad_tf", "/drone0_wrt_world")
self.tags_topic_ = rospy.get_param('~tags_topic', '/tag_detections')
self.setpoint_topic_ = rospy.get_param('~setpoint_topic', 'setpoint/relative_pos')
rospy.Subscriber(self.tags_topic_, AprilTagDetectionArray, self.tagsCallback)
self.tag_norm_pub = rospy.Publisher("tag_norm", PoseStamped,queue_size=10)
self.br = tf.TransformBroadcaster()
# tags callback
def tagsCallback(self, msg):
valid = False
trans = []
scale_factor = 10
if len(msg.detections) > 0: # make sure detection is valid
overall_pose = msg.detections[0].pose.pose.pose
x = overall_pose.position.x/scale_factor
y = overall_pose.position.y/scale_factor
z = overall_pose.position.z/scale_factor
qx = overall_pose.orientation.x
qy = overall_pose.orientation.y
qz = overall_pose.orientation.z
qw = overall_pose.orientation.w
#(trans,rot) = self.tf_listener_.lookupTransform(self.drone_frame_id_, self.tag_frame_id_, rospy.Time(0))
#print(trans,rot)
valid = True
else:
rospy.logwarn("No valid TF for the required tag %s", self.tag_id_)
return
if valid:
print(x,y,z)
now = rospy.Time.now()
pose_msg = PoseStamped()
pose_msg.header.frame_id = self.new_tf
pose_msg.header.stamp = rospy.Time.now()
pose_msg.pose.position.x = x
pose_msg.pose.position.y = y
pose_msg.pose.position.z = z
pose_msg.pose.orientation.x = qx
pose_msg.pose.orientation.y = qy
pose_msg.pose.orientation.z = qz
pose_msg.pose.orientation.w = qw
self.tag_norm_pub.publish(pose_msg)
#sending transform rtagwrtdrone Rtag/drone
#self.br.sendTransform((trans[0]/scale_factor,trans[1]/scale_factor, trans[2]- camera_offset_z),(rot[0],rot[1],rot[2],rot[3]),now,self.new_tf, self.drone_frame_id_)
self.br.sendTransform((x,y,z),(0,0,0,1),now,self.new_tf, self.source_tf)
else:
pass
if __name__ == '__main__':
rospy.init_node('apriltag_norm', anonymous=True)
sp_o = ApriltagNorm()
rospy.loginfo("intialize apriltag norm")
rospy.spin()
```
#### File: utm/scripts/ekf_tag.py
```python
import rospy
import tf
import numpy as np
from geometry_msgs.msg import Point, PoseStamped, PoseWithCovarianceStamped, TwistStamped
from apriltag_ros.msg import AprilTagDetectionArray
"""
The KalmanFilter class subscribes to the lateral position of the tag and estimates the position
of the tag wrt the quad based on the camera sensor; it also estimates how fast the tag is moving.
"""
class KalmanFilter():
def __init__(self, F = None, B = None, H = None, Q = None, R = None, P = None, x0 = None):
if(F is None or H is None):
raise ValueError("Set proper system dynamics.")
self.n = F.shape[1]
self.m = H.shape[1]
self.F = F
self.H = H
self.B = 0 if B is None else B
self.Q = np.eye(self.n) if Q is None else Q
self.R = np.eye(self.n) if R is None else R
self.P = np.eye(self.n) if P is None else P
self.x = np.zeros((self.n, 1)) if x0 is None else x0
self.z = [0] * len(self.H)
#this is the apriltag position subscriber
rospy.Subscriber("tag/pose", PoseStamped, self.tagpose_cb)
self.kf_pub = rospy.Publisher("kf_tag/pose", PoseStamped, queue_size=10)
self.kf_vel_pub = rospy.Publisher("kf_tag/vel", TwistStamped, queue_size=10)
def predict(self, u = 0):
self.x = np.dot(self.F, self.x) + np.dot(self.B, u)
self.publish_kf_est() #publish the kf estimates for position and vel of tag
self.P = np.dot(np.dot(self.F, self.P), self.F.T) + self.Q
return self.x
def update(self):
y = self.z - np.dot(self.H, self.x)
S = self.R + np.dot(self.H, np.dot(self.P, self.H.T))
K = np.dot(np.dot(self.P, self.H.T), np.linalg.inv(S))
self.x = self.x + np.dot(K, y) # update state estimate
I = np.eye(self.n)
self.P = np.dot(np.dot(I - np.dot(K, self.H), self.P),
(I - np.dot(K, self.H)).T) + np.dot(np.dot(K, self.R), K.T)
def tagpose_cb(self,msg):
px = msg.pose.position.x
py = msg.pose.position.y
self.z = np.array([[px,py]]).T
return self.z
def publish_kf_est(self):
now = rospy.Time.now()
pose_msg = PoseStamped()
pose_msg.header.frame_id = "ekf_tag"
pose_msg.header.stamp = now
pose_msg.pose.position.x = self.x[0,0]
pose_msg.pose.position.y = self.x[1,0]
self.kf_pub.publish(pose_msg)
vel_msg = TwistStamped()
vel_msg.header.frame_id = "ekf_tag_vel"
vel_msg.header.stamp = now
vel_msg.twist.linear.x = self.x[2,0]
vel_msg.twist.linear.y = self.x[3,0]
self.kf_vel_pub.publish(vel_msg)
if __name__ == "__main__":
rospy.init_node("ekf_tag", anonymous=True)
print("starting")
rate_val = 10
#init vals
dt = 1/rate_val
####### CONSTANT ACCELERATION MODEL##########
#This array is for constant acceleration so size 6
x_1 = [1, 0.0, dt, 0.0, 1/2.0*dt**2, 0.0] #px
x_2 = [0.0, 1, 0.0, dt, 0.0, 1/2.0*dt**2] #py
x_3 = [0.0, 0.0 , 1, 0.0, dt, 0.0] #vx
x_4 = [0.0, 0.0, 0.0, 1, 0.0, dt] #vy
x_5 = [0.0, 0.0, 0.0, 0.0, 1, 0.0] #ax
x_6 = [0.0, 0.0, 0.0, 0.0, 0.0, 1] #ay
#F = np.array([[1, dt, 0, 0], [0, 1, dt, 0], [0, 0, 1, 0]])
F = np.array([x_1, x_2, x_3, x_4, x_5, x_6]) #feeding in x values in array
print(F.shape)
h_1 = [1, 0.0, 0.0, 0.0, 0.0, 0.0] #measurement of px
h_2 = [0.0, 1, 0.0, 0.0, 0.0, 0.0] #measurement of py
H = np.array([h_1, h_2])
print(H.shape)
Q_fact = 1E-6 # process noise covariance constant
Q = np.array([[Q_fact, 0, 0, 0, 0, 0],
[0, Q_fact, 0, 0, 0, 0],
[0, 0, Q_fact, 0, 0, 0],
[0, 0 , 0, Q_fact, 0, 0],
[0, 0 , 0, 0, Q_fact, 0],
[0, 0 , 0, 0, 0, Q_fact]])
##################################################
############ CONSTANT VELOCITY MODEL###############
"""
# This model is for constant velocity size 4x4
x_1 = [1, 0, dt, 0]
x_2 = [0, 1, 0, dt]
x_3 = [0, 0, 1, 0]
x_4 = [0, 0, 0, 1]
F = np.array([x_1, x_2, x_3, x_4])
h_1 = [1, 0, 0, 0]
h_2 = [0, 1, 0, 0]
H = np.array([h_1, h_2])
Q_fact = 1E-5 #process noise variance
Q = np.array([[Q_fact, 0, 0, 0],
[0, Q_fact, 0, 0],
[0, 0, Q_fact, 0],
[0, 0, 0, Q_fact]])
"""
##################################################
##### NOISE FACTOR AND INPUT TO KALMAN FILTER
R_factor = 0.4 # measurement of camera saying .3m off
R = np.array([[R_factor, 0], [0, R_factor]]) #measurement noise for kalman filter
kf = KalmanFilter(F = F, H = H, Q = Q, R = R) #import matrices into class
rate = rospy.Rate(rate_val)
while not rospy.is_shutdown():
kf.predict()
kf.update()
rate.sleep()
```
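Because `KalmanFilter.__init__` sets up ROS subscribers and publishers, the class cannot easily be exercised without a running ROS master. Below is a stripped-down offline sketch of the same constant-acceleration predict/update equations run on synthetic noisy measurements; everything in it (matrices, noise levels, data) mirrors the script above but is illustrative, not part of the original package:

```python
import numpy as np

dt = 0.1
F = np.array([[1, 0, dt, 0, 0.5*dt**2, 0],
              [0, 1, 0, dt, 0, 0.5*dt**2],
              [0, 0, 1, 0, dt, 0],
              [0, 0, 0, 1, 0, dt],
              [0, 0, 0, 0, 1, 0],
              [0, 0, 0, 0, 0, 1]], dtype=float)
H = np.array([[1, 0, 0, 0, 0, 0],
              [0, 1, 0, 0, 0, 0]], dtype=float)
Q = 1e-6 * np.eye(6)          # process noise, as in the script above
R = 0.4 * np.eye(2)           # camera measurement noise

x = np.zeros((6, 1))
P = np.eye(6)
rng = np.random.default_rng(0)

for k in range(50):
    # synthetic truth: tag drifting at 0.5 m/s in x, camera noise sigma ~ 0.4 m
    z = np.array([[0.5 * k * dt], [0.0]]) + rng.normal(0, 0.4, size=(2, 1))
    # predict
    x = F @ x
    P = F @ P @ F.T + Q
    # update (same equations as KalmanFilter.update above, Joseph form covariance)
    y = z - H @ x
    S = R + H @ P @ H.T
    K = P @ H.T @ np.linalg.inv(S)
    x = x + K @ y
    I = np.eye(6)
    P = (I - K @ H) @ P @ (I - K @ H).T + K @ R @ K.T

print("estimated position:", x[0, 0], x[1, 0])
print("estimated velocity:", x[2, 0], x[3, 0])
```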
#### File: utm/scripts/land_permission.py
```python
import rospy
import tf
import numpy as np
from geometry_msgs.msg import PoseStamped
from std_msgs.msg import Bool
"""
The PrecLand class listens for a landing request from the drone;
if it does request a landing, permission is granted -> probably make this a service?
"""
class PrecLand():
def __init__(self):
self.pub = rospy.Publisher("precland", Bool, queue_size=10)
self.sub = rospy.Subscriber("target_found", Bool, self.target_foundcb)
quad_odom_sub = rospy.Subscriber("mavros/offset_local_position/pose", PoseStamped, self.quad_odom_cb)
self.target_found = False
self.allow_land = Bool()
self.z = 0.0
def target_foundcb(self,msg):
self.target_found = msg.data
def quad_odom_cb(self,msg):
self.z = msg.pose.position.z
#need to make sure that the quad is also stabilized and that the error between
#tag and drone is within tolerance to allow safe landing
def check_permission(self):
if self.target_found == True or self.z < 0.8: #probably need to set this better
self.allow_land.data = True
self.pub.publish(self.allow_land)
else:
self.allow_land.data = False
self.pub.publish(self.allow_land)
if __name__=='__main__':
rospy.init_node("land_permission", anonymous=True)
rate_val = 30
rate = rospy.Rate(rate_val)
precland = PrecLand()
while not rospy.is_shutdown():
precland.check_permission()
rate.sleep()
``` |
{
"source": "jna29/SymGP",
"score": 3
} |
#### File: symgp/superexpressions/supermatexpr.py
```python
import string
from typing import List, Union, Any, Tuple, Iterable
from sympy import MatrixSymbol, BlockMatrix, Symbol, Inverse, Transpose, MatMul, MatAdd, ZeroMatrix, \
MatrixExpr, S, Identity
from sympy.core.decorators import call_highest_priority
from sympy.strategies import (rm_id, unpack, typed, flatten, sort, condition, exhaust,
do_one, new, glom)
from .supermatbase import SuperMatBase
from .supermatmul import SuperMatMul
from .supermatadd import SuperMatAdd
from symgp.utils import utils
# Some unicode symbols
SMALL_MU_GREEK = '\u03bc'
BIG_SIGMA_GREEK = '\u03a3'
BIG_OMEGA_GREEK = '\u03a9'
BIG_LAMBDA_GREEK = '\u039b'
SMALL_ETA_GREEK = '\u03b7'
Vector_t = List[MatrixExpr]
Matrix_t = List[List[MatrixExpr]]
class SuperMatSymbol(SuperMatBase, MatrixSymbol):
_op_priority = 99
_used_names = []
ALLOWED_PARAM_TYPES = ['mean', 'cov', 'invcov']
ALLOWED_MAT_TYPES = ALLOWED_PARAM_TYPES + ['var', 'sym', 'other', 'diag', 'blkdiag']
def __new__(cls, m, n, name='', mat_type='other', dep_vars=None, cond_vars=None,
expanded=None, blockform=None):
"""
The SuperMatSymbol constructor.
It is mainly used to automatically select the name of the symbol based on the parameters
given. We have to do it here because this is the only way to set the name.
(See ``__init__`` below for arguments)
:return: An object of class type ``MatrixSymbol`` with the automatically selected name.
"""
# Create name of symbol based on dep_vars and cond_vars if this is a parameter symbol e.g.
# 'mean', 'cov', 'invcov', etc.
if mat_type in SuperMatSymbol.ALLOWED_PARAM_TYPES and name == '':
if mat_type == 'mean':
if blockform is None and expanded is None:
pre_sym = SMALL_MU_GREEK
else:
pre_sym = 'm'
elif mat_type == 'cov':
if blockform is None and expanded is None:
pre_sym = BIG_SIGMA_GREEK
else:
pre_sym = 'S'
else: # mat_type == 'invcov':
if blockform is None and expanded is None:
pre_sym = BIG_SIGMA_GREEK + '_i'
else:
pre_sym = 'S_i'
name += pre_sym + '_{'
if n != 1 and not isinstance(dep_vars[0], list):
name += utils.create_distr_name([dep_vars, dep_vars], cond_vars) + '}'
else:
name += utils.create_distr_name(dep_vars, cond_vars) + '}'
else:
if cond_vars and mat_type not in SuperMatSymbol.ALLOWED_PARAM_TYPES:
raise Warning("cond_vars should only be set for mat_type = {}".format(
SuperMatSymbol.ALLOWED_PARAM_TYPES))
#if name not in SuperMatSymbol._used_names:
SuperMatSymbol._used_names.append(name)
#else:
# raise Exception("This name has been used: {}".format(name))
return MatrixSymbol.__new__(cls, name, m, n)
def __init__(self, m: Symbol, n: Symbol, name: str = '', mat_type: str = 'other',
dep_vars: Union[List[Any], List[List[Any]]] = None,
cond_vars: List[Any] = None, expanded: MatrixExpr = None,
blockform: Union[List[MatrixExpr], List[List[MatrixExpr]]] = None):
"""
The SuperMatSymbol initialiser.
:param name: The name of the symbol. Only used with mat_type == 'var' or 'other' (See
        '__new__'). For other mat_types, leave name='' so that the name is generated automatically.
:param m: Number of rows of matrix
:param n: Number of columns of matrix
:param mat_type: Type of matrix. Can be 'cov', 'invcov', 'mean', 'var' or 'other'
:param dep_vars: The variables that this symbol depends on. These are the variables
that this symbol is a function of which appear in the ``expanded/blockform`` directly
or through other matrices.
Leaving this to the default value means that there are no
dependent variables and so ``expanded/blockform`` shouldn't depend on any variables.
When ``blockform`` is specified, these variables are used to match the entries of the
block matrix.
For example, if ``blockform = [[S_xx, S_xy], [S_yx, S_yy]]`` (see ``blockform`` doc below)
where the entries are in general ``MatrixExpr``s then ``dep_vars = [x, y]`` or ``dep_vars =[[
x, y], [x, y]]`` where ``x`` and ``y`` are ``Variable``s (see below). The second form
where we have a list of lists is useful when the ``blockform`` isn't necessarily square.
The first list matches the first dimension of ``blockform`` and similarly for the second.
:param cond_vars: The variables this parameter is conditioned on. Can only be used for
'cov', 'invcov', 'mean', 'natmean', 'precision'.
:param expanded: The full algebraic expression represented by this symbol. For example,
``expanded = A - B*C.I*D`` where the letters are ``MatrixExpr``s in general.
:param blockform: A block matrix representation of the matrix. This contains the
``MatrixExpr``s in the block matrix stored as a list of lists. For example,
if a ``SuperMatSymbol`` represents a 2x2 block matrix, ``blockform = [[A, B], [C,D]]``
where the entries are ``MatrixExpr``s.
"""
self.mat_type = mat_type
self.dep_vars = dep_vars
self.cond_vars = list(cond_vars) if cond_vars else []
self.expanded = expanded
self.blockform = blockform # type: Union[List[MatrixExpr], List[List[MatrixExpr]]]
self.variables_dim1 = {}
self.variables_dim2 = {}
if dep_vars:
dep_vars = list(dep_vars)
if isinstance(dep_vars[0], list): # List of lists case
for i in range(len(dep_vars[0])):
if dep_vars[0][i] not in self.variables_dim1:
self.variables_dim1[dep_vars[0][i]] = i
for i in range(len(dep_vars[1])):
if dep_vars[1][i] not in self.variables_dim2:
self.variables_dim2[dep_vars[1][i]] = i
else: # All other cases
for i in range(len(dep_vars)):
if dep_vars[i] not in self.variables_dim1:
self.variables_dim1[dep_vars[i]] = i
if n != 1:
for i in range(len(dep_vars)):
if dep_vars[i] not in self.variables_dim2:
self.variables_dim2[dep_vars[i]] = i
def doit(self, **hints):
"""
We have to change MatrixSymbol.doit so that objects are constructed appropriately
Modeled off MatrixSymbol.doit in https://github.com/sympy/sympy/blob/master/sympy/matrices/
expressions/matexpr.py
"""
return self
def inverse(self):
return SuperMatInverse(self)
I = property(inverse, None, None, 'Matrix inversion')
def transpose(self):
if ((self.mat_type == 'cov' or self.mat_type == 'invcov' or self.mat_type == 'sym') and
(self.shape[0] == self.shape[1])):
return self
else:
return SuperMatTranspose(self)
T = property(transpose, None, None, 'Matrix transposition')
def partition(self, indices: Union[List[int], List[List[int]]]) -> Iterable:
"""
Partition a ``blockform`` into sections defined by ``indices``.
        - With an m x n matrix, four partitions are created.
        - With an m x 1 or 1 x m matrix, two partitions are created.
:param indices: The indices that define which elements to group from the blockform. This
is a 2-D list of lists for mxn matrices and 1-D list for mx1 or 1xm matrices where each
list specifies the indices for each dimension
:return: P, Q (, R, S) The partitions of the matrix. R and S are returned if matrix is mxn
for m != 1 and n != 1.
"""
if self.blockform is not None:
# First check indices are valid
if all([isinstance(l, list) for l in indices]) and len(indices) == 2:
assert (all([isinstance(l, list) for l in self.blockform])), \
"self.blockform must be a 2-D list of lists for these indices"
assert (all([i >= 0 and i < len(self.blockform) for i in indices[0]])), \
"Invalid first set of indices"
assert (all([i >= 0 and i < len(self.blockform[0]) for i in indices[1]])), \
"Invalid first set of indices"
P, Q, R, S = utils.partition_block(self.blockform, indices)
# Sort the variables based on their index in 'self.blockform'
variables_dim1_keys = sorted(self.variables_dim1.keys(),
key=lambda m: self.variables_dim1[m])
variables_dim2_keys = sorted(self.variables_dim2.keys(),
key=lambda m: self.variables_dim2[m])
# Get variables for both sets in both dimensions
indices_vars_dim1 = [v for v in variables_dim1_keys
if self.variables_dim1[v] in indices[0]]
indices_vars_dim2 = [v for v in variables_dim2_keys if
self.variables_dim2[v] in indices[1]]
rem_vars_dim1 = [v for v in variables_dim1_keys if
self.variables_dim1[v] not in indices[0]]
rem_vars_dim2 = [v for v in variables_dim2_keys if
self.variables_dim2[v] not in indices[1]]
# Get shapes of two sets in both dimensions
m1 = sum([v.shape[0] for v in rem_vars_dim1])
n1 = sum([v.shape[0] for v in rem_vars_dim2])
m2 = sum([v.shape[0] for v in indices_vars_dim1])
n2 = sum([v.shape[0] for v in indices_vars_dim2])
# TODO: Maybe change this to have the type correspond to ``self``
# Create partition symbols. self.mat_type must be one of {'cov', 'invcov',
# 'precision', 'other'} ('other' must be 2-D)
def get_partition(mat, m, n, dep_vars):
"""Returns a corrected partition"""
if len(mat) == 1 and len(mat[0]) == 1:
return mat[0][0]
else:
return SuperMatSymbol(m, n, mat_type=self.mat_type, dep_vars=dep_vars,
cond_vars=self.cond_vars, blockform=mat)
P = get_partition(P, m1, n1, rem_vars_dim1)
Q = get_partition(Q, m1, n2, [rem_vars_dim1, indices_vars_dim2])
R = get_partition(R, m2, n1, [indices_vars_dim1, rem_vars_dim2])
S = get_partition(S, m2, n2, indices_vars_dim1)
return P, Q, R, S
elif all([isinstance(l, int) for l in indices]):
assert (all([not isinstance(l, list) for l in
self.blockform])), "self.blockform must be a 1-D list"
assert (all([i >= 0 and i < len(self.blockform) for i in
indices])), "Invalid set of indices"
P, S = utils.partition_block(self.blockform, indices)
# Sort variables based on their index in 'self.blockform'
variables_keys = sorted(self.variables_dim1.keys(),
key=lambda m: self.variables_dim1[m])
# Get variables for both sets
indices_vars = [v for v in variables_keys if self.variables_dim1[v] in indices]
rem_vars = [v for v in variables_keys if self.variables_dim1[v] not in indices]
# Get shapes of two sets
m1 = sum([v.shape[0] for v in rem_vars])
m2 = sum([v.shape[0] for v in indices_vars])
# If we partition a ``Variable``, we need to create a new name for each half
if self.mat_type == 'var':
name1 = 'v_(' + ','.join([v.name for v in rem_vars]) + ')'
name2 = 'v_(' + ','.join([v.name for v in indices_vars]) + ')'
else:
name1 = ''
name2 = ''
# TODO: Maybe change this to have the type corresponding to ``self``
if len(P) == 1:
P = P[0]
else:
P = SuperMatSymbol(m1, 1, name1, mat_type=self.mat_type, dep_vars=rem_vars,
blockform=P)
if len(S) == 1:
S = S[0]
else:
S = SuperMatSymbol(m2, 1, name2, mat_type=self.mat_type, dep_vars=indices_vars,
blockform=S)
return P, S
else:
raise Exception("Invalid set of indices")
else:
raise Exception("Operation can't be performed as there is no blockform.")
def expand_partition(self):
"""
Expands blockform to replace SuperMatSymbol representations with their blockforms.
Only works with 2x2 block matrices or 1x2 block vectors (i.e. [a,b])
"""
if self.blockform is not None:
# Check size of blockform
if len(self.blockform) == 2 and isinstance(self.blockform[0], list) and len(
self.blockform[0]) == 2:
P_exp, Q_exp = self.blockform[0][0].blockform, self.blockform[0][1].blockform
R_exp, S_exp = self.blockform[1][0].blockform, self.blockform[1][1].blockform
# Check blockforms exist
for i, block in enumerate([P_exp, Q_exp, R_exp, S_exp]):
if block is None:
raise Exception("Block matrix {}, {} doesn't have a blockform".format(
i//2, i%2))
# Check that the shapes match
assert (len(P_exp) == len(Q_exp))
assert (len(P_exp[0]) == len(R_exp[0]))
assert (len(Q_exp[0]) == len(S_exp[0]))
assert (len(R_exp) == len(S_exp))
# Create the top and bottom part of the larger matrix i.e.
# top = [P_exp, Q_exp], bottom = [R_exp, S_exp]
top = []
for row1, row2 in zip(P_exp, Q_exp):
top.append(row1 + row2)
bottom = []
for row1, row2 in zip(R_exp, S_exp):
bottom.append(row1 + row2)
self.blockform = top + bottom
elif not isinstance(self.blockform[0], list) and len(self.blockform) > 1:
P_exp = self.blockform[0].blockform
S_exp = self.blockform[1].blockform
self.blockform = P_exp + S_exp
else:
raise Exception("self.blockform isn't a 2x2 matrix or a 1x2 list")
else:
raise Exception("This symbol has no blockform")
def to_full_expr(self):
"""
Returns the full expression for the blockform or expanded of this symbol
"""
if self.blockform is not None:
if all([isinstance(l, list) for l in self.blockform]) and all([len(l) == len(
self.blockform[0]) for l in self.blockform]):
m, n = len(self.blockform), len(self.blockform[0])
full_expr_blockform = []
for i in range(m):
full_expr_blockform.append(
[utils.expand_to_fullexpr(s) for s in self.blockform[i]])
return full_expr_blockform
elif all([isinstance(e, MatrixExpr) for e in self.blockform]):
return [utils.expand_to_fullexpr(s) for s in self.blockform]
else:
raise Exception("self.blockform is invalid: {}".format(self.blockform))
elif self.expanded is not None:
return utils.expand_to_fullexpr(self)
else:
return self
@staticmethod
def getUsedNames():
return SuperMatSymbol._used_names
@staticmethod
def used(name: str):
"""
Checks whether name is used
"""
return name in SuperMatSymbol._used_names
class SuperMatInverse(SuperMatBase, Inverse):
_op_priority = 10000
def __new__(cls, mat: SuperMatBase, expanded: MatrixExpr=None,
blockform: List[List[MatrixExpr]]=None):
"""
See '__init__' below for definition of parameters
"""
return Inverse.__new__(cls, mat)
def __init__(self, mat: SuperMatBase, expanded: MatrixExpr=None,
blockform: List[List[MatrixExpr]]=None):
"""
Creates an inverse matrix for the given ``SuperMatBase`` derived symbol
:param mat: The matrix/matrix expression to calculate the inverse of.
:param expanded: The expanded expression if it exists. If this isn't specified, we
calculate the ``self.expanded`` automatically from that in ``mat``.
:param blockform: The block matrix expression. Must be an N x N list of lists. If this
isn't specified, we calculate the ``self.blockform`` automatically from that in ``mat``.
"""
if not blockform is None:
assert len(blockform) == len(blockform[0]), "blockform must be square"
self.expanded = expanded
self.blockform = blockform
if any([isinstance(mat, sym) for sym in [SuperMatSymbol, SuperMatInverse,
SuperMatTranspose, SuperDiagMat]]):
if not mat.blockform is None:
assert len(mat.blockform) == len(mat.blockform[0]), \
"``mat`` must have a square blockform"
if mat.mat_type == 'cov':
self.mat_type = 'invcov'
elif mat.mat_type == 'invcov':
self.mat_type = 'cov'
else:
self.mat_type = mat.mat_type
self.dep_vars = mat.dep_vars
self.cond_vars = mat.cond_vars
self.variables_dim1 = mat.variables_dim1
self.variables_dim2 = mat.variables_dim2
if not mat.expanded is None and not self.expanded is None:
self.expanded = mat.expanded.I
if not mat.blockform is None and len(mat.blockform) > 1 and not self.blockform is None:
self.blockform = utils.matinv(mat.blockform)
vars_dim1 = sorted(mat.variables_dim1.keys(), key=lambda m: mat.variables_dim1[m])
vars_dim2 = sorted(mat.variables_dim2.keys(), key=lambda m: mat.variables_dim2[m])
for i in range(len(self.blockform)):
for j in range(len(self.blockform[0])):
var_i, var_j = vars_dim1[i], vars_dim2[j]
self.blockform[i][j] = SuperMatSymbol(var_i.shape[0], var_j.shape[0],
mat_type=self.mat_type,
dep_vars=[var_i, var_j],
cond_vars=mat.cond_vars,
expanded=self.blockform[i][j])
else:
self.mat_type = None
self.dep_vars = None
self.cond_vars = None
self.variables_dim1 = None
self.variables_dim2 = None
def transpose(self):
return self.arg.transpose().doit().inverse()
T = property(transpose, None, None, 'Matrix transposition')
def doit(self, **hints):
return self.arg.inverse()
@property
def name(self):
return self.arg.name
class SuperMatTranspose(SuperMatBase, Transpose):
_op_priority = 10000
def __new__(cls, mat: SuperMatBase, expanded: MatrixExpr = None,
blockform: List[List[MatrixExpr]] = None):
"""
See '__init__' below for definition of parameters
"""
return Transpose.__new__(cls, mat)
def __init__(self, mat: SuperMatBase, expanded: MatrixExpr = None,
blockform: List[List[MatrixExpr]] = None):
"""
Creates a transpose matrix for the given ``SuperMatBase`` derived symbol
:param mat: The matrix/matrix expression to calculate the transpose of.
:param expanded: The expanded expression if it exists. If this isn't specified, we
calculate the ``self.expanded`` automatically from that in ``mat``.
:param blockform: The block matrix expression. If this isn't specified, we calculate the
``self.blockform`` automatically from that in ``mat``.
"""
self.expanded = expanded
self.blockform = blockform
if any([isinstance(mat, sym) for sym in [SuperMatSymbol, SuperMatInverse,
SuperMatTranspose]]):
self.mat_type = mat.mat_type
self.dep_vars = mat.dep_vars
self.cond_vars = mat.cond_vars
self.variables_dim1 = mat.variables_dim2
self.variables_dim2 = mat.variables_dim1
if not mat.expanded is None and not self.expanded is None:
self.expanded = mat.expanded.T
if not mat.blockform is None and not self.blockform is None:
if isinstance(mat.blockform, Vector_t):
self.blockform = []
self.blockform.extend([x.T for x in mat.blockform])
elif isinstance(mat.blockform, Matrix_t):
self.blockform = [[0 for _ in range(len(mat.blockform))] for _ in
range(len(mat.blockform[0]))]
for i in range(len(mat.blockform)):
for j in range(len(mat.blockform[0])):
self.blockform[j][i] = mat.blockform[i][j].T
else:
self.mat_type = None
self.dep_vars = None
self.cond_vars = None
self.variables_dim1 = None
self.variables_dim2 = None
def doit(self, **hints):
return self.arg.transpose()
@property
def name(self):
return self.arg.name
class SuperDiagMat(SuperMatBase, MatrixExpr):
_op_priority = 10005
def __new__(cls, mat: SuperMatBase, expanded: MatrixExpr = None,
blockform: List[List[MatrixExpr]] = None):
"""
See '__init__' below for definition of parameters
"""
if isinstance(mat, SuperDiagMat):
return mat
elif isinstance(mat, SuperBlockDiagMat):
return SuperDiagMat(mat.arg, expanded=mat.expanded, blockform=mat.blockform)
else:
return MatrixExpr.__new__(cls, mat)
def __init__(self, mat: SuperMatBase, expanded: MatrixExpr = None,
blockform: List[List[MatrixExpr]] = None):
"""
Creates a diagonal matrix for the given ``SuperMatBase`` derived symbol
:param mat: The matrix/matrix expression to calculate the diagonal matrix of.
:param expanded: The expanded expression if it exists.
:param blockform: The block matrix expression.
"""
self.expanded = expanded
self.blockform = blockform
if any([isinstance(mat, sym) for sym in [SuperMatSymbol, SuperMatInverse,
SuperMatTranspose, SuperDiagMat,
SuperBlockDiagMat]]):
self.mat_type = 'diag'
self.dep_vars = mat.dep_vars
self.cond_vars = mat.cond_vars
self.variables_dim1 = mat.variables_dim1
self.variables_dim2 = mat.variables_dim2
else:
self.mat_type = 'diag'
self.dep_vars = None
self.cond_vars = None
self.variables_dim1 = None
self.variables_dim2 = None
def __repr__(self):
return 'diag[' + repr(self.arg) + ']'
def __str__(self):
return 'diag[' + str(self.arg) + ']'
def _sympystr(self, *args, **kwargs):
return self.__str__()
def __neg__(self):
return SuperMatMul(S.NegativeOne, self).doit()
def __abs__(self):
raise NotImplementedError
@call_highest_priority('__radd__')
def __add__(self, other):
if isinstance(other, SuperDiagMat):
return SuperDiagMat(SuperMatAdd(self.arg, other.arg).doit()).doit()
elif isinstance(other, MatMul) and any([isinstance(a, Identity) for a in other.args]):
return SuperDiagMat(SuperMatAdd(self.arg, other).doit()).doit()
else:
return SuperMatAdd(self, other).doit()
@call_highest_priority('__add__')
def __radd__(self, other):
if isinstance(other, SuperDiagMat):
return SuperDiagMat(SuperMatAdd(other.arg, self.arg).doit())
elif isinstance(other, MatMul) and any([isinstance(a, Identity) for a in other.args]):
return SuperDiagMat(SuperMatAdd(other, self.arg).doit()).doit()
else:
return SuperMatAdd(other, self).doit()
@call_highest_priority('__rsub__')
def __sub__(self, other):
if isinstance(other, SuperDiagMat):
return SuperDiagMat(SuperMatAdd(self.arg, -other.arg).doit())
elif isinstance(other, MatMul) and any([isinstance(a, Identity) for a in other.args]):
return SuperDiagMat(SuperMatAdd(self.arg, -other).doit()).doit()
else:
return SuperMatAdd(self, -other).doit()
@call_highest_priority('__sub__')
def __rsub__(self, other):
if isinstance(other, SuperDiagMat):
return SuperDiagMat(SuperMatAdd(other.arg, -self.arg).doit())
elif isinstance(other, MatMul) and any([isinstance(a, Identity) for a in other.args]):
return SuperDiagMat(SuperMatAdd(other, -self.arg).doit()).doit()
else:
return SuperMatAdd(other, -self).doit()
def inverse(self):
return SuperMatInverse(self)
I = property(inverse, None, None, 'Matrix inversion')
def transpose(self):
return self
T = property(transpose, None, None, 'Matrix transposition')
def doit(self, **hints):
return self
@property
def arg(self):
return self.args[0]
@property
def name(self):
return self.arg.name
@property
def shape(self):
return (self.arg.shape[0], self.arg.shape[1])
class SuperBlockDiagMat(SuperMatBase, MatrixExpr):
_op_priority = 10010
def __new__(cls, mat: SuperMatBase, expanded: MatrixExpr = None,
blockform: List[List[MatrixExpr]] = None):
"""
See '__init__' below for definition of parameters
"""
if isinstance(mat, SuperBlockDiagMat) or isinstance(mat, SuperDiagMat):
return mat
else:
return MatrixExpr.__new__(cls, mat)
def __init__(self, mat: SuperMatBase, expanded: MatrixExpr = None,
blockform: List[List[MatrixExpr]] = None):
"""
Creates a block diagonal matrix for the given ``SuperMatBase`` derived symbol
:param mat: The matrix/matrix expression to calculate the block diagonal matrix of.
:param expanded: The expanded expression if it exists.
:param blockform: The block matrix expression.
"""
self.expanded = expanded
self.blockform = blockform
        if any([isinstance(mat, sym) for sym in [SuperMatSymbol, SuperMatInverse,
SuperMatTranspose, SuperDiagMat,
SuperBlockDiagMat]]):
self.mat_type = 'blkdiag'
self.dep_vars = mat.dep_vars
self.cond_vars = mat.cond_vars
self.variables_dim1 = mat.variables_dim1
self.variables_dim2 = mat.variables_dim2
else:
self.mat_type = 'blkdiag'
self.dep_vars = None
self.cond_vars = None
self.variables_dim1 = None
self.variables_dim2 = None
def __repr__(self):
return 'blkdiag[' + repr(self.arg) + ']'
def __str__(self):
        return 'blkdiag[' + str(self.arg) + ']'
def _sympystr(self, *args, **kwargs):
return self.__str__()
def __neg__(self):
return SuperMatMul(S.NegativeOne, self).doit()
def __abs__(self):
raise NotImplementedError
@call_highest_priority('__radd__')
def __add__(self, other):
if isinstance(other, SuperBlockDiagMat):
return SuperBlockDiagMat(SuperMatAdd(self.arg, other.arg).doit())
elif isinstance(other, MatMul) and any([isinstance(a, Identity) for a in other.args]):
return SuperBlockDiagMat(SuperMatAdd(self.arg, other).doit()).doit()
else:
return SuperMatAdd(self, other).doit()
@call_highest_priority('__add__')
def __radd__(self, other):
if isinstance(other, SuperBlockDiagMat):
return SuperBlockDiagMat(SuperMatAdd(other.arg, self.arg).doit())
elif isinstance(other, MatMul) and any([isinstance(a, Identity) for a in other.args]):
return SuperBlockDiagMat(SuperMatAdd(other, self.arg).doit()).doit()
else:
return SuperMatAdd(other, self).doit()
@call_highest_priority('__rsub__')
def __sub__(self, other):
if isinstance(other, SuperBlockDiagMat):
return SuperBlockDiagMat(SuperMatAdd(self.arg, -other.arg).doit())
elif isinstance(other, MatMul) and any([isinstance(a, Identity) for a in other.args]):
return SuperBlockDiagMat(SuperMatAdd(self.arg, -other).doit()).doit()
else:
return SuperMatAdd(self, -other).doit()
@call_highest_priority('__sub__')
def __rsub__(self, other):
if isinstance(other, SuperBlockDiagMat):
return SuperBlockDiagMat(SuperMatAdd(other.arg, -self.arg).doit())
elif isinstance(other, MatMul) and any([isinstance(a, Identity) for a in other.args]):
return SuperBlockDiagMat(SuperMatAdd(other, -self.arg).doit()).doit()
else:
return SuperMatAdd(other, -self).doit()
def inverse(self):
return SuperMatInverse(self)
I = property(inverse, None, None, 'Matrix inversion')
def transpose(self):
return SuperBlockDiagMat(self.arg.T).doit()
T = property(transpose, None, None, 'Matrix transposition')
def doit(self, **hints):
return self
@property
def arg(self):
return self.args[0]
@property
def name(self):
return self.arg.name
@property
def shape(self):
return (self.arg.shape[0], self.arg.shape[1])
class Variable(SuperMatSymbol):
def __new__(cls, name: str, m: Union[Symbol, int], n: Union[Symbol, int]):
return SuperMatSymbol.__new__(cls, m, n, name=name, mat_type='var')
def __init__(self, name: str, m: Union[Symbol, int], n: Union[Symbol, int]):
"""
Constructor for a Variable symbol
:param name: The variable name
:param m: Number of rows
:param n: Number of columns
"""
SuperMatSymbol.__init__(self, m, n, name=name, mat_type='var')
class CompositeVariable(SuperMatSymbol):
"""
Represents a vector of individual ``Variable`` or ``CompositeVariable`` objects
"""
def __new__(cls, name: str, variables):
assert all([v.shape[1] == 1 for v in variables])
m = sum([v.shape[0] for v in variables])
n = variables[0].shape[1]
return SuperMatSymbol.__new__(cls, m, n, name=name, blockform=variables, mat_type='var')
def __init__(self, name: str, variables):
"""
Creates a combined variable from a list of ``Variable``s and/or ``CompositeVariable``s
:param name: The name for this combined variable.
:param variables: The list of ``Variable``/``CompositeVariable`` objects. They must all
have shape (?,1) where ``?`` can vary for each variable
"""
m = sum([v.shape[0] for v in variables])
n = variables[0].shape[1]
SuperMatSymbol.__init__(self, m, n, name=name, blockform=variables, mat_type='var')
class Mean(SuperMatSymbol):
def __new__(cls, v: Union[Variable, CompositeVariable, List[Union[Variable, CompositeVariable]]],
cond_vars: List[Union[Variable, CompositeVariable]]=None, name: str='',
full_expr: Union[MatrixExpr, Vector_t]=None):
# Create name
if name == '':
if full_expr is None:
pre_sym = SMALL_MU_GREEK
else:
pre_sym = 'm'
name += pre_sym + '_{'
if isinstance(v, list):
name += utils.create_distr_name(v, cond_vars) + '}'
else:
name += utils.create_distr_name([v], cond_vars) + '}'
full_full_expr = utils.expand_to_fullexpr(full_expr) if isinstance(full_expr, MatrixExpr) else \
full_expr
if not full_expr is None and isinstance(full_full_expr, Mean) and \
full_full_expr.name == name:
return full_expr
if isinstance(v, list):
shape = sum([t.shape[0] for t in v])
shape = (shape, 1)
variables = v
else:
shape = v.shape
variables = [v]
if isinstance(full_expr, list):
assert utils.is_vector(full_expr), "full_expr must be a 1-D vector (i.e. a list)"
return SuperMatSymbol.__new__(cls, shape[0], shape[1], name=name, mat_type='mean',
dep_vars=variables, cond_vars=cond_vars, blockform=full_expr)
elif isinstance(full_expr, MatrixExpr):
return SuperMatSymbol.__new__(cls, shape[0], shape[1], name=name, mat_type='mean',
dep_vars=variables, cond_vars=cond_vars, expanded=full_expr)
elif full_expr is None:
return SuperMatSymbol.__new__(cls, shape[0], shape[1], name=name, mat_type='mean',
dep_vars=variables, cond_vars=cond_vars)
else:
raise Exception("Invalid full_expr provided: {}. Must be a list ("
"blockform), MatrixExpr (expanded) or None (no "
"expanded/blockform)".format(full_expr))
def __init__(self, v: Union[Variable, CompositeVariable, List[Union[Variable, CompositeVariable]]],
cond_vars: List[Union[Variable, CompositeVariable]]=None, name: str='',
full_expr: Union[MatrixExpr, Vector_t]=None):
"""
Constructor for a Mean symbol. This only works for distributions where variables aren't
conditioned on others.
:param v: The random variable this symbol is a mean for
:param cond_vars: The optional conditioned-on variables for the (implicit) distribution
that this mean symbol is a parameter of.
:param name: Optional name for this mean symbol. If left as is, a name is created
automatically.
:param full_expr: The expanded or blockform for this Mean symbol if it exists.
"""
full_full_expr = utils.expand_to_fullexpr(full_expr) if isinstance(full_expr, MatrixExpr) else \
full_expr
if not full_expr is None and isinstance(full_full_expr, Mean) and full_full_expr.name == \
self.name:
print("self.name: ", self.name)
print("name: ", name)
return
if isinstance(v, list):
shape = sum([t.shape[0] for t in v])
shape = (shape, 1)
variables = v
else:
shape = v.shape
variables = [v]
if isinstance(full_expr, list):
assert utils.is_vector(full_expr), "full_expr must be a 1-D vector (i.e. a list)"
SuperMatSymbol.__init__(self, shape[0], shape[1], name=name, mat_type='mean',
dep_vars=variables, cond_vars=cond_vars, blockform=full_expr)
elif isinstance(full_expr, MatrixExpr):
SuperMatSymbol.__init__(self, shape[0], shape[1], name=name, mat_type='mean',
dep_vars=variables, cond_vars=cond_vars, expanded=full_expr)
elif full_expr is None:
SuperMatSymbol.__init__(self, shape[0], shape[1], name=name, mat_type='mean',
dep_vars=variables, cond_vars=cond_vars)
else:
raise Exception("Invalid full_expr provided: {}. Must be a list ("
"blockform), MatrixExpr (expanded) or None (no "
"expanded/blockform)".format(full_expr))
class Covariance(SuperMatSymbol):
def __new__(cls,
v1: Union[Variable, CompositeVariable, List[Union[Variable, CompositeVariable]]],
v2: Union[Variable, CompositeVariable, List[Union[Variable, CompositeVariable]]] = None,
cond_vars: List[Union[Variable, CompositeVariable]] = None, name: str='',
full_expr: Union[MatrixExpr, Matrix_t]=None):
if v2 is None:
v2 = v1
variables = [v1, v2]
# Create name
if name == '':
if full_expr is None:
pre_sym = BIG_SIGMA_GREEK
else:
pre_sym = 'S'
name += pre_sym + '_{'
if isinstance(v1, list) and isinstance(v2, list):
name += utils.create_distr_name(variables, cond_vars) + '}'
elif ((isinstance(v1, Variable) or isinstance(v1, CompositeVariable)) and
(isinstance(v2, Variable) or isinstance(v2, CompositeVariable))):
name += utils.create_distr_name(variables, cond_vars) + '}'
else:
raise Exception("v1 and v2 must be the same. They can either be a list of "
"CompositeVariable/Variable or CompositeVariable/Variables themselves.")
full_full_expr = utils.expand_to_fullexpr(full_expr) if isinstance(full_expr, MatrixExpr) else \
full_expr
if not full_expr is None and isinstance(full_full_expr, Covariance) and \
full_full_expr.name == name:
return full_expr
if isinstance(v1, list) and isinstance(v2, list):
shape_v1 = sum([v.shape[0] for v in v1])
shape_v2 = sum([v.shape[0] for v in v2])
#assert shape_v1 == shape_v2, "Both lists of variables must have same shape"
shape = (shape_v1, shape_v2)
elif ((isinstance(v1, Variable) or isinstance(v1, CompositeVariable)) and
(isinstance(v2, Variable) or isinstance(v2, CompositeVariable))):
#assert v1.shape[0] == v2.shape[0], "Both variables must have same shape"
shape = (v1.shape[0], v2.shape[0])
# Get unique variables
variables = [v1] if v1 == v2 else [v1, v2]
else:
raise Exception("v1 and v2 must be the same. They can either be a list of "
"CompositeVariable/Variable or CompositeVariable/Variables themselves.")
if isinstance(full_expr, list):
assert utils.is_square(full_expr), "full_expr must be a square matrix"
return SuperMatSymbol.__new__(cls, shape[0], shape[1], name=name, mat_type='cov',
dep_vars=variables, cond_vars=cond_vars,
blockform=full_expr)
elif isinstance(full_expr, MatrixExpr):
return SuperMatSymbol.__new__(cls, shape[0], shape[1], name=name, mat_type='cov',
dep_vars=variables, cond_vars=cond_vars,
expanded=full_expr)
elif full_expr is None:
return SuperMatSymbol.__new__(cls, shape[0], shape[1], name=name, mat_type='cov',
dep_vars=variables, cond_vars=cond_vars)
else:
raise Exception("Invalid full_expr provided: {}. Must be a list of lists ("
"blockform), MatrixExpr (expanded) or None (no "
"expanded/blockform)".format(full_expr))
def __init__(self,
v1: Union[Variable, CompositeVariable, List[Union[Variable, CompositeVariable]]],
v2: Union[Variable, CompositeVariable, List[Union[Variable, CompositeVariable]]]=None,
cond_vars: List[Union[Variable, CompositeVariable]] = None, name: str='',
full_expr: Union[MatrixExpr, Matrix_t]=None):
"""
Constructor for a Covariance symbol. This only works for distributions where variables aren't
conditioned on others.
:param v1: The first argument of the covariance matrix.
:param v2: The second argument of the covariance matrix. If this isn't specified,
then we set v2 = v1.
:param cond_vars: The optional conditioned-on variables for the (implicit) distribution
that this covariance symbol is a parameter of.
:param name: Optional name for this covariance symbol. If left as is, a name is created
automatically
:param full_expr: The expanded or blockform for this Covariance symbol if it exists.
"""
full_full_expr = utils.expand_to_fullexpr(full_expr) if isinstance(full_expr, MatrixExpr) \
else full_expr
if not full_expr is None and isinstance(full_full_expr, Covariance) and \
full_full_expr.name == self.name:
print("self.name: ", self.name)
print("name: ", name)
return
if v2 is None:
v2 = v1
variables = [v1, v2]
if isinstance(v1, list) and isinstance(v2, list):
shape_v1 = sum([v.shape[0] for v in v1])
shape_v2 = sum([v.shape[0] for v in v2])
#assert shape_v1 == shape_v2, "Both lists of variables must have same shape"
shape = (shape_v1, shape_v2)
elif ((isinstance(v1, Variable) or isinstance(v1, CompositeVariable)) and
(isinstance(v2, Variable) or isinstance(v2, CompositeVariable))):
#assert v1.shape[0] == v2.shape[0], "Both variables must have same shape"
shape = (v1.shape[0], v2.shape[0])
# Get unique variables
variables = [v1] if v1 == v2 else [v1, v2]
else:
raise Exception("v1 and v2 must be the same. They can either be a list of "
"CompositeVariable/Variable or CompositeVariable/Variables themselves.")
if isinstance(full_expr, list):
assert utils.is_square(full_expr), "full_expr must be a square matrix"
SuperMatSymbol.__init__(self, shape[0], shape[1], name=name, mat_type='cov',
dep_vars=variables, cond_vars=cond_vars, blockform=full_expr)
elif isinstance(full_expr, MatrixExpr):
SuperMatSymbol.__init__(self, shape[0], shape[1], name=name, mat_type='cov',
dep_vars=variables, cond_vars=cond_vars, expanded=full_expr)
elif full_expr is None:
SuperMatSymbol.__init__(self, shape[0], shape[1], name=name, mat_type='cov',
dep_vars=variables, cond_vars=cond_vars)
else:
raise Exception("Invalid full_expr provided: {}. Must be a list of lists ("
"blockform), MatrixExpr (expanded) or None (no "
"expanded/blockform)".format(full_expr))
class Constant(SuperMatSymbol):
def __new__(cls, name: str, m: Union[Symbol, int], n: Union[Symbol, int],
full_expr: Union[MatrixExpr, Vector_t, Matrix_t]=None):
if isinstance(full_expr, list):
return SuperMatSymbol.__new__(cls, m, n, name=name, mat_type='other',
blockform=full_expr)
elif full_expr is not None:
return SuperMatSymbol.__new__(cls, m, n, name=name, mat_type='other',
expanded=full_expr)
else:
return SuperMatSymbol.__new__(cls, m, n, name=name, mat_type='other')
def __init__(self, name: str, m: Union[Symbol, int], n: Union[Symbol, int],
full_expr: Union[MatrixExpr, Vector_t, Matrix_t]=None):
"""
Constructor for a Constant symbol.
:param name: The variable name
:param m: The number of rows
:param n: The number of columns
:param full_expr: The detailed expression that this symbol represents. This can be a
standard ``MatrixExpr``, a list of ``MatrixExpr``s (representing a 1-D vector) or a list of
lists of ``MatrixExpr``s (representing a block matrix of matrix expressions)
"""
if isinstance(full_expr, list):
assert utils.is_vector(full_expr) or utils.is_matrix(full_expr), \
"Invalid full_expr list. Must be a 1-D list or if it is 2-D (list of lists), " \
"length of each list must be the same"
SuperMatSymbol.__init__(self, m, n, name=name, mat_type='other', blockform=full_expr)
elif isinstance(full_expr, MatrixExpr):
SuperMatSymbol.__init__(self, m, n, name=name, mat_type='other', expanded=full_expr)
elif full_expr is None:
SuperMatSymbol.__init__(self, m, n, name=name, mat_type='other')
else:
raise Exception("Invalid full_expr provided: {}. Must be a list or list of "
"lists (blockform), MatrixExpr (expanded) or None (no "
"expanded/blockform)".format(full_expr))
```
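A rough usage sketch of the classes above (assuming `Variable` and `Covariance` are re-exported by the `symgp.superexpressions` package; otherwise import them from `symgp.superexpressions.supermatexpr`; the symbols `x`, `y` and their dimensions are illustrative):
```python
from sympy import Symbol
from symgp.superexpressions import Variable, Covariance

n, m = Symbol('n'), Symbol('m')
x = Variable('x', n, 1)
y = Variable('y', m, 1)

# Covariance blocks between the individual variables
S_xx, S_xy = Covariance(x), Covariance(x, y)
S_yx, S_yy = Covariance(y, x), Covariance(y)

# Joint covariance over [x, y] with an explicit blockform
S = Covariance([x, y], full_expr=[[S_xx, S_xy], [S_yx, S_yy]])

# Split off the block indexed by 1 (the 'y' block); P should be S_xx and S2 should be S_yy
P, Q, R, S2 = S.partition([[1], [1]])
```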
#### File: symgp/superexpressions/supermatmul.py
```python
from sympy import MatMul, MatAdd, ZeroMatrix, MatrixBase, Identity, ShapeError, MatrixExpr, S, Number
from sympy.core.decorators import call_highest_priority
from sympy.strategies import (rm_id, unpack, typed, flatten, sort, condition, exhaust,
do_one, new, glom)
from .supermatbase import SuperMatBase
class SuperMatMul(SuperMatBase, MatMul):
"""
Redefines some methods of MatMul so as to make them amenable to our application
"""
_op_priority = 10000
def __new__(cls, *args, **kwargs):
return MatMul.__new__(cls, *args, **kwargs)
def as_coeff_mmul(self):
coeff, matrices = self.as_coeff_matrices()
return coeff, SuperMatMul(*matrices)
def _eval_transpose(self):
return SuperMatMul(*[arg.T if isinstance(arg, MatrixExpr) else arg
for arg in self.args[::-1]]).doit()
def _eval_inverse(self):
try:
return SuperMatMul(*[
arg.inverse() if isinstance(arg, MatrixExpr) else arg**-1
for arg in self.args[::-1]]).doit()
except ShapeError:
from .supermatexpr import SuperMatInverse
return SuperMatInverse(self)
#def transpose(self):
# from .supermatexpr import SuperMatTranspose
# return SuperMatTranspose(self).doit()
def doit(self, **kwargs):
deep = kwargs.get('deep', True)
if deep:
args = [arg.doit(**kwargs) for arg in self.args]
else:
args = self.args
return canonicalize(SuperMatMul(*args))
def newmul(*args):
if args[0] == 1:
args = args[1:]
return new(SuperMatMul, *args)
def any_zeros(mul):
if any([arg.is_zero or (arg.is_Matrix and arg.is_ZeroMatrix)
for arg in mul.args]):
matrices = [arg for arg in mul.args if arg.is_Matrix]
return ZeroMatrix(matrices[0].rows, matrices[-1].cols)
return mul
def merge_explicit(matmul):
""" Merge explicit MatrixBase arguments
>>> from sympy import MatrixSymbol, eye, Matrix, MatMul, pprint
>>> from sympy.matrices.expressions.matmul import merge_explicit
>>> A = MatrixSymbol('A', 2, 2)
>>> B = Matrix([[1, 1], [1, 1]])
>>> C = Matrix([[1, 2], [3, 4]])
>>> X = MatMul(A, B, C)
>>> pprint(X)
[1 1] [1 2]
A*[ ]*[ ]
[1 1] [3 4]
>>> pprint(merge_explicit(X))
[4 6]
A*[ ]
[4 6]
>>> X = MatMul(B, A, C)
>>> pprint(X)
[1 1] [1 2]
[ ]*A*[ ]
[1 1] [3 4]
>>> pprint(merge_explicit(X))
[1 1] [1 2]
[ ]*A*[ ]
[1 1] [3 4]
"""
if not any(isinstance(arg, MatrixBase) for arg in matmul.args):
return matmul
newargs = []
last = matmul.args[0]
for arg in matmul.args[1:]:
if isinstance(arg, (MatrixBase, Number)) and isinstance(last, (MatrixBase, Number)):
last = last * arg
else:
newargs.append(last)
last = arg
newargs.append(last)
return SuperMatMul(*newargs)
def xxinv(mul):
""" Y * X * X.I -> Y """
factor, matrices = mul.as_coeff_matrices()
for i, (X, Y) in enumerate(zip(matrices[:-1], matrices[1:])):
try:
if X.is_square and Y.is_square and X == Y.inverse():
I = Identity(X.rows)
return newmul(factor, *(matrices[:i] + [I] + matrices[i+2:]))
except ValueError: # Y might not be invertible
pass
return mul
def remove_ids(mul):
""" Remove Identities from a MatMul
This is a modified version of sympy.strategies.rm_id.
    This is necessary because MatMul may contain both MatrixExprs and Exprs
as args.
See Also
--------
sympy.strategies.rm_id
"""
# Separate Exprs from MatrixExprs in args
factor, mmul = mul.as_coeff_mmul()
# Apply standard rm_id for MatMuls
result = rm_id(lambda x: x.is_Identity is True)(mmul)
if result != mmul:
return newmul(factor, *result.args) # Recombine and return
else:
return mul
def factor_in_front(mul):
factor, matrices = mul.as_coeff_matrices()
if factor != 1:
return newmul(factor, *matrices)
return mul
rules = (any_zeros, remove_ids, xxinv, unpack, rm_id(lambda x: x == 1),
merge_explicit, factor_in_front, flatten)
canonicalize = exhaust(typed({SuperMatMul: do_one(*rules)}))
def only_squares(*matrices):
""" factor matrices only if they are square """
if matrices[0].rows != matrices[-1].cols:
raise RuntimeError("Invalid matrices being multiplied")
out = []
start = 0
for i, M in enumerate(matrices):
if M.cols == matrices[start].rows:
out.append(SuperMatMul(*matrices[start:i+1]).doit())
start = i+1
return out
from sympy.assumptions.ask import ask, Q
from sympy.assumptions.refine import handlers_dict
def refine_SuperMatMul(expr, assumptions):
"""
>>> from sympy import MatrixSymbol, Q, assuming, refine
>>> X = MatrixSymbol('X', 2, 2)
>>> expr = X * X.T
>>> print(expr)
X*X.T
>>> with assuming(Q.orthogonal(X)):
... print(refine(expr))
I
"""
newargs = []
exprargs = []
for args in expr.args:
if args.is_Matrix:
exprargs.append(args)
else:
newargs.append(args)
last = exprargs[0]
for arg in exprargs[1:]:
if arg == last.T and ask(Q.orthogonal(arg), assumptions):
last = Identity(arg.shape[0])
elif arg == last.conjugate() and ask(Q.unitary(arg), assumptions):
last = Identity(arg.shape[0])
else:
newargs.append(last)
last = arg
newargs.append(last)
return SuperMatMul(*newargs)
handlers_dict['SuperMatMul'] = refine_SuperMatMul
from .supermatadd import SuperMatAdd
```
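A small sketch of the simplification rules above in action (assuming `SuperMatSymbol` behaves as defined in `supermatexpr.py`; the exact printed forms may differ):
```python
from sympy import Symbol
from symgp.superexpressions.supermatexpr import SuperMatSymbol
from symgp.superexpressions.supermatmul import SuperMatMul

n = Symbol('n')
A = SuperMatSymbol(n, n, 'A')
B = SuperMatSymbol(n, n, 'B')

# xxinv + remove_ids: A * A^{-1} * B should collapse to B
expr = SuperMatMul(A, A.I, B).doit()

# _eval_transpose reverses the argument order: (A*B)^T should give B^T * A^T
expr_T = SuperMatMul(A, B).T
```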
#### File: symgp/utils/utils.py
```python
from __future__ import print_function, division
from typing import Union, Optional, List, Iterable, Dict, Any
from collections import defaultdict
from abc import ABCMeta
import copy
import re
import string
import math
import logging
import sys
import numpy as np
from sympy import (MatMul, MatAdd, Basic, MatrixExpr, MatrixSymbol, ZeroMatrix, Symbol, Identity, Transpose,
Inverse, Number, Rational, ln, Determinant, pi, sympify, srepr, S, Expr, Matrix)
from sympy.printing.latex import LatexPrinter
from sympy.core.evaluate import global_evaluate
from sympy.core.compatibility import iterable, ordered, default_sort_key
# GREEK symbols
SMALL_MU_GREEK = '\u03bc'
BIG_SIGMA_GREEK = '\u03a3'
SMALL_SIGMA_GREEK = '\u03c3'
BIG_OMEGA_GREEK = '\u03a9'
BIG_LAMBDA_GREEK = '\u039b'
SMALL_ETA_GREEK = '\u03b7'
LOG = logging.getLogger(__name__)
LOG.setLevel(logging.INFO)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
LOG.addHandler(ch)
######## Matrix operations with lists as matrices ########
def _mul_with_num(num, mat):
"""
Used by 'matmul'
Multiplies a matrix/vector represented as a list (see 'matmul') with a number.
Args:
num - A number to multiply all elements of mat with
mat - A list/list of lists representing a vector/matrix (see 'matmul')
Returns:
mat of same shape with elements multiplied with num
"""
from symgp.superexpressions import SuperMatMul
if isinstance(mat[0], list):
new_mat = [[SuperMatMul(num,mat[i][j]).doit() for j in range(len(mat[0]))] for i in range(len(mat))]
else:
new_mat = [SuperMatMul(num, v).doit() for v in mat]
return new_mat
def _check_shape_matmul(mat, order):
"""
Checks size of 'mat' and reshapes if necessary.
Args:
mat - A list/list of lists representing a vector/matrix (see 'matmul')
order - Indicates whether mat is a left/right matrix/vector such that
we can broadcast appropriately.
Returns:
(m,n) - Tuple giving the shape of mat
broadcast - Boolean indicating whether we should broadcast list
"""
broadcast_list = False
if isinstance(mat[0],list):
m = len(mat)
n = len(mat[0])
elif order == 'left':
m = 1
n = len(mat)
broadcast_list = True
else: # order == 'right'
m = len(mat)
n = 1
broadcast_list = True
return m, n, broadcast_list
def matmul(list1, list2):
"""
Multiply two lists in a matrix fashion.
Similar to numpy's matrix multiplication of arrays:
- If list1 has shape (m1,) (i.e. it is a 1-D list) it is broadcast to (1,m1).
        Here we take the transpose of all the elements, as we assume they are column vectors.
list2 must have shapes (m1,n2) or (m1,) otherwise an Exception is raised.
A list of shape (n2,) or (1,) is returned.
- If list2 has shape (m2,) it is broadcast to (m2,1).
list1 must have shapes (m2,) or (m1,m2) otherwise an Exception is raised.
A list of shape (1,) or (m1,) is returned.
- Any other case requires the shapes to match.
For example, we can call this as:
- matmul([[A, B], [C, D]], [a, b])
- matmul([[A, B], [C, D]], [[a], [b]])
All elements (A, B, C, D, a, b) are all SuperMatSymbols where the shapes must match.
Multiplying all elements in a list by a number is also supported e.g. matmul(a,5) or matmul(5,a).
"""
from symgp.superexpressions import SuperMatMul, SuperMatAdd
# Handle multiplication by integers
if isinstance(list1, int):
return _mul_with_num(list1, list2)
if isinstance(list2, int):
return _mul_with_num(list2, list1)
# Check sizes and reshape if necessary
m1, n1, broadcast_list1 = _check_shape_matmul(list1, 'left')
m2, n2, broadcast_list2 = _check_shape_matmul(list2, 'right')
# Check shapes
if n1 != m2:
raise Exception("Shapes don't match: %s, %s" % ((m1, n1), (m2, n2)))
# Multiply based on types of lists
if broadcast_list1 and broadcast_list2: # (1,n1) x (m2,1)
out_list = [SuperMatAdd(*[SuperMatMul(list1[i],list2[i]).doit() for i in range(n1)]).doit()]
elif broadcast_list1: # (1,n1) x (m2,n2)
out_list = [0 for _ in range(n2)]
for i in range(n2):
out_list[i] = SuperMatAdd(*[SuperMatMul(list1[j],list2[j][i]).doit() for j in range(m2)]).doit()
elif broadcast_list2: # (m1,n1) x (m2,1)
out_list = [0 for _ in range(m1)]
for i in range(m1):
out_list[i] = SuperMatAdd(*[SuperMatMul(list1[i][j],list2[j]).doit() for j in range(m2)]).doit()
else: # (m1,n1) x (m2,n2)
out_list = [[0 for _ in range(n2)] for _ in range(m1)]
for i in range(m1):
for j in range(n2):
out_list[i][j] = SuperMatAdd(*[SuperMatMul(list1[i][k],list2[k][j]).doit() for k in range(n1)]).doit()
return out_list
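# Illustrative usage of ``matmul`` with hypothetical n x n block symbols A, B, C, D
# and n x 1 vectors a, b (e.g. built as SuperMatSymbol/Variable objects):
#
#     matmul([[A, B], [C, D]], [a, b])  ->  [A*a + B*b, C*a + D*b]
#     matmul(2, [a, b])                 ->  [2*a, 2*b]
#
# Nested lists are treated as block matrices; 1-D lists are broadcast as
# described in the docstring above.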
def _check_shape_matadd(mat):
"""
Determines matrix shape of given matrix (as defined in matmul) 'mat'
Args:
mat - A list/list of lists representing a vector/matrix (see 'matmul')
Returns:
m, n - The shape of mat
"""
if isinstance(mat[0],list):
m = len(mat)
n = len(mat[0])
else:
m = 0
n = len(mat)
return m, n
def _assert_shapes(m1,n1,m2,n2):
"""
Checks whether shapes match
"""
if m1 != m2 or n1 != n2:
raise Exception("Shapes don't match: %s, %s" % ((m1, n1), (m2, n2)))
def matadd(list1, list2):
"""
Adds two lists that must be the same shape. We reshape list of (m,) to (0,m).
Returns a list of the same shape as the lists.
"""
from symgp.superexpressions import SuperMatAdd
# Check sizes
m1, n1 = _check_shape_matadd(list1)
m2, n2 = _check_shape_matadd(list2)
# Check shapes match
_assert_shapes(m1,n1,m2,n2)
# Shape out_list based on whether list1 is 1-D.
if m1 == 0:
out_list = [SuperMatAdd(list1[i],list2[i]).doit() for i in range(n1)]
else:
out_list = [[SuperMatAdd(list1[i][j],list2[i][j]).doit() for j in range(n1)] for i in range(m1)]
return out_list
def mattrans(mat):
"""
Returns the transpose of an mxn matrix (list of lists)
Arg:
mat - A list/list of lists representing a vector/matrix (see 'matmul')
Returns:
mat_T - A transpose of shape n x m where mat has shape m x n. If mat has
shape (m,) we simply return mat where each element is the
transpose of its corresponding element in mat.
"""
if all([not isinstance(e,list) for e in mat]): # (m,) case
return [e.T.doit() for e in mat]
else: # Other case
if any([not isinstance(e,list) for e in mat]):
raise Exception("mat is not a regular matrix")
m_T = len(mat[0])
n_T = len(mat)
mat_T = [[mat[j][i].T.doit() for j in range(n_T)] for i in range(m_T)]
return mat_T
def matinv(mat):
"""
Inverts nxn matrices.
Args:
mat - A list/list of lists representing a vector/matrix (see 'matmul') of
shape (n,n)
Returns:
        If n >= 2, we partition into 2x2 blocks (recursively for n > 2) and apply the block-inverse identity.
If n == 1, we simply return the SuperMatInverse of the element.
"""
if any([not isinstance(e,list) for e in mat]):
raise Exception("This is not a suitable matrix")
if len(mat) != len(mat[0]):
raise Exception("This isn't a square matrix.")
n = len(mat)
# Recursively calculate the inverse to get the large untidy expression
if n == 1:
return [[mat[0][0].I]]
else:
if n == 2:
P, Q = [[mat[0][0]]], [[mat[0][1]]]
R, S = [[mat[1][0]]], [[mat[1][1]]]
else:
P, Q, R, S = partition_block(mat,[len(mat)-1,len(mat[0])-1])
P_bar = matinv(matadd(P,matmul(matmul(matmul(-1,Q),matinv(S)),R)))
Q_bar = matmul(matmul(matmul(-1,P_bar),Q),matinv(S))
R_bar = matmul(matmul(matmul(-1,matinv(S)),R),P_bar)
S_bar = matadd(matinv(S),matmul(matmul(matmul(matmul(matinv(S),R),P_bar),Q),matinv(S)))
# Create new matrix by top bottom method i.e. create top of matrix then create bottom
top = []
for row1, row2 in zip(P_bar,Q_bar):
top.append(row1+row2)
bottom = []
for row1, row2 in zip(R_bar,S_bar):
bottom.append(row1+row2)
return top+bottom
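# Illustrative behaviour of ``matinv`` on a hypothetical 2x2 blockform [[P, Q], [R, S]]:
# with P_bar = (P - Q*S.I*R).I, the recursion above returns the standard block inverse
#
#     [[ P_bar,          -P_bar*Q*S.I             ],
#      [ -S.I*R*P_bar,    S.I + S.I*R*P_bar*Q*S.I ]]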
def _copy_block(block):
"""
Makes a copy of block as used by 'partition_block'
"""
new_block = []
if isinstance(block[0], list):
for row in block:
new_block.append(list(row))
else: #isinstance(block, list)
new_block = list(block)
return new_block
def is_matrix(block):
"""
Returns true if block is a matrix.
A matrix must be a Python list of lists where each list has length
greater than 1 and all lists must be same length
"""
return (all([isinstance(r,list) for r in block]) and all([len(block[0])==len(r) for r in block]))
def is_1d_vector(block):
"""
    Returns True if ``block`` is a 1-D list.
:param block: A list.
"""
return all([not isinstance(e, list) for e in block])
def is_2d_vector(block):
"""
    Returns True if ``block`` is a 2-D column vector, i.e. a list of single-element lists.
:param block: A list.
"""
return all([isinstance(r, list) for r in block]) and all([len(r) == 1 for r in block])
def is_vector(block):
"""
Returns true if block is a vector.
A vector must be:
- A Python list where each element is not a list e.g. [a,b,c]
- A Python list of lists where each list has length 1 e.g. [[a],[b],[c]]
"""
return is_1d_vector(block) or is_2d_vector(block)
def is_square(block):
"""
Determines whether block is a square matrix.
"""
return is_matrix(block) and (len(block[0]) == len(block))
def _move_cols_to_end(block, indices):
"""
Moves the columns given by indices to the end of block
preserving the order of the columns
For example if:
block = [[1, 2, 3, 4],
[5, 6, 7, 8],
[9,10,12,13]]
indices=[1,2]
we get
block = [[1, 4, 2, 3],
[5, 8, 6, 7],
[9,13,10,12]]
"""
num_rows, num_cols = len(block), len(block[0])
indices = sorted(indices,reverse=True)
new_block = _copy_block(block)
for idx, col in enumerate(indices):
if col == num_cols-1:
continue
else:
c = col
            # Shift the column one place to the right until it reaches the last free position
while c < num_cols-(idx+1):
for row in range(num_rows):
temp = new_block[row][c]
new_block[row][c] = new_block[row][c+1]
new_block[row][c+1] = temp
c += 1
return new_block
def _move_elems_to_end(block, indices):
"""
Moves the elements in vector 'block' at locations given in 'indices' to the end
of the block whilst preserving the order of the elements
"""
indices = sorted(indices,reverse=True)
block_size = len(block)
new_block = _copy_block(block)
# Push elements corresponding to indices to end of list
for idx, k in enumerate(indices):
if k == block_size-1:
continue
else:
i = k
while i < block_size-(idx+1):
temp = new_block[i]
new_block[i] = new_block[i+1]
new_block[i+1] = temp
i += 1
return new_block
def partition_block(block, indices):
"""
Partitions a list into four or two sections based on the indices
Args:
block - The input block to be partitioned:
- If block is 2-D, we partition it into [[P, Q], [R, S]]
- If block is 1-D, we partition it into [a, b] (shape=(m,)) or
[[a],[b]] (shape=(m,1))
indices - The indices that form one partition:
- If block is 2-D, this can be 2-D (e.g. [[1,2],[0,1]]) or 1-D (e.g. [1,2,3]).
If 1-D, block needs to be square.
- If block is 1-D, this should also be a vector (e.g. [1,2,3] or [[1],[2],[3]])
Repeat indices are removed automatically. The order of the columns/rows are
preserved.
For example for block = [[A,B,C,D],[E,F,G,H],[I,J,K,L],[M,N,O,Z]] and
indices = [[0,2],[1,3]], we get:
P = [[E,G],[M,O]] Q = [[F,H],[N,Z]]
R = [[A,C],[I,K]] S = [[B,D],[J,L]]
Returns:
Either
P, Q, R, S - The four partitions for 2-D blocks
Or
a, b - The two partitions for 1-D blocks
"""
# Checks validity of indices values
_is_valid_idx = lambda idx, max_idx: all([(i >= 0 and i < max_idx) for i in idx])
# Check block is a correct block matrix/vector ([[...],[...]] or [...])
if not (is_matrix(block) or is_vector(block)):
raise Exception("The block to be partitioned must be a matrix ([[A,B], [C,D]]) or \
vector ([a,b] or [[a],[b]])")
# Copy block
new_block = _copy_block(block)
if is_matrix(new_block) and not is_vector(new_block):
num_rows, num_cols = len(new_block), len(new_block[0])
# Check indices are appropriate for matrix
if (all([isinstance(e,int) for e in indices]) and is_square(new_block)):
indices = [indices, indices] # Convert to 2-D
else:
if not all([isinstance(e,list) for e in indices]):
raise Exception("Incorrect form for indices for a matrix. Must be a list of lists e.g.\
[[1,2],[3]] or a 1-D list [1,2] if the matrix is square")
# Remove repeat set of indices
row_indices = list(set(indices[0]))
col_indices = list(set(indices[1]))
# Check for 1x1 case
if num_rows == 1 and num_cols == 1:
raise Exception("Can't partition a 1x1 block. Minimum size is 2x2")
# Check that all indices are in appropriate range
if not _is_valid_idx(row_indices,num_rows):
raise Exception("Invalid row indices. Must be in range: [%s,%s]" % (0,num_rows-1))
if not _is_valid_idx(col_indices,num_cols):
raise Exception("Invalid column indices. Must be in range: [%s,%s]" % (0,num_cols-1))
# First push columns indicated by indices to end
new_block = _move_cols_to_end(new_block, col_indices)
# Do same for rows
new_block = list(map(list,zip(*new_block))) # Flip rows and columns
new_block = _move_cols_to_end(new_block, row_indices)
new_block = list(map(list,zip(*new_block)))
m = num_rows - len(row_indices) # Number of rows of partition not given by indices
n = num_cols - len(col_indices) # Number of columns of partition not given by indices
# Create partitions
P = [new_block[i][:n] for i in range(m)] # No row and col indices
Q = [new_block[i][n:] for i in range(m)] # No row but col indices
R = [new_block[i][:n] for i in range(m, num_rows)] # No col but row indices
S = [new_block[i][n:] for i in range(m, num_rows)] # Intersection of row and col indices
return P, Q, R, S
else: # Vector
block_size = len(new_block)
# Check for 1x1 case
if block_size == 1:
raise Exception("Can't partition a 1x1 block")
# Check indices are appropriate for vector
if is_vector(indices):
if all([isinstance(e,list) for e in indices]): # Convert to 1-D list
indices = [e[0] for e in indices]
else:
raise Exception("Incorrect form of indices. Must be 1-D e.g. [1,2]")
# Check that all indices are in appropriate range
if not _is_valid_idx(indices,block_size):
raise Exception("Invalid indices. Must be in range: [%s,%s]" % (0,block_size-1))
# Remove duplicates
indices = list(set(indices))
new_block = _move_elems_to_end(new_block,indices)
# Partition
m1 = block_size - len(indices)
a = new_block[:m1]
b = new_block[m1:]
return a, b
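# Illustrative behaviour of ``partition_block`` with hypothetical symbols:
#
#     partition_block([[A, B], [C, D]], [[1], [1]])  ->  [[A]], [[B]], [[C]], [[D]]
#     partition_block([a, b, c], [1])                ->  [a, c], [b]
#
# The rows/columns named in ``indices`` are pushed to the end (order preserved)
# and returned as the second group of partitions.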
######## MVG helper functions ########
def get_logZ(cov):
"""
Calculates log-normalising constant symbol using cov
"""
return -cov.shape[0]/2*ln(2*pi) - Rational(1,2)*ln(Determinant(cov))
######### Search and replace functions ########
def replace_with_num(expr, d):
"""
Replaces matrix symbols with numerical matrices using a DFS search through the
expression tree.
Args:
- 'expr': The expression which we want to evaluate.
- 'd': A dictionary mapping the matrix symbols to numerical matrices (these can be
SymPy 'Matrix' objects or 'numpy.ndarray' arrays).
Returns:
- A 'numpy.ndarray' that is the evaluation of the expr with the numerical
matrices.
"""
import numpy as np
# Determine what to return based on type(expr)
if isinstance(expr, MatrixSymbol):
try:
return d[expr.name]
except KeyError as e:
print("Error: No numerical matrix was specified for %s" % (e))
elif isinstance(expr, Number):
return expr
elif isinstance(expr, MatrixExpr):
sub_exprs = []
for arg in expr.args:
sub_exprs.append(replace_with_num(arg, d))
if expr.is_MatMul:
for e in sub_exprs:
if not isinstance(e,Number):
shape = e.shape[0]
break
out = np.eye(shape)
for e in sub_exprs:
if isinstance(e,Number):
                    out *= float(e)
elif isinstance(e,Matrix):
out = np.dot(out,np.array(e.tolist(),dtype=np.float32))
else:
out = np.dot(out,e)
return out
elif expr.is_MatAdd:
if len(sub_exprs[0].shape) == 2:
out = np.zeros(sub_exprs[0].shape)
else:
out = np.zeros(sub_exprs[0].shape[0])
for e in sub_exprs:
if isinstance(e,Matrix):
out += np.array(e.tolist(),dtype=np.float32).reshape(out.shape)
else:
out += e.reshape(out.shape)
return out
elif expr.is_Inverse:
if isinstance(sub_exprs[0],Matrix):
out = np.linalg.inv(np.array(sub_exprs[0].tolist(),dtype=np.float32))
else:
out = np.linalg.inv(sub_exprs[0])
return out
else: # expr.is_Transpose
if isinstance(sub_exprs[0],Matrix):
out = np.array(sub_exprs[0].T.tolist(),dtype=np.float32)
else:
out = sub_exprs[0].T
return out
else:
raise Exception("Expression should be a MatrixExpr")
def evaluate_expr(expr, d):
"""
Evaluates a matrix expression with the given numerical matrices
Args:
- 'expr' - The symbolic matrix expression
- 'd' - A dictionary mapping the matrix symbols to numerical matrices
Returns:
- 'r' - The result of all the matrix calculations
"""
r = replace_with_num(expr, d)
return r
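# Illustrative usage of ``evaluate_expr`` (hypothetical symbols and matrices):
#
#     from sympy import MatrixSymbol
#     A, B = MatrixSymbol('A', 2, 2), MatrixSymbol('B', 2, 2)
#     d = {'A': np.eye(2), 'B': 2 * np.eye(2)}
#     evaluate_expr(A*B + A, d)   # -> numpy array equal to 3 * np.eye(2)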
def replace_with_expanded(expr, done=True, excluded=None):
"""
Similar to 'replace_with_num' above except we replace SuperMatrixSymbols
with their expanded forms if they exist
Args:
expr - The current MatrixExpr
Returns:
expr - The expanded MatrixExpr
done - Boolean indicating whether no more expansions can be done
"""
from symgp.superexpressions import (SuperMatSymbol, SuperMatTranspose, SuperMatInverse, SuperMatAdd,
SuperMatMul, SuperDiagMat, SuperBlockDiagMat)
from symgp.kernels.kernel import KernelMatrix
if (isinstance(expr, MatMul) or isinstance(expr, MatAdd) or
isinstance(expr, Inverse) or isinstance(expr, Transpose)):
sub_exprs = []
for arg in expr.args:
expanded, done = replace_with_expanded(arg, done, excluded=excluded)
sub_exprs.append(expanded)
if expr.is_MatMul:
e = SuperMatMul(*sub_exprs)
elif expr.is_MatAdd:
e = SuperMatAdd(*sub_exprs)
elif expr.is_Inverse:
e = SuperMatInverse(*sub_exprs)
else: # expr.is_Transpose
e = SuperMatTranspose(*sub_exprs)
return e, done
elif excluded is not None and any([isinstance(expr, c) for c in excluded]):
return expr, done
elif isinstance(expr, SuperMatSymbol) and expr.expanded is not None:
return expr.expanded, False
else:
return expr, done
def expand_to_fullexpr(expr, num_passes=-1, excluded=None):
"""
Expands a MatrixExpr composed of SuperMatSymbols by substituting any SuperMatSymbol
with an 'expanded'
Args:
expr - The expression to expand
num_passes - The number of passes to make through the expression. -1 indicates that
we pass through expression until no more substitutions can be made.
excluded - The classes (and its subclasses) to exclude substitution with expanded
expressions for.
Return:
e - The expanded expression
"""
e = expr
# Keep on passing through expression until no more substitutions can be made
if num_passes == -1:
done = False
while not done:
done = True
e, done = replace_with_expanded(e, done, excluded=excluded)
else:
for _ in range(num_passes):
e, _ = replace_with_expanded(e, excluded=excluded)
return e.doit().doit()
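# Illustrative usage of ``expand_to_fullexpr`` (hypothetical symbols; K is assumed to
# have been created with an ``expanded`` expression, e.g.
# K = SuperMatSymbol(n, n, 'K', expanded=A*B)):
#
#     expand_to_fullexpr(K + C)   # -> A*B + C
#
# Every SuperMatSymbol carrying an ``expanded`` attribute is substituted
# recursively until no more substitutions can be made.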
def _replace_with_MatSym(expr, rule):
"""
Replaces the MatrixExpr expression in 'expr' given by the replacement rule
Args:
expr - The expression which we want to replace sub-expressions in.
rule - A tuple that matches the old expression (old_expr) to the replacement (repl) as
(old_expr, repl)
Returns:
subbed_expr - 'expr' with the substitution made
"""
from collections import deque
from symgp import SuperDiagMat, SuperBlockDiagMat
old_expr, repl = rule
len_old_expr = len(old_expr.args) # Number of arguments. TODO: Check for cases where k is a single symbol
# Table used to build back tree.
#
# We pair a key of a sub_expression with an id 'k' that indicates sub_expr was the k'th entry into the table with either:
#
# - A list of (sub_expr.args[i], k) tuples indicating the keys from which to search for the
# next expressions in the tree in their correct order:
#
# {(sub_expr, j): [(sub_expr.args[0],m),(sub_expr.args[1],l), ...]}
#
# - An Expr that we substitute in for sub_expr when it is retrieved by higher nodes in the expression tree:
#
# {(sub_expr, j): sub_expr_repl}
#
# where sub_expr_repl is the expression that we replace sub_expr with. It can be sub_expr itself or a replacement
# we define.
tree_table = defaultdict(list)
queue = deque(((expr, 0, 0),))
#tree_table[(full_expr,0)] = list(zip(list(full_expr.args),[1]*len(full_expr.args)))
curr_id = 1 # An id to uniquely identify each sub-expression i.e. we can have similar expressions at the same level
while len(queue) > 0:
sub_expr, level, old_id = queue.pop()
if (isinstance(sub_expr, MatMul) or isinstance(sub_expr, MatAdd) or
isinstance(sub_expr, Inverse) or isinstance(sub_expr, Transpose) or
isinstance(sub_expr, SuperDiagMat) or isinstance(sub_expr, SuperBlockDiagMat)):
# Match current rule to expressions in this sub expression
len_sub_expr = len(sub_expr.args)
i = 0
while i < len_sub_expr:
j = 0
l = 0 # Used when we need to skip over symbols e.g. for addition where we may need to match a subset of args.
matched, skipped = _match_with_pat(sub_expr,i,old_expr)
if matched: # Match found: Replace match with pattern
# Determine the level of the new replacement expression in the expression tree
if len_old_expr == len_sub_expr:
new_level = level
else:
new_level = level + 1
queue.appendleft((repl, new_level, curr_id))
# We need to re-order sub_expr - mainly for matches in MatAdds with remainders e.g. matching A in A + B + C
if skipped:
old_sub_expr = sub_expr
# Get remainder after removing old_expr
rem = sub_expr
for c in old_expr.args:
rem -= c
rem = [rem] if not isinstance(rem,MatAdd) else list(rem.args)
# Create new expression
new_args = list(old_expr.args) + rem
sub_expr = type(sub_expr)(*new_args)
# As we changed the sub_expr we have to reassign the elements of the old one
if tree_table.get((old_sub_expr, level, old_id)):
old_values = tree_table.pop((old_sub_expr, level, old_id))
tree_table[(sub_expr, level, old_id)] = old_values + [(repl, new_level, curr_id)]
else:
tree_table[(sub_expr, level, old_id)] = [(repl, new_level, curr_id)]
curr_id += 1
else:
# Check entry for sub_expr exists
tree_table[(sub_expr, level, old_id)].append((repl, new_level, curr_id))
curr_id += 1
# Start after pattern
i += len_old_expr
else:
queue.appendleft((sub_expr.args[i], level+1, curr_id))
# Check entry for sub_expr exists
tree_table[(sub_expr, level, old_id)].append((sub_expr.args[i], level+1, curr_id))
curr_id += 1
# Start at next symbol
i += 1
else:
# Add expression for this node
tree_table[(sub_expr, level, old_id)] = sub_expr
# Sort based on level in descending order
sorted_tree_table = sorted(tree_table.items(), key=lambda elem: elem[0][1], reverse=True)
# Create expression from table
for p, c in sorted_tree_table:
# Skip terminal nodes else update tree table for non-terminal nodes
if p[0] == c:
continue
else:
# Create MatrixExpr using the elements in the value c, which is a list, for the key p and
# then update 'tree_table'
tree_table[p] = type(p[0])(*[tree_table[e] for e in c])
# Rewrite full expression
subbed_expr = tree_table[sorted_tree_table[-1][0]]
return subbed_expr
def _match_with_pat(expr, start, pat):
"""
Matches an expression or a portion of it to a pattern.
Args:
expr - The expression we want to match.
start - The starting index into expr
pat - The pattern we want to find in 'expr'. This can be:
- A MatrixExpr. Here we aim to find pat in 'expr'
- A Kernel. We aim to find KernelMatrix objects/MatrixExprs
composed of KernelMatrix objects that match Kernel
Returns:
matched - Indicates whether the pattern was found in 'expr'.
skipped - Indicates whether we had to skip over symbols when matching
in a MatAdd expression.
(Optional)
pattern - The pattern that we match. Only returned for when pat is a Kernel
repl - The replacement expression. Only returned for when pat is a Kernel
Examples:
- expr = A*B*D, pat = A*B -> matched = True, skipped = False
- expr = A + B + C, pat = A + C -> matched = True, skipped = True (as we had to skip over B)
Note that 'skipped' is determined based on the order of expr.args.
- expr = K(a,u)*K(u,u)*K(u,b), pat = Q (Q.sub_kernels=[K,K], Q.M=K(u,u)) -> matched = True, skipped = True
(We match the whole expression with Q), pattern = K(a,u)*K(u,u)*K(u,b), repl = Q(a,b)
"""
from symgp import Kernel
len_expr = len(expr.args)
matched, skipped = False, False
#print("Pat: ", pat, type(pat))
#print("Expr: ", expr)
if isinstance(pat, MatrixExpr):
if isinstance(pat, MatrixSymbol):
from symgp import SuperMatMul
pat = SuperMatMul(pat)
#print("Expr: ", expr)
len_pat = len(pat.args)
j = 0
l = 0
while j < len_pat and start + l + j < len_expr:
if start + l + j >= len_expr:
break
#print("Current expr: ", expr.args[start+l+j])
#print("current pat: ", pat.args[j])
if (expr.args[start+l+j].doit() != pat.args[j].doit()):# or (sub_expr.args[i+l+j].match(k.args[j])):
#print("Not matched")
#foundMatch = False
# As additions may be stored in any order, we need to skip symbols so that we can match
# the pattern
if isinstance(pat, MatAdd) and isinstance(expr, MatAdd):
l += 1
else:
break
else:
#print("Matched")
j += 1
if j == len_pat:
matched = True
#print("Matched full expr")
if l > 0:
skipped = True
return matched, skipped
elif isinstance(pat, Kernel):
#print("Kernel pat: ", pat)
kern_vars = get_all_kernel_variables(expr)
# Get all possible kernel patterns
patterns = []
#print("kern_vars: ", kern_vars)
for v1 in kern_vars:
patterns.extend([pat(v1,v2) for v2 in kern_vars])
# Sort patterns based on length of underlying expression
def sort_func(e):
e_full = e.to_full_expr()
if isinstance(e_full, MatrixSymbol):
return 1
else:
return len(e_full.args)
patterns = sorted(patterns, key=sort_func)[::-1]
#print("patterns: ", patterns)
# Find a match in our list of patterns
for i, p in enumerate(patterns):
#print("p: ",p," patterns[i]: ",patterns[i])
#print("p.expanded: ", p.expanded)
#print("p.to_full_expr(): ", p.to_full_expr())
#print("expr, start: ", expr, start)
#print("patterns: ", patterns)
#print("matched_pat: ", p.to_full_expr())
matched, skipped = _match_with_pat(expr, start, p.to_full_expr())
#print("matched, skipped, matched_pat, repl: ", matched, skipped, p.to_full_expr(), p)
if matched:
return matched, skipped, p.to_full_expr(), p
return matched, skipped, None, None
else:
raise Exception("Invalid pattern 'pat': Must be a Kernel object or a MatrixExpr")
def _replace_with_Kernel(expr, kern):
"""
Replaces the kernel expression in 'expr' given by the replacement rule
Args:
expr - The expression which we want to replace sub-expressions in.
kern - The Kernel we want to replace an expression in 'expr' with. The expression
belongs to the set of expressions that 'kern' represents.
For example, if
M = Constant('M',n,n,full_expr=K(u,u).I)
kern = Kernel(sub_kernels=[K,K],kernel_type='mul',mat=M,name='Q')
we replace all expressions of form K({v1},u)*K(u,u).I*K(u,{v2}) where {v1} and {v2} can be
any variable.
Returns:
subbed_expr - 'expr' with the substitution made
"""
from collections import deque
from symgp import Kernel, SuperDiagMat, SuperBlockDiagMat
# Table used to build back tree.
#
# We pair a key of a sub_expression with an id 'k' that indicates sub_expr was the k'th entry in the table with either:
#
# - A list of (sub_expr.args[i], k) tuples indicating the keys from which to search for the
# next expressions in the tree in their correct order:
#
# {(sub_expr, j): [(sub_expr.args[0],m),(sub_expr.args[1],l), ...]}
#
# - An Expr that we substitute in for sub_expr when it is retrieved by higher nodes in the expression tree:
#
# {(sub_expr, j): sub_expr_repl}
#
# where sub_expr_repl is the expression that we replace sub_expr with. It can be sub_expr itself or a replacement
# we define.
tree_table = defaultdict(list)
if isinstance(kern,Kernel):
queue = deque(((expr, 0, 0),)) # Tuples of (expression, tree level, expression id)
curr_id = 1 # An id to uniquely identify each sub-expression i.e. we can have similar expressions at the same level
while len(queue) > 0:
sub_expr, level, old_id = queue.pop()
#print("sub_expr: {}, level: {}, old_id: {}".format(sub_expr, level, old_id))
if (isinstance(sub_expr, MatMul) or isinstance(sub_expr, MatAdd) or
isinstance(sub_expr, Inverse) or isinstance(sub_expr, Transpose) or
isinstance(sub_expr, SuperDiagMat) or isinstance(sub_expr, SuperBlockDiagMat)):
# TODO: Add functionality to replace expressions such as A+D in D + 2*A.
len_sub_expr = len(sub_expr.args)
i = 0
while i < len_sub_expr:
matched, skipped, pattern, repl = _match_with_pat(sub_expr, i, kern)
#print("i: ", i)
#print("Matched: {}, Skipped: {}, Pattern: {}, Repl: {}".format(
# matched, skipped, pattern, repl))
# Update 'tree_table'
if matched: # Match found: Replace match with pattern
# Determine the level of the new replacement expression in the expression tree
if len(pattern.args) == len_sub_expr:
new_level = level
else:
new_level = level + 1
queue.appendleft((repl, new_level, curr_id))
# We need to re-order sub_expr - mainly for matches in MatAdds with
# remainders e.g. matching A in A + B + C
if skipped:
old_sub_expr = sub_expr
# Get remainder after removing the matched pattern
rem = sub_expr
for c in pattern.args:
rem -= c
rem = [rem] if not isinstance(rem,MatAdd) else list(rem.args)
# Create new expression
new_args = list(pattern.args) + rem
sub_expr = type(sub_expr)(*new_args)
# As we changed the sub_expr we have to reassign the elements of the old one
if tree_table.get((old_sub_expr, level, old_id)):
old_values = tree_table.pop((old_sub_expr, level, old_id))
tree_table[(sub_expr, level, old_id)] = old_values + [(repl, new_level, curr_id)]
else:
tree_table[(sub_expr, level, old_id)] = [(repl, new_level, curr_id)]
else:
# Check entry for sub_expr exists
tree_table[(sub_expr, level, old_id)].append((repl, new_level, curr_id))
#print("Key: {}, Val: {}".format((sub_expr, level, old_id),
# (repl, new_level, curr_id)))
# Start after pattern
if isinstance(pattern, MatrixSymbol):
i += 1
else:
i += len(pattern.args)
else:
#print("Key: {}, Val: {}".format((sub_expr, level, old_id),
# (sub_expr.args[i], level+1, curr_id)))
queue.appendleft((sub_expr.args[i], level+1, curr_id))
# Check entry for sub_expr exists
tree_table[(sub_expr, level, old_id)].append((sub_expr.args[i], level+1, curr_id))
# Start at next symbol
i += 1
curr_id += 1
else:
# Add expression for this node
tree_table[(sub_expr, level, old_id)] = sub_expr
else:
raise Exception("Invalid 'old_expr': Should be a Kernel, MatMul or MatAdd object")
# Sort based on level in descending order
sorted_tree_table = sorted(tree_table.items(), key=lambda elem: elem[0][1], reverse=True)
#print("\n")
#for ele in sorted_tree_table:
# print(ele)
#print("\n")
# Create expression from table
for p, c in sorted_tree_table:
# Skip terminal nodes else update tree table for non-terminal nodes
if p[0] == c:
continue
else:
# Create MatrixExpr using the elements in the value c, which is a list, for the key p and
# then update 'tree_table'
tree_table[p] = type(p[0])(*[tree_table[e] for e in c])
#print("p: {}, tree_table[p]: {}".format(p, tree_table[p]))
subbed_expr = tree_table[sorted_tree_table[-1][0]]
return subbed_expr
def replace(expr, rules):
"""
Replaces expressions in expr with the given rules.
Args:
expr - The input expression
rules - A list where elements can be:
- A tuple matching an old MatrixExpr to a new MatSym, or
- A Kernel object that has an underlying representation of matrix expressions
to match e.g.
M = Constant('M',n,n,full_expr=K(u,u).I)
Q = Kernel(sub_kernels=[K,K],kernel_type='mul',mat=M,name='Q')
matches expressions of the form 'K({v1},u)*K(u,u).I*K(u,{v2})' where {v1} and {v2} can be
any variable
N.B. For an expression of the form -1*A we must replace it with another expression
of the form -1*B and not A with B.
Returns:
The expression with the substitutions made.
"""
from symgp import Kernel
# Get the full expression
#full_expr = expand_to_fullexpr(expr)
full_expr = expr
# For each substitution rule, replace the corresponding sub-expression
for r in rules:
if isinstance(r,Kernel):
full_expr = _replace_with_Kernel(full_expr, r)
elif isinstance(r,tuple) and isinstance(r[0],MatrixExpr) and isinstance(r[1],MatrixSymbol):
full_expr = _replace_with_MatSym(full_expr, r)
else:
raise Exception("Invalid matching of expressions to replacements. Rule must be (old_expr,repl) or kern")
return full_expr.doit()
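# Illustrative usage of replace with an (old_expr, repl) rule (a sketch; the symbols are
# hypothetical). Replacing the product A*B with a single symbol S:
#
#   from sympy import MatrixSymbol, symbols
#   n = symbols('n')
#   A, B, C = (MatrixSymbol(s, n, n) for s in 'ABC')
#   S = MatrixSymbol('S', n, n)
#   replace(A*B + C, [(A*B, S)])   # -> S + C
#
# Kernel-based rules follow the form shown in the docstring above (a Constant with a
# full_expr wrapped in a Kernel).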
def replace_with_SuperMat(expr, d):
"""
Similar to replace_with_num above except we replace symbols with the
corresponding SuperMatExpr symbols
"""
from symgp.superexpressions import SuperMatMul, SuperMatAdd, SuperMatInverse, SuperMatTranspose
if isinstance(expr, Symbol) or isinstance(expr, Number):
if isinstance(expr, Symbol):
try:
return d[expr.name]
except KeyError as e:
print("Error: No SuperMatSymbol substitute was specified for %s" % (e))
else:
return expr
r = []
for arg in expr.args:
r.append(replace_with_SuperMat(arg, d))
if isinstance(expr, Expr):
if expr.is_Mul:
return SuperMatMul(*r)
elif expr.is_Add:
return SuperMatAdd(*r)
elif expr.is_Inverse:
return SuperMatInverse(*r)
else: # expr.is_Transpose
return SuperMatTranspose(*r)
else:
raise Exception("Expression should be a MatrixExpr")
######## LaTeX printing ########
class matLatPrinter(LatexPrinter):
def _print_Symbol(self, expr):
if expr.name[0] == SMALL_SIGMA_GREEK:
return self._print(Symbol(r'\sigma' + expr.name[1:]))
else:
return LatexPrinter().doprint(expr)
def _print_SuperMatSymbol(self, expr):
mat_type = expr.mat_type
#print("mat_type: ",mat_type)
#print("expr: ",expr)
name = expr.name
"""if (mat_type == 'mean' or mat_type == 'covar' or mat_type == 'invcovar' or
mat_type == 'natmean' or mat_type == 'precision'):
dep_vars = expr.dep_vars
cond_vars = expr.cond_vars
if mat_type == 'mean':
if not isinstance(dep_vars[0],list):
name = '\mu_{'+','.join([self._print(v) for v in dep_vars])
else:
dep_vars = list(set(dep_vars[0]).union(set(dep_vars[1])))
name = '\mu_{'+','.join([self._print(v) for v in dep_vars])
elif mat_type == 'covar':
if not isinstance(dep_vars[0],list):
name = '\Sigma_{'+','.join([self._print(v) for v in dep_vars])
else:
dep_vars = list(set(dep_vars[0]).union(set(dep_vars[1])))
name = '\Sigma_{'+','.join([self._print(v) for v in dep_vars])
elif mat_type == 'invcovar':
if not isinstance(dep_vars[0],list):
name = '\Sigma^{-1}_{'+','.join([self._print(v) for v in dep_vars])
else:
dep_vars = list(set(dep_vars[0]).union(set(dep_vars[1])))
name = '\Sigma^{-1}_{'+','.join([self._print(v) for v in dep_vars])
elif mat_type == 'natmean':
if not isinstance(dep_vars[0],list):
name = '\eta_{1,'+','.join([self._print(v) for v in dep_vars])
else:
dep_vars = list(set(dep_vars[0]).union(set(dep_vars[1])))
name = '\eta_{1,'+','.join([self._print(v) for v in dep_vars])
else: # mat_type == 'precision'
if not isinstance(dep_vars[0],list):
name = '\eta_{2,'+','.join([self._print(v) for v in dep_vars])
else:
dep_vars = list(set(dep_vars[0]).union(set(dep_vars[1])))
name = '\eta_{2,'+','.join([self._print(v) for v in dep_vars])
if len(cond_vars) > 0:
name += '|'+','.join([self._print(v) for v in cond_vars])
name += '}'
return name
else: # Just use the Symbol converted latex form
if expr.name[-2:] == '_s':
return r'\mathbf{'+expr.name[:-2]+'}_{*}'
else:
return r'\mathbf{'+expr.name+'}'"""
return r'\mathbf{' + expr.name + '}'
def _print_SuperMatInverse(self, expr):
return self._print(expr.args[0]) +'^{-1}'
def _print_SuperMatTranspose(self, expr):
return self._print(expr.args[0]) +'^T'
def _print_SuperDiagMat(self, expr):
return r"\text{diag}["+self._print(expr.arg)+"]"
def _print_SuperBlockDiagMat(self, expr):
return r"\text{blockdiag}["+self._print(expr.arg)+"]"
def _print_SuperMatAdd(self, expr):
terms = list(expr.args)
# Fix to stop the first negative term being rendered as -1 in LaTeX i.e. we want
# A - BCB^{T} in LaTeX instead of -1BCB^{T} + A
if terms[0].args[0] == S.NegativeOne:
terms = terms[1:] + terms[:1]
tex = " + ".join(map(self._print, terms))
return tex
def _print_MVG(self, expr):
from symgp.kernels.kernel import KernelMatrix
excluded_classes = [KernelMatrix]
# Form MVG name
latex_name = r'\begin{align*}' + "\n"
latex_name += expr.prefix+r'\left('
vars_name_pre = ','.join([self._print(v) for v in expr.variables]) # Name of vars
if len(expr.cond_vars) > 0:
vars_name_pre += '|'+','.join([self._print(v) for v in expr.cond_vars])
latex_name += vars_name_pre + r'\right)'
# N(mean, covar)
latex_name += r'&= \mathcal{N}\left('
if len(expr.variables) > 1:
vars_name_N = r'\left[\begin{smallmatrix}'
for i in range(len(expr.variables)-1):
vars_name_N += self._print(expr.variables[i])+r'\\'
vars_name_N += self._print(expr.variables[-1])+r'\end{smallmatrix}\right]'
# Mean
mean_short_name = r'\mathbf{m}_{'+vars_name_pre+r'}'
if expr.mean.blockform is not None:
mean_name = r'\left[\begin{smallmatrix}'
for i in range(len(expr.mean.blockform)-1):
mean_name += self._print(expand_to_fullexpr(
expr.mean.blockform[i], excluded=excluded_classes).doit())+r'\\'
mean_name += self._print(expand_to_fullexpr(
expr.mean.blockform[-1], excluded=excluded_classes).doit())+r'\end{smallmatrix}\right]'
# Covariance
covar_short_name = r'\mathbf{\Sigma}_{'+vars_name_pre+r'}'
if expr.covar.blockform is not None:
covar_name = r'\left[\begin{smallmatrix}'
for i in range(len(expr.covar.blockform)-1):
for j in range(len(expr.covar.blockform[i])-1):
covar_name += self._print(expand_to_fullexpr(
expr.covar.blockform[i][j], excluded=excluded_classes).doit())+r'&'
covar_name += self._print(expand_to_fullexpr(
expr.covar.blockform[i][-1], excluded=excluded_classes).doit())+r'\\'
# Add last row
for j in range(len(expr.covar.blockform[-1])-1):
covar_name += self._print(expand_to_fullexpr(
expr.covar.blockform[-1][j], excluded=excluded_classes).doit())+r'&'
covar_name += self._print(expand_to_fullexpr(
expr.covar.blockform[-1][-1], excluded=excluded_classes).doit())+r'\end{smallmatrix}\right]'
# Write shortened distribution expression
latex_name += vars_name_N + r';' + mean_short_name + r',' + covar_short_name + r'\right)\\'+"\n"
else:
mean_short_name = r'\mathbf{m}_{'+vars_name_pre+r'}'
mean_name = self._print(expand_to_fullexpr(
expr.mean.expanded, excluded=excluded_classes).doit()) if expr.mean.expanded is not None else ''
covar_short_name = r'\mathbf{\Sigma}_{'+vars_name_pre+r'}'
covar_name = self._print(expand_to_fullexpr(
expr.covar.expanded, excluded=excluded_classes).doit()) if expr.covar.expanded is not None else ''
# Write shortened distribution expression
var_name_N = self._print(expr.variables[0])
latex_name += var_name_N + r';' + mean_short_name+r','+covar_short_name+r'\right)\\' + "\n"
# Add full expressions for mean and covariance below
if mean_name != '' and covar_name != '':
latex_name += mean_short_name + r' &= ' + mean_name + r'\\' + "\n" + \
covar_short_name + r' &= ' + covar_name + r'\\' + "\n"
latex_name += r'\end{align*}'
return latex_name
def _print_Identity(self, expr):
return r'\mathbf{I}'
#def _print_NegativeOne(self, expr):
# return r'-'
def _print_ZeroMatrix(self, expr):
return r'\mathbf{0}'
def _print_KernelMatrix(self, expr):
latex_name = r'\mathbf{'+expr.K.name+'}_{'+matLatex(expr.inputs[0])+','+\
matLatex(expr.inputs[1])+'}'
return latex_name
def matLatex(expr, profile=None, **kwargs):
"""
Returns the LaTeX code for the given expression
"""
if profile is not None:
profile.update(kwargs)
else:
profile = kwargs
out_latex = matLatPrinter(profile).doprint(expr)
#Clean up string
out_latex = re.sub(r'(\+.\-1)', '-', out_latex) # Change '+ -1' to '-'
return out_latex
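# Illustrative usage of matLatex (a sketch; the symbol name is hypothetical and the exact
# output depends on the printer overrides above - plain sympy MatrixSymbols fall back to the
# default LaTeX printer, while symgp SuperMatSymbols are printed in bold via \mathbf):
#
#   from sympy import MatrixSymbol, symbols
#   n = symbols('n')
#   A = MatrixSymbol('A', n, n)
#   matLatex(A.I * A.T)   # -> a LaTeX string along the lines of 'A^{-1} A^{T}'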
def updateLatexDoc(filename, expr):
"""
Updates the latex filename with the given expression.
This function is mainly used to typeset the LaTeX code that is produced by calling
utils.matLatex(expr).
We append the expression to the list of 'dmath' environments from the breqn package.
For MVGs we also display the full expressions for the mean and covariance below the
expression for the distribution.
Args:
filename - The '.tex' file to which we write the LaTeX code.
expr - The expression (or list of expressions) for which we want to generate LaTeX.
This can be any native SymPy expression (and the subclasses in this library)
or an MVG.
"""
import subprocess
from MVG import MVG
with open(filename,'r+') as f:
contents = f.read()
split_contents = re.split(r"(.+\\begin\{document\}\n)(.+)(\\end\{document\}.*)", contents, flags=re.DOTALL)
edited_content = split_contents[2]
if edited_content == '\n':
edited_content = ''
if not isinstance(expr, list):
expr = [expr]
for e in expr:
# Write our expression to the end of the file
if isinstance(e, MVG):
edited_content += r'\section{$'+ matLatex(e.name) + r'$}' + "\n"
edited_content += r'\begingroup\makeatletter\def\f@size{12}\check@mathfonts'+ "\n" + \
r'\def\maketag@@@#1{\hbox{\m@th\large\normalfont#1}}'+ "\n"
edited_content += matLatex(e)
edited_content += r'\endgroup'+ "\n\n"
else:
edited_content += r'\section{expression}' + "\n"
edited_content += "\\begin{align*}\n"
edited_content += matLatex(e)
edited_content += "\n\\end{align*}\n"
split_contents[2] = edited_content
f.seek(0)
f.write(''.join(split_contents))
f.truncate()
subprocess.check_call(["latexmk", "-pdf",str(filename)])
subprocess.check_call(["open", filename.split(".")[0]+".pdf"])
######## Expression conversion functions ########
def expand_mat_sums(sums):
"""
Helper method for 'expand_matmul'
Based on 'def _expandsums' in sympy.core.mul
"""
from symgp.superexpressions.supermatadd import SuperMatAdd, SuperMatMul
L = len(sums)
if L == 1:
return sums[0]
terms = []
left = expand_mat_sums(sums[:L//2]).args
right = expand_mat_sums(sums[L//2:]).args
terms = [a*b for a in left for b in right]
added = SuperMatAdd(*terms)
return added
def expand_matmul(expr):
"""
Expands MatMul objects e.g. C*(A+B) -> C*A + C*B
Based on 'def _eval_expand_mul' in sympy.core.mul
"""
from symgp.superexpressions import SuperMatAdd
sums, rewrite = [], False
for factor in expr.args:
if isinstance(factor, MatrixExpr) and factor.is_MatAdd:
sums.append(factor)
rewrite = True
else:
sums.append(Basic(factor))
if not rewrite:
return expr
else:
if sums:
terms = expand_mat_sums(sums).args
args = []
for term in terms:
t = term
if isinstance(t,MatrixExpr) and t.is_MatMul and any(a.is_MatAdd if isinstance(a,MatrixExpr) else False for a in t.args):
t = expand_matmul(t)
args.append(t)
return SuperMatAdd(*args).doit()
else:
return expr
def expand_matexpr(expr):
"""
Expands matrix expressions (MatrixExpr)
"""
from symgp.superexpressions import SuperMatAdd
if expr.is_MatAdd:
args = []
args.extend([expand_matexpr(a) if a.is_MatMul else a for a in expr.args])
return SuperMatAdd(*args).doit()
elif expr.is_MatMul:
return expand_matmul(expr).doit()
else:
return expr.doit()
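# Illustrative example of the expansion helpers above (a sketch; symbols are hypothetical).
# expand_matmul/expand_matexpr distribute matrix products over sums:
#
#   from sympy import MatrixSymbol, symbols
#   n = symbols('n')
#   A, B, C = (MatrixSymbol(s, n, n) for s in 'ABC')
#   expand_matexpr(C*(A + B))   # -> C*A + C*B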
def collect(expr, syms, muls, evaluate=None):
"""
Collect additive terms of a matrix expression
Adapted from 'collect' function in SymPy library (https://github.com/sympy/sympy/blob/master/sympy/simplify/radsimp.py)
Args:
expr - The expression to collect terms for
syms - A list of 1 or 2 symbols to collect terms for.
muls - The matching multiplication-side indicators ('left'/'right'), one per symbol in 'syms'.
e.g. syms=[B,A], muls=['left','right'] corresponds to collecting terms for expressions
of the form B*{W1}*A + B*{W2}*A + {W3}, where {W1}, {W2} and {W3} are matrix
expressions, to give B*({W1} + {W2})*A + {W3}
"""
from symgp.superexpressions import SuperMatMul, SuperMatAdd
if not isinstance(expr, MatAdd):
return expr
if evaluate is None:
evaluate = global_evaluate[0]
def make_expression(terms):
product = [term for term in terms]
return SuperMatMul(*product)
def parse_expression(terms, pattern, mul):
"""Parse terms searching for a pattern.
terms is a list of MatrixExprs
pattern is an expression treated as a product of factors
Returns tuple of unmatched and matched terms.
"""
if (not isinstance(pattern, MatrixSymbol) and
not isinstance(pattern, Transpose) and
not isinstance(pattern, Inverse) and
not isinstance(pattern, MatAdd)):
pattern = pattern.args
else:
pattern = (pattern,)
if len(terms) < len(pattern):
# pattern is longer than matched product
# so no chance for positive parsing result
return None
else:
if not isinstance(pattern, MatAdd):
pattern = [elem for elem in pattern]
terms = terms[:] # need a copy
elems = []
for elem in pattern:
if elem.is_Number:
# a constant is a match for everything
continue
for j in range(len(terms)):
# Search from right if we have a duplicate of 'pattern' in 'terms'. We only want to match one
# based on whether we collect terms on the right or left hand side given by 'mul'.
if mul == 'right':
k = len(terms)-1 - j
else:
k = j
if terms[k] is None:
continue
term = terms[k]
if (((not (isinstance(term, elem.__class__) and (isinstance(elem, MatrixSymbol) or
isinstance(elem, Transpose) or isinstance(elem, Inverse))))
and term.match(elem) is not None) or
(term == elem)):
# found common term so remove it from the expression
# and try to match next element in the pattern
elems.append(terms[k])
terms[k] = None
break
else:
# pattern element not found
return None
return [_f for _f in terms if _f], elems
# Check that syms is of length 1 or 2
if iterable(syms):
syms = [s for s in syms]
if len(syms) > 2:
raise Exception("Too many matching symbols. Maximum is 2")
else:
syms = [syms]
# Check that muls is either a list of same length as syms or a string for which
# syms only has one element
if iterable(muls):
muls = [m for m in muls]
mul = muls[0]
if len(muls) != len(syms):
raise Exception("Number of muls should match syms.")
else:
mul = muls
if not isinstance(mul,str) and len(syms) > 1:
raise Exception("Number of muls should match syms.")
expr = sympify(expr)
# Get all expressions in summation
# If syms[0] is a MatAdd, collect terms in summa that are equal to the symbol
if isinstance(syms[0], MatAdd) and isinstance(expr, MatAdd):
matched, rejected = ZeroMatrix(expr.shape[0],expr.shape[1]), expr
for s in syms[0].args:
for t in rejected.args:
if s == t:
matched += t
rejected -= t
break
summa = [matched]
if matched != expr:
if isinstance(rejected,MatAdd):
summa += [i for i in rejected.args]
else:
summa += [rejected]
else:
summa = [i for i in expr.args]
collected, disliked = defaultdict(list), ZeroMatrix(expr.shape[0],expr.shape[1])
# For each product in the summation, match the first symbol and update collected/
# disliked depending on whether a match was/wasn't made.
for product in summa:
if isinstance(product, MatMul):
terms = [i for i in product.args]
else:
terms = [product]
# Only look at first symbol
symbol = syms[0]
result = parse_expression(terms, symbol, mul)
# If symbol matched a pattern in terms, we collect the multiplicative terms for the
# symbol into a dictionary 'collected'
if result is not None:
terms, elems = result
index = Identity(elems[0].shape[0])
for elem in elems:
index *= elem
terms = make_expression(terms)
if isinstance(terms, Number):
if mul == 'left':
terms = SuperMatMul(Identity(index.shape[1]),terms)
else:
terms = SuperMatMul(Identity(index.shape[0]),terms)
collected[index].append(terms)
else:
# none of the patterns matched
disliked += product
# add terms now for each key
collected = {k: SuperMatAdd(*v) for k, v in collected.items()}
if isinstance(syms,list) and isinstance(muls,list) and len(syms) == 2: # only index the second symbol/mul when two were given
second_mul = muls[1]
first_sym, second_sym = syms
collected[first_sym] = collect(collected[first_sym],[second_sym],second_mul)
if not disliked.is_ZeroMatrix:
if mul == 'left':
collected[Identity(disliked.shape[0])] = disliked
else:
collected[Identity(disliked.shape[1])] = disliked
if evaluate:
if mul == 'left':
if len(collected.items()) == 1:
return [key*val for key, val in collected.items()][0]
else:
if expr.is_MatMul:
return SuperMatMul(*[key*val for key, val in collected.items()])
else:
return SuperMatAdd(*[key*val for key, val in collected.items()])
else: # mul == 'right'
if len(collected.items()) == 1:
return [val*key for key, val in collected.items()][0]
else:
if expr.is_MatMul:
return SuperMatMul(*[val*key for key, val in collected.items()])
else:
return SuperMatAdd(*[val*key for key, val in collected.items()])
else:
return collected
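# Illustrative usage of collect, following the example form in the docstring (a sketch; the
# symbols and square shapes are hypothetical and would normally be symgp Variables/Constants
# rather than plain sympy MatrixSymbols):
#
#   from sympy import MatrixSymbol, symbols
#   n = symbols('n')
#   A, B, W1, W2, W3 = (MatrixSymbol(s, n, n) for s in ('A', 'B', 'W1', 'W2', 'W3'))
#   collect(B*W1*A + B*W2*A + W3, [B, A], ['left', 'right'])   # -> B*(W1 + W2)*A + W3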
def accept_inv_lemma(e, start, end):
"""
Checks whether e.args[start:end+1] matches the matrix form E^{-1}*F*(H - G*E^{-1}*F)^{-1}.
Returns True if the slice matches, otherwise False.
"""
def checkSym(a):
return isinstance(a, MatrixSymbol) or isinstance(a, Inverse) or isinstance(a, Transpose)
def checkMatExpr(a, class_name):
return isinstance(a, class_name)
if len(e.args) < 3:
return False
arg_1, arg_2, arg_3 = e.args[start:end+1]
# Match E^{-1}
if not checkSym(arg_1):
return False
# Match E^{-1}F
if not checkSym(arg_2):
return False
# Match E^{-1}F({MatExpr})^{-1}
if not checkMatExpr(arg_3, Inverse):
return False
# Match E^{-1}F({MatAdd})^{-1}
if not checkMatExpr(arg_3.arg, MatAdd):
return False
# Match E^{-1}F(A + B)^{-1}
if len(arg_3.arg.args) == 2:
# Check whether it is E^{-1}F(A + MatMul)^{-1} or E^{-1}F(MatMul + B)^{-1}
if checkSym(arg_3.arg.args[0]) and checkMatExpr(arg_3.arg.args[1], MatMul):
arg_3_args = arg_3.arg.args[1].args
elif checkSym(arg_3.arg.args[1]) and checkMatExpr(arg_3.arg.args[0], MatMul):
arg_3_args = arg_3.arg.args[0].args
else:
return False
else:
return False
# Match E^{-1}F(A + GCD)^{-1} or E^{-1}F(A + (-1)*GCD)^{-1}
if len(arg_3_args) == 3 and not isinstance(arg_3_args[0], type(S.NegativeOne)):
# Check whether CD matches E^{-1}F
if not (arg_3_args[1] == arg_1 and arg_3_args[2] == arg_2):
return False
elif len(arg_3.arg.args[1].args) == 4 and isinstance(arg_3.arg.args[1].args[0], type(S.NegativeOne)):
# Check whether CD matches E^{-1}F
if not (arg_3_args[2] == arg_1 and arg_3_args[3] == arg_2):
return False
else:
return False
# Successful match
return True
def check_inv_lemma(expr):
if len(expr.args) == 3 and accept_inv_lemma(expr,0,2):
return True
else:
return False
def simplify(expr):
"""
A simplification algorithm.
Returns a tuple of (simps, subs) - see the inline comments below for their contents.
"""
from symgp.superexpressions import SuperMatSymbol
depth = get_max_depth(expand_to_fullexpr(expr))
simps = [] # The simplified expressions we have obtained with the associated substitutions
subs = {} # Pairs substituted expressions with the substitutions made
usedSubs = [] # The expressions we have substituted we have used so far
# Get the expressions at every depth
#exprs_by_depth = get_exprs_at_depth(expr, range(depth+1))
usedNames = SuperMatSymbol.getUsedNames()
min_expr = expr
for d in range(depth, -1, -1):
# Get the exprs at each depth for the new shortest expressions
exprs_by_depth = get_exprs_at_depth(min_expr, range(depth+1))
sub_exprs = exprs_by_depth[d]
min_syms = math.inf
# For each sub expression at level d check for copies in other parts of expressions
for s in sub_exprs:
repetitions = 0
# Find other similar expressions to s
for k in exprs_by_depth.keys():
if k == d:
continue
if s in exprs_by_depth[k]:
repetitions += exprs_by_depth[k].count(s)
# Make replacements if expression 's' is repeated elsewhere in the expression (i.e. it also
# appears at another depth) or it corresponds to the special matrix inverse lemma
if (repetitions > 0 or check_inv_lemma(s)) and s not in usedSubs:
# Update the used substituted expressions
usedSubs.append(s)
# TODO: Allow for using best or range of simplified exprs from previous depths
# Lower case for vectors and upper case for matrices
if s.shape[0] != 1 and s.shape[1] != 1:
avail_prefixes = string.ascii_uppercase
else:
avail_prefixes = string.ascii_lowercase
# Keep on searching for available replacement names
for c in avail_prefixes:
i = 0
r_name = c + '_{' + str(i) + '}'
while r_name in usedNames and i < 99:
i += 1
r_name = c + '_{' + str(i) + '}'
if not r_name in usedNames:
r = SuperMatSymbol(s.shape[0], s.shape[1], r_name, expanded=s)
repl_list = [(s,r)]
simp_expr = replace(min_expr, repl_list).doit()
if not subs.get(s):
subs[s] = r
simps.append(simp_expr.doit())
num_syms = get_num_symbols(simp_expr)
if num_syms < min_syms:
min_syms = num_syms
min_expr = simp_expr.doit()
# Check if we can collect any symbols on simp_expr. If we can add to simps.
if isinstance(simp_expr, MatAdd):
ends_of_expr_collection = get_ends(simp_expr)
for ends_of_expr in ends_of_expr_collection:
ends_dict_left = defaultdict(list)
ends_dict_right = defaultdict(list)
ends_dict_both = defaultdict(list)
# Collect left ends and right ends
for l in range(len(ends_of_expr)):
if len(ends_of_expr[l]) == 2:
ends_dict_left[ends_of_expr[l][0]].append(l)
ends_dict_right[ends_of_expr[l][1]].append(l)
ends_dict_both[ends_of_expr[l]].append(l)
else:
ends_dict_left[ends_of_expr[l][0]].append(l)
ends_dict_right[ends_of_expr[l][0]].append(l)
# If there are two or more repetitions of a symbol, collect
for key, val in ends_dict_left.items():
simped = collect(simp_expr,key,'left').doit()
if len(val) >= 2 and not simped in simps:
simps.append(simped)
for key, val in ends_dict_right.items():
simped = collect(simp_expr,key,'right').doit()
if len(val) >= 2 and not simped in simps:
simps.append(simped)
# For cases where both ends are repeated two or more times (e.g. A*P*A + A*Q*A + B), collect
for key, val in ends_dict_both.items():
simped = collect(simp_expr,[key[0],key[1]],['left','right']).doit()
if len(val) >= 2 and not simped in simps:
simps.append(simped)
break
simps = sorted(simps, key=lambda e: get_num_symbols(e))
return simps, subs
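# Illustrative usage of simplify (a sketch; 'expr' stands for any symgp matrix expression
# built from SuperMatSymbols). 'simps' is sorted so its first element has the fewest symbols,
# and 'subs' records which sub-expressions were replaced by which new symbols:
#
#   simps, subs = simplify(expr)
#   best = simps[0]
#   for original, replacement in subs.items():
#       print(replacement, '=', original)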
######## Quick creation of variables/constants ########
def variables(var_names, var_shapes):
"""
Creates a tuple of SuperMatSymbol Variables with the given names
Args:
var_names - The names of each variable.
Can be a string, list or tuple.
For a string, the variable names are separated by spaces e.g. "u f fs" for variables with
names "u", "f" and "fs".
var_shapes - The shapes of each variable.
Can be a list or tuple of tuples. e.g. [(m,n), (p,q), (i,j)] for shapes (m,n), (p,q) and (i,j)
If the variable is a column vector, we simply need to specify one dimension e.g. [m, p, i] for shapes
(m,1), (p,1) and (i,1).
We can also have combinations e.g [m, (p,q), i]
Returns:
output_vars - A generator of Variable objects (unpackable like a tuple), one for each (name, shape) pair
"""
from symgp.superexpressions import Variable
if isinstance(var_names, str):
var_names = var_names.split(" ")
# Lists must be of same length
assert(len(var_names) == len(var_shapes))
for i, shape in enumerate(var_shapes):
if isinstance(shape, Symbol):
var_shapes[i] = (shape,1)
return (Variable(name, shape[0], shape[1]) for name, shape in zip(var_names, var_shapes))
def constants(const_names, const_shapes):
from symgp.superexpressions import Constant
if isinstance(const_names, str):
const_names = const_names.split(" ")
# Lists must be of same length
assert(len(const_names) == len(const_shapes))
for i, shape in enumerate(const_shapes):
if isinstance(shape, Symbol):
const_shapes[i] = (shape,1)
return (Constant(name, shape[0], shape[1]) for name, shape in zip(const_names, const_shapes))
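# Illustrative usage of variables/constants, following the docstring above (a sketch; the
# names and shape symbols are hypothetical):
#
#   from sympy import symbols
#   m, n = symbols('m n')
#   u, f = variables('u f', [m, n])          # column vectors of shapes (m,1) and (n,1)
#   K, s = constants('K s', [(m, m), n])     # an (m,m) matrix constant and an (n,1) vector constant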
######## Useful functions to get info about expressions ########
def get_exprs_at_depth(expr, depths):
"""
Returns the MatAdd and MatMul expressions in expr at the levels of the expression tree given by 'depths'.
The top expression is level 0.
If no expressions at the levels exist, we simply return an empty dict
"""
from symgp.superexpressions import SuperDiagMat, SuperBlockDiagMat
if isinstance(depths, int):
depths = [depths]
else:
depths = list(depths)
exprs_at_depths = defaultdict(list)
stack = [{expr: 0}]
while len(stack) > 0:
sub_expr, level = list(stack.pop().items())[0]
if level in depths and (isinstance(sub_expr, MatAdd) or isinstance(sub_expr, MatMul)):
if isinstance(sub_expr, MatAdd) and len(sub_expr.args) > 2: # Substitute all permutations of > 2 arg MatAdds
sub_expr_perms = get_permutations(sub_expr)
exprs_at_depths[level].extend(sub_expr_perms)
elif isinstance(sub_expr, MatMul): # Substitute
# Remove number at head of expression
if isinstance(sub_expr.args[0], Number):
sub_expr = type(sub_expr)(*sub_expr.args[1:])
if len(sub_expr.args) > 2:
l = len(sub_expr.args)
start, end = 0, 2
while end < l:
if (accept_inv_lemma(sub_expr,start,end)):
new_expr = type(sub_expr)(*sub_expr.args[start:end+1])
exprs_at_depths[level].append(new_expr)
break
else:
start += 1
end += 1
if end == l:
exprs_at_depths[level].append(sub_expr)
else:
exprs_at_depths[level].append(sub_expr)
else:
exprs_at_depths[level].append(sub_expr)
if (isinstance(sub_expr, MatMul) or isinstance(sub_expr, MatAdd) or
isinstance(sub_expr, Inverse) or isinstance(sub_expr, Transpose) or
isinstance(sub_expr, SuperDiagMat) or isinstance(sub_expr, SuperBlockDiagMat)):
for arg in reversed(sub_expr.args): # TODO: Why do we need to reverse?
#print(type(arg),arg)
stack.append({arg: level+1})
return exprs_at_depths
def get_ends(expr):
"""
Returns the left and right matrices of the args of the MatAdd expression, expr.
For example for A*Q*B + 2*C + D*E, we return [(A,B), (C,), (D,E)]
or for (Q+A)*R*(Q+A) + Q + A we return [(Q+A,Q+A), (Q+A,)]
"""
from symgp.superexpressions import SuperMatMul, SuperMatAdd
# The collections of 'ends' lists where each has different groupings of single symbols.
# For example, for an expression (A+B)*Q*(B+C) + A + B + C, the two 'ends' lists we get are:
#
# ends_collection = [[(A+B,B+C), (A+B), (C,)],
# [(A+B,B+C), (A,), (B+C,)]]
#
ends_collection = []
# The preliminary list of end arguments of args of expr
ends = []
expr_args = list(expr.doit().args)
mmul_to_rem = {} # Pairs a MatMul to the remainder keyed by the ends. We ignore expressions of form {Number}*A where
# A is a MatSym, MatTrans or MatInv
for a in expr_args:
a_mmul = a.as_coeff_mmul()[1].doit()
if isinstance(a, MatMul):
ends.append((a_mmul.args[0],a_mmul.args[-1]))
mmul_to_rem[(a_mmul.args[0],a_mmul.args[-1])] = (a,(expr - a).doit())
else:
ends.append((a_mmul,))
ends_collection.append(ends)
for ends_mmul, val in mmul_to_rem.items():
for end in ends_mmul:
if isinstance(end,MatAdd):
rem = val[1]
match = [elem for elem in get_permutations(val[1]) if elem==end]
if len(match) > 1:
raise Exception("More than one match found: %s"%(match))
if len(match) > 0:
new_ends = [ends_mmul]
new_ends.append((match[0],))
for arg in match[0].args:
rem = (rem - arg).doit()
# Get remaining elements
if isinstance(rem, MatMul):
for arg in rem.args:
if isinstance(arg, MatMul):
new_ends.append((arg.args[0],arg.args[-1]))
else:
new_ends.append((arg,))
else:
new_ends.append((rem,))
if not new_ends in ends_collection:
ends_collection.append(new_ends)
return ends_collection
def get_num_symbols(expr):
"""
Returns the number of MatrixSyms in the expression
"""
from symgp.superexpressions import SuperDiagMat, SuperBlockDiagMat
numSyms = 0
stack = [{expr: 0}]
while len(stack) > 0:
sub_expr, level = list(stack.pop().items())[0]
if isinstance(sub_expr, MatrixSymbol):
numSyms += 1
if (isinstance(sub_expr, MatMul) or isinstance(sub_expr, MatAdd) or
isinstance(sub_expr, Inverse) or isinstance(sub_expr, Transpose) or
isinstance(sub_expr, SuperDiagMat) or isinstance(sub_expr, SuperBlockDiagMat)):
for arg in reversed(sub_expr.args): # TODO: Why do we need to reverse?
stack.append({arg: level+1})
return numSyms
def display_expr_tree(expr):
"""
Visualizes the expression tree for the given expression
"""
from symgp.superexpressions import SuperDiagMat, SuperBlockDiagMat
stack = [{expand_to_fullexpr(expr): 0}]
while len(stack) > 0:
sub_expr, level = list(stack.pop().items())[0]
print("-" + 4*level*"-",sub_expr)
if (isinstance(sub_expr, MatMul) or isinstance(sub_expr, MatAdd) or
isinstance(sub_expr, Inverse) or isinstance(sub_expr, Transpose) or
isinstance(sub_expr, SuperDiagMat) or isinstance(sub_expr, SuperBlockDiagMat)):
for arg in reversed(sub_expr.args): # TODO: Why do we need to reverse?
stack.append({arg: level+1})
def get_max_depth(expr):
"""
Get the maximum depth of the expression tree down to the lowest symbol
"""
from symgp.superexpressions import SuperDiagMat, SuperBlockDiagMat
depth = 0
stack = [{expr: 0}]
while len(stack) > 0:
sub_expr, level = list(stack.pop().items())[0]
if (isinstance(sub_expr, MatMul) or isinstance(sub_expr, MatAdd) or
isinstance(sub_expr, Inverse) or isinstance(sub_expr, Transpose) or
isinstance(sub_expr, SuperDiagMat) or isinstance(sub_expr, SuperBlockDiagMat)):
for arg in reversed(sub_expr.args): # TODO: Why do we need to reverse?
stack.append({arg: level+1})
depth = level + 1 if level+1 > depth else depth
return depth
def get_all_kernel_variables(expr):
"""
Returns all the variables that are arguments of KernelMatrix objects that are
in expr
For example, for expr = K(a,u)*K(u,u)*K(u,b), we return kern_vars = [a,u,b]
"""
from symgp import SuperDiagMat, SuperBlockDiagMat, KernelMatrix
kern_vars = []
stack = [(expr,0)]
while len(stack) > 0:
sub_expr, level = stack.pop()
if isinstance(sub_expr,KernelMatrix):
if sub_expr.inputs[0] not in kern_vars:
kern_vars.append(sub_expr.inputs[0])
if sub_expr.inputs[1] not in kern_vars:
kern_vars.append(sub_expr.inputs[1])
if (isinstance(sub_expr, MatMul) or isinstance(sub_expr, MatAdd) or
isinstance(sub_expr, Inverse) or isinstance(sub_expr, Transpose) or
isinstance(sub_expr, SuperDiagMat) or isinstance(sub_expr, SuperBlockDiagMat)):
for arg in reversed(sub_expr.args): # TODO: Why do we need to reverse?
stack.append((arg,level+1))
return kern_vars
def get_permutations(expr):
"""
Returns the combinations of the args of a MatAdd expression for lengths 2 to len(expr.args)
(despite the function name, these are combinations rather than permutations).
For example, for A + B + C + D, we return:
[A+B, A+C, A+D, B+C, B+D, C+D, A+B+C, A+B+D, A+C+D, B+C+D, A+B+C+D]
"""
from symgp.superexpressions import SuperMatAdd
import itertools
if isinstance(expr, MatrixSymbol) or isinstance(expr, Transpose) or isinstance(expr, Inverse):
return [expr]
if not isinstance(expr, MatAdd):
raise Exception("Function only works for MatAdd expressions")
expr_args = expr.args
expr_perms = []
for i in range(2,len(expr_args)+1):
expr_perms.extend([SuperMatAdd(*e).doit() for e in itertools.combinations(expr_args,i)])
return expr_perms
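# Illustrative usage of get_permutations, mirroring the docstring (a sketch; symbols are
# hypothetical and the ordering of the returned list may differ due to sympy's argument
# ordering):
#
#   from sympy import MatrixSymbol, symbols
#   n = symbols('n')
#   A, B, C, D = (MatrixSymbol(s, n, 1) for s in 'ABCD')
#   get_permutations(A + B + C + D)
#   # -> [A+B, A+C, A+D, B+C, B+D, C+D, A+B+C, A+B+D, A+C+D, B+C+D, A+B+C+D]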
def get_var_coeffs(expr, var):
"""
Returns the coeffs for the given variable and the remainder
Args:
- 'expr' - The expanded matrix expression
- 'var' - List of variables for which we find the coefficients
Returns:
- 'coeffs' - A list of coeffs of the variables. Same size as 'var'
- 'rem' - The remaining expression (when we subtract the terms corresponding to variables in 'var')
"""
from symgp.superexpressions import SuperMatMul, SuperMatAdd
coeffs = [ZeroMatrix(expr.shape[0],v.shape[0]) for v in var]
# Search the expression tree for each variable in var then add coefficient to list
if expr.is_MatAdd:
for arg in expr.args:
if arg in var:
for i, v in enumerate(var):
if arg == v:
coeffs[i] = arg.as_coeff_mmul()[0]
else:
for arg2 in arg.args:
if arg2 in var:
for i, v in enumerate(var):
if arg2 == v:
coeffs[i] = SuperMatMul(*[c for c in arg.args if c != arg2]).doit()
rem = SuperMatAdd(*[c for c in expr.args if c not in [c*v for c,v in zip(coeffs,var)]]).doit()
elif expr.is_MatMul:
rem = expr
for arg in expr.args:
if arg in var:
for i, v in enumerate(var):
if arg == v:
coeffs[i] = SuperMatMul(*[c for c in expr.args if c != v]).doit()
rem = ZeroMatrix(expr.shape[0], expr.shape[1])
else:
rem = expr # If no match is made, we leave remainder as expr
for i, v in enumerate(var):
if expr == v:
coeffs[i] = Identity(expr.shape[0])
rem = ZeroMatrix(expr.shape[0],expr.shape[1])
return coeffs, rem
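# Illustrative usage of get_var_coeffs (a sketch; symbols and shapes are hypothetical):
#
#   from sympy import MatrixSymbol, symbols
#   n = symbols('n')
#   A = MatrixSymbol('A', n, n)
#   x, b = MatrixSymbol('x', n, 1), MatrixSymbol('b', n, 1)
#   coeffs, rem = get_var_coeffs(A*x + b, [x])   # coeffs == [A], rem == b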
def create_blockform(A,B,C,D):
"""
Creates a block matrix row by row: the top half is formed from A and B, the bottom half from C and D.
Args:
A, B, C, D - The four partitions of the block matrix. Must be 2-D i.e. all of form [[.]]
Returns:
The full blockform i.e. [[A, B], [C, D]]
"""
top = []
for row1, row2 in zip(A,B):
top.append(row1+row2)
bottom = []
for row1, row2 in zip(C,D):
bottom.append(row1+row2)
return top+bottom
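# Illustrative usage of create_blockform (a sketch; the entries are placeholder values). Each
# partition must be a 2-D list-of-lists block:
#
#   A, B = [['a11']], [['b11']]
#   C, D = [['c11']], [['d11']]
#   create_blockform(A, B, C, D)   # -> [['a11', 'b11'], ['c11', 'd11']]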
def get_variables(expr):
"""
Returns a list of all the 'Variable' objects in the given expr.
"""
from symgp.superexpressions import SuperDiagMat, SuperBlockDiagMat, Variable
variables_in_expr = []
stack = [(expr, 0)]
while len(stack) > 0:
sub_expr, level = stack.pop()
if isinstance(sub_expr, Variable) and sub_expr not in variables_in_expr:
variables_in_expr.append(sub_expr)
if (isinstance(sub_expr, MatMul) or isinstance(sub_expr, MatAdd) or
isinstance(sub_expr, Inverse) or isinstance(sub_expr, Transpose) or
isinstance(sub_expr, SuperDiagMat) or isinstance(sub_expr, SuperBlockDiagMat)):
for arg in reversed(sub_expr.args): # TODO: Why do we need to reverse?
stack.append((arg, level+1))
return variables_in_expr
######### Other miscellaneous functions #########
def create_distr_name(dep_vars=None, cond_vars=None) -> str:
"""
Creates a name based on the given variables of a distribution and the variables it is
conditioned on
:param dep_vars: The random variables of a distribution. x in p(x|z)
:param cond_vars: The conditioned-on variables of a distribution. z in p(x|z)
:return: A string of the name.
"""
name = ''
if dep_vars:
if not isinstance(dep_vars[0], list):
name += ','.join([v.name for v in dep_vars])
else:
dep_vars_x = dep_vars[0]
dep_vars_y = dep_vars[1]
name += ','.join([v.name for v in dep_vars_x + dep_vars_y])
if cond_vars:
name += '|' + ','.join([v.name for v in cond_vars])
return name
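# Illustrative usage of create_distr_name (a sketch; the variable names are hypothetical).
# Anything with a '.name' attribute works, e.g. symgp Variables created with the helper above:
#
#   x, z = variables('x z', [n, n])   # 'n' is an assumed sympy Symbol
#   create_distr_name([x], [z])       # -> 'x|z'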
######## GUI lexer ########
class Token(object, metaclass=ABCMeta):
"""
Abstract base class for parser tokens
"""
def __init__(self, *args) -> None:
self.value = args[0]
def __eq__(self, other: 'Token'):
return self.value == other.value
def __str__(self):
return self.value
def __repr__(self):
return type(self).__name__ + '(value=' + self.value + ')'
class DiagToken(Token):
ALLOWED_VALUES = ['diag', 'blockdiag', 'blkdiag']
def __init__(self, t : str) -> None:
"""
Initialises token for diagonal symbols
:param t: The name of the token from the range - 'diag'|'blockdiag'|'blkdiag'
"""
assert t in DiagToken.ALLOWED_VALUES, "t must be one of {}".format(
DiagToken.ALLOWED_VALUES)
super(DiagToken, self).__init__(t)
class OperatorToken(Token):
ALLOWED_VALUES = ['+', '-', '*']
def __init__(self, op : str) -> None:
"""
Initialises token for operator symbols
:param op: The name of the token from the range - '+'|'-'|'*'
"""
assert op in OperatorToken.ALLOWED_VALUES, "op must be one of {}".format(
OperatorToken.ALLOWED_VALUES)
super(OperatorToken, self).__init__(op)
class PlusToken(OperatorToken):
def __init__(self):
super().__init__('+')
class MinusToken(OperatorToken):
def __init__(self):
super().__init__('-')
class StarToken(OperatorToken):
def __init__(self):
super().__init__('*')
class ParenToken(Token):
ALLOWED_VALUES = ['(', ')', '[', ']', '{', '}']
def __init__(self, paren : str) -> None:
"""
Initialises token for parentheses symbols
:param paren: The name of the token from the range - ')'|'('|']'|'['|'{'|'}'
"""
assert paren in ParenToken.ALLOWED_VALUES, "paren must be one of {}".format(
ParenToken.ALLOWED_VALUES)
super(ParenToken, self).__init__(paren)
class LRoundParenToken(ParenToken):
def __init__(self):
super(LRoundParenToken, self).__init__('(')
class RRoundParenToken(ParenToken):
def __init__(self):
super(RRoundParenToken, self).__init__(')')
class LBoxParenToken(ParenToken):
def __init__(self):
super(LBoxParenToken, self).__init__('[')
class RBoxParenToken(ParenToken):
def __init__(self):
super(RBoxParenToken, self).__init__(']')
class LCurlyParenToken(ParenToken):
def __init__(self):
super(LCurlyParenToken, self).__init__('{')
class RCurlyParenToken(ParenToken):
def __init__(self):
super(RCurlyParenToken, self).__init__('}')
class MatIdentifierToken(Token):
def __init__(self, mat : str) -> None:
"""
Initialises token for matrix variable identifier symbols
:param mat: The name of the token. Must start with an upper case letter and only have
alphanumeric characters and/or '_'.
"""
super(MatIdentifierToken, self).__init__(mat)
class VecIdentifierToken(Token):
def __init__(self, vec : str) -> None:
"""
Initialises token for vector variable identifier symbols
:param vec: The name of the token. Must start with a lower case letter and only have
alphanumeric characters and/or '_'.
"""
super(VecIdentifierToken, self).__init__(vec)
class KernelToken(Token):
def __init__(self, name : str, arg1 : str, arg2 : str) -> None:
"""
Initialises token for kernel function symbols.
:param name: The kernel name. Can start with lower or upper case letters
:param arg1: The first argument of the kernel
:param arg2: The second argument of the kernel
"""
super(KernelToken, self).__init__(name)
self.arg1 = arg1
self.arg2 = arg2
def __eq__(self, other: Token):
return isinstance(other, KernelToken) and \
(self.value == other.value and self.arg1 == other.arg1 and self.arg2 == other.arg2)
def __str__(self):
return self.value + '(' + self.arg1 + ',' + self.arg2 + ')'
def __repr__(self):
return type(self).__name__ + '(value=' + self.value + ', arg1=' + self.arg1 + ', arg2=' + \
self.arg2 + ')'
class GroupToken(Token):
"""
Groups the supplied tokens into a single token
"""
def __init__(self, tokens: List[Token]):
"""
Initialises the token that groups a sequence of tokens together.
:param tokens: The list of tokens to group
"""
super(GroupToken, self).__init__(tokens)
def tokens(self) -> List[Token]:
return self.value
class InvToken(Token):
ALLOWED_VALUES = ['.I', '^-1', '^{-1}']
def __init__(self, op: str) -> None:
"""
Initialises token representing the inverse operation.
:param op: Must be one of '.I', '^-1', '^{-1}'
"""
assert op in InvToken.ALLOWED_VALUES, "op must be one of {}".format(
InvToken.ALLOWED_VALUES)
super(InvToken, self).__init__(op)
class TransToken(Token):
ALLOWED_VALUES = ['.T', '\'', '^t', '^T', '^{t}', '^{T}']
def __init__(self, op : str) -> None:
"""
Initialises token representing the transpose operation.
:param op: Must be one of ".T", "'", "^t", "^T", "^{t}", "^{T}".
"""
assert op in TransToken.ALLOWED_VALUES, "op must be one of {}".format(
TransToken.ALLOWED_VALUES)
super(TransToken, self).__init__(op)
def get_tokens(expr: str) -> List[Token]:
"""
Converts a string expression into a list of tokens. An exception is raised
if the expression doesn't give a valid parse
:param expr: The expression which we want to turn into a list of tokens.
:return: The list of tokens
"""
# Useful functions
def match_to_symbol(s: str) -> Optional[
Union[MatIdentifierToken, VecIdentifierToken, KernelToken]]:
"""
Determines whether expr matches to mat_identifier, vec_identifier or kernel
:param s: The expression which we want to match
:return: A matching token; a ValueError is raised if no match is found
"""
if mat_identifier.fullmatch(s):
return MatIdentifierToken(s)
elif vec_identifier.fullmatch(s):
return VecIdentifierToken(s)
elif kernel.fullmatch(s):
# Break up 's' into the kernel name and the two arguments
match = s.split("(")
name = match[0]
arg1, arg2 = match[1].strip(")").split(",")
return KernelToken(name, arg1, arg2)
else:
raise ValueError("Invalid string: {}. Should match regexes: {}, {} or {}".format(
s, mat_identifier.pattern, vec_identifier.pattern, kernel.pattern))
def match_to_mat_op(s: str) -> Optional[Union[TransToken, InvToken]]:
"""
Determines whether s matches inv_sym or trans_sym
:param s: String to be matched
:return: A TransToken or InvToken depending on s
"""
if inv_sym.fullmatch(s):
return InvToken(s)
elif trans_sym.fullmatch(s):
return TransToken(s)
else:
raise ValueError("Invalid string: {}. Should match regexes: {} or {}".format(s, inv_sym.pattern, trans_sym.pattern))
def make_paren_token(s: str) -> Optional[ParenToken]:
if s == '(':
return LRoundParenToken()
elif s == ')':
return RRoundParenToken()
elif s == '[':
return LBoxParenToken()
elif s == ']':
return RBoxParenToken()
elif s == '{':
return LCurlyParenToken()
elif s == '}':
return RCurlyParenToken()
else:
raise ValueError("Invalid paren token. Must be one of '(',')','[',']','{','}'. Provided: %s" % (s))
def make_operator_token(s: str) -> Optional[OperatorToken]:
if s == '+':
return PlusToken()
elif s == '-':
return MinusToken()
elif s == '*':
return StarToken()
else:
raise ValueError('Invalid token. Must be one of "+", "-" or "*". Specified: {}'.format(s))
# Remove meaningless spaces
expr = expr.replace(" ", "")
## Regex expressions ##
# Low-level expressions
digit = re.compile(r"[0-9_]")
lower_char = re.compile(r"[a-z]")
upper_char = re.compile(r"[A-Z]")
operators = re.compile(r"[\+\-\*]")
diag_op = re.compile(r"diag|blkdiag|blockdiag")
inv_sym = re.compile(r"\.I|\^\-1|\^\{\-1\}")
trans_sym = re.compile(r"\.T|\'|\^t|\^T|\^\{t\}|\^\{T\}")
# Matrix and vectors
mat_identifier = re.compile(r"{1}(?:{0}|{1}|{2})*".format( \
lower_char.pattern, upper_char.pattern, digit.pattern))
vec_identifier = re.compile(r"{0}(?:{0}|{1}|{2})*".format( \
lower_char.pattern, upper_char.pattern, digit.pattern))
# Kernels
kernel = re.compile(r"(?:{0}|{1})\((?:{2}|{3}),(?:{2}|{3})\)".format( \
lower_char.pattern, upper_char.pattern, vec_identifier.pattern, mat_identifier.pattern))
# Matrices, vectors and kernels
symbols = re.compile(r"{0}|{1}|{2}".format(
mat_identifier.pattern, vec_identifier.pattern, kernel.pattern)
)
# Full expression to match
#expr_re = re.compile(
# r"^(\()?(?:({0})(\[))?(\()?({1})((?:{3}|{4})|\)|\])?((?:{3}|{4})|\)|\])?((?:{3}|{4})|\)|\])?((?:(?:{2})\(?(?:(?:{0})\[)?(?:{1})(?:(?:{3}|{4})|\)|\])?(?:(?:{3}|{4})|\)|\])?(?:(?:{3}|{4})|\)|\])?)*)(\))?". \
# format(diag_op.pattern, symbols.pattern, operators.pattern, inv_sym.pattern,
# trans_sym.pattern))
expr_re = re.compile(
r"^(\()?(?:({0})(\[))?(\()?({1})((?:(?:[\)\]])?(?:{3}|{4})?)*)((?:(?:{2})\(?(?:(?:{0})\[)?(?:{1})(?:(?:[\)\]])?(?:{3}|{4})?)*)*)(\))?".\
format(diag_op.pattern, symbols.pattern, operators.pattern, inv_sym.pattern,
trans_sym.pattern))
# First match first part of expression then recursively match remainder
tokens = []
expr_match = expr_re.fullmatch(expr)
if expr_match:
groups = expr_match.groups()
#print("groups: ", groups)
if groups[0]: # '('
tokens.append(LRoundParenToken())
if groups[1]: # diag_op
tokens.append(DiagToken(groups[1]))
if groups[2]: # '['
tokens.append(make_paren_token(groups[2]))
if groups[3]: # '('
tokens.append(make_paren_token(groups[3]))
if groups[4]: # mat_identifier|vec_identifier|kernel
tokens.append(match_to_symbol(groups[4]))
# Alternations between (inv_sym|trans_sym) and ]|)
#if groups[5]: # ) | ]
# tokens.append(make_paren_token(groups[5]))
#if groups[6]: # inv_sym | trans_sym
# tokens.append(match_to_mat_op(groups[6]))
close_expr = groups[5]
close_expr_pat = re.compile(r"([\)\]])?({0}|{1})?((?:[\)\]]?(?:{0}|{1})?)*)".format(
inv_sym.pattern, trans_sym.pattern))
while len(close_expr) > 0:
close_expr_groups = close_expr_pat.fullmatch(close_expr).groups()
if close_expr_groups[0]: # ) | ]
tokens.append(make_paren_token(close_expr_groups[0]))
if close_expr_groups[1]: # inv_sym | trans_sym
tokens.append(match_to_mat_op(close_expr_groups[1]))
close_expr = close_expr_groups[2]
# (inv_sym|trans_sym)|']'|')' (3 times)
#for i in range(5,8):
# if groups[i]:
# try:
# token = make_paren_token(groups[i])
# except ValueError:
# token = match_to_mat_op(groups[i])
# tokens.append(token)
## Repeat for the rest of the expression
right = groups[6] # The remainder of the expression if it exists excluding last bracket
#right_regex = re.compile(
# r"^({0})(\()?(?:({4})(\[))?({1})((?:{2}|{3})|\)|\])?((?:{2}|{3})|\)|\])?((?:{2}|{3})|\)|\])?((?:(?:{0})\(?(?:(?:{4})\[)?(?:{1})(?:(?:{2}|{3})|\)|\])?(?:(?:{2}|{3})|\)|\])?(?:(?:{2}|{3})|\)|\])?)*)".format(\
# operators.pattern, symbols.pattern, inv_sym.pattern, trans_sym.pattern,
# diag_op.pattern))
right_regex = re.compile(
r"^({0})(\()?(?:({4})(\[))?({1})((?:(?:[\)\]])?(?:{2}|{3})?)*)((?:(?:{0})\(?(?:(?:{4})\[)?(?:{1})(?:(?:[\)\]])?(?:{2}|{3})?)*)*)".format( \
operators.pattern, symbols.pattern, inv_sym.pattern, trans_sym.pattern,
diag_op.pattern))
while len(right) > 0:
subgroups = right_regex.fullmatch(right).groups()
#print("subgroups: ", subgroups)
if subgroups[0]: # operators
tokens.append(make_operator_token(subgroups[0]))
else:
raise RuntimeError("Scanning error: Missing operator")
if subgroups[1]: # '('
tokens.append(make_paren_token(subgroups[1]))
if subgroups[2]: # 'diag_op'
tokens.append(DiagToken(subgroups[2]))
if subgroups[3]: # '['
tokens.append(make_paren_token(subgroups[3]))
if subgroups[4]: # mat_identifier|vec_identifier|kernel
tokens.append(match_to_symbol(subgroups[4]))
else:
raise RuntimeError("Scanning error: Missing mat_identifier, vec_identifier or kernel.")
# Alternations between (inv_sym|trans_sym) and ]|)
# if groups[5]: # ) | ]
# tokens.append(make_paren_token(groups[5]))
# if groups[6]: # inv_sym | trans_sym
# tokens.append(match_to_mat_op(groups[6]))
close_expr = subgroups[5]
close_expr_pat = re.compile(r"([\)\]])?({0}|{1})?((?:[\)\]]?(?:{0}|{1})?)*)".format(
inv_sym.pattern, trans_sym.pattern))
while len(close_expr) > 0:
close_expr_groups = close_expr_pat.fullmatch(close_expr).groups()
if close_expr_groups[0]: # ) | ]
tokens.append(make_paren_token(close_expr_groups[0]))
if close_expr_groups[1]: # inv_sym | trans_sym
tokens.append(match_to_mat_op(close_expr_groups[1]))
close_expr = close_expr_groups[2]
# (inv_sym|trans_sym)|']'|')' (3 times)
#for i in range(5, 8):
# if subgroups[i]:
# try:
# token = make_paren_token(subgroups[i])
# except ValueError:
# token = match_to_mat_op(subgroups[i])
#
# tokens.append(token)
right = subgroups[6]#[8]
if groups[7]:
tokens.append(RRoundParenToken())
return tokens
else:
raise Exception("Invalid input")
def tokens_to_string(tokens : List[Token]) -> str:
"""
Converts a list of tokens to the string they represent.
:param tokens: The ordered list of tokens
:return: The string representation of the list of tokens
"""
output = ""
for token in tokens:
if any([isinstance(token, token_class) for token_class in \
[DiagToken, OperatorToken, ParenToken, MatIdentifierToken, VecIdentifierToken]]):
output += token.value
elif isinstance(token, InvToken) or isinstance(token, TransToken):
sym = token.value
if isinstance(sym, KernelToken):
output += sym.value + "(" + sym.arg1 + "," + sym.arg2 + ")"
elif isinstance(sym, GroupToken):
output += tokens_to_string(sym.tokens())
else:
output += sym.value
if isinstance(token, InvToken):
output += ".I"
else:
output += ".T"
elif isinstance(token, GroupToken):
output += tokens_to_string(token.tokens())
else:
output += token.value +"(" + token.arg1 + ","+ token.arg2 + ")"
return output
######## GUI AST classes ########
## AST Printer stuff ##
class VisitorBase(object, metaclass=ABCMeta):
"""
Abstract class for Visitor from the Visitor pattern.
"""
def visit_binary(self, binary: 'Binary'):
raise NotImplementedError()
def visit_unary(self, unary: 'Unary'):
raise NotImplementedError()
def visit_literal(self, literal: 'Literal'):
raise NotImplementedError()
def visit_kernel_literal(self, kern_lit: 'KernelLiteral'):
raise NotImplementedError()
def visit_grouping(self, grouping: 'Grouping'):
raise NotImplementedError()
def visit_diag(self, diag: 'Diag'):
raise NotImplementedError()
def visit_matop(self, matop: 'MatOp'):
raise NotImplementedError()
class ASTPrinter(VisitorBase):
def print_ast(self, expr: 'ASTNode'):
return expr.accept(self)
def visit_binary(self, binary: 'Binary'):
return self.parenthesise(binary.operator, binary.left, binary.right)
def visit_unary(self, unary: 'Unary'):
return self.parenthesise(unary.operator, unary.right)
def visit_literal(self, literal: 'Literal'):
return self.parenthesise(literal.value)
def visit_kernel_literal(self, kern_lit: 'KernelLiteral'):
return self.parenthesise(kern_lit.name, kern_lit.arg1, kern_lit.arg2)
def visit_grouping(self, grouping: 'Grouping'):
return self.parenthesise("group", grouping.expr)
def visit_diag(self, diag: 'Diag'):
return self.parenthesise(diag.diag_op, diag.expr)
def visit_matop(self, matop: 'MatOp'):
return self.parenthesise(matop.mat_op, matop.expr)
def parenthesise(self, name: str, *exprs: Iterable['ASTNode']):
out_str = "( " + name
for expr in exprs:
out_str += " "
if isinstance(expr, ASTNode):
out_str += expr.accept(self)
else:
out_str += expr
out_str += ")"
return out_str
## Node classes ##
class ASTNode(object, metaclass=ABCMeta):
def __eq__(self, other):
if type(self) == type(other):
return all([self.__dict__[k] == other.__dict__[k] for k in self.__dict__.keys() if not k.startswith('_')])
return False
def accept(self, visitor: VisitorBase):
raise NotImplementedError("Should be implemented by subclasses.")
class Binary(ASTNode):
def __init__(self, left, operator: OperatorToken, right):
self.left = left
self.operator = operator.value
self.right = right
def __str__(self):
return str(self.left) + self.operator + str(self.right)
def __repr__(self):
return "Binary(left={}, operator={}, right={}".format(self.left, self.operator, self.right) + ")"
def accept(self, visitor: VisitorBase):
return visitor.visit_binary(self)
class Unary(ASTNode):
def __init__(self, operator: OperatorToken, right):
self.operator = operator.value
self.right = right
def __str__(self):
return self.operator + str(self.right)
def __repr__(self):
return "Unary(operator={}, right={})".format(self.operator, self.right)
def accept(self, visitor: VisitorBase):
return visitor.visit_unary(self)
class Literal(ASTNode):
def __init__(self, value: str):
self.value = value
def __str__(self):
return self.value
def __repr__(self):
return "Literal(value={})".format(repr(self.value))
def accept(self, visitor: VisitorBase):
return visitor.visit_literal(self)
class KernelLiteral(ASTNode):
def __init__(self, name: str, arg1: str, arg2: str):
self.name = name
self.arg1 = arg1
self.arg2 = arg2
def __str__(self):
return self.name + "(" + self.arg1 + ", " + self.arg2 + ")"
def __repr__(self):
return "KernelLiteral(name={}, arg1={}, arg2={})".format(self.name, self.arg1, self.arg2)
def accept(self, visitor: VisitorBase):
return visitor.visit_kernel_literal(self)
class Grouping(ASTNode):
def __init__(self, expr):
self.expr = expr
def __str__(self):
return "(" + str(self.expr) + ")"
def __repr__(self):
return "Grouping(expr={})".format(self.expr)
def accept(self, visitor: VisitorBase):
return visitor.visit_grouping(self)
class Diag(ASTNode):
def __init__(self, diag_op: DiagToken, expr):
self.diag_op = diag_op.value
self.expr = expr
def __repr__(self):
return "Diag(diag_op={}, expr={})".format(self.diag_op, self.expr)
def __str__(self):
return self.diag_op + "[" + str(self.expr) + "]"
def accept(self, visitor: VisitorBase):
return visitor.visit_diag(self)
class MatOp(ASTNode):
def __init__(self, expr, mat_op: Union[InvToken, TransToken]):
self.expr = expr
self.mat_op = mat_op.value
def __repr__(self):
return "MatOp(expr={}, mat_op={})".format(self.expr, self.mat_op)
def __str__(self):
return str(self.expr) + self.mat_op
def accept(self, visitor: VisitorBase):
return visitor.visit_matop(self)
# Parsing functions
def parse(tokens):
"""
Parses a list of tokens to produce an expression with a dictionary
of the objects created
"""
current = 0 # Index of current token.
def previous() -> Token:
return tokens[current-1]
def advance() -> Token:
nonlocal current
if not is_at_end():
current += 1
return previous()
def peek() -> Token:
return tokens[current]
def is_at_end() -> bool:
return current == len(tokens)
def match(*token_types) -> bool:
for token_type in token_types:
if check(token_type):
advance()
return True
return False
def check(token_type) -> bool:
if is_at_end():
return False
return isinstance(peek(), token_type)
def consume(token_type, message: str) -> Token:
if check(token_type):
return advance()
raise error(peek(), message)
def error(token: Token, message: str):
return RuntimeError(message + " Actual: " + token.value)
def primary():
if match(MatIdentifierToken, VecIdentifierToken):
return Literal(previous().value)
elif match(KernelToken):
kern_tok = previous() # type: KernelToken
return KernelLiteral(kern_tok.value, kern_tok.arg1, kern_tok.arg2)
elif match(LRoundParenToken):
expr = expression()
consume(RRoundParenToken, "Expect ')' after expression.")
return Grouping(expr)
elif match(DiagToken):
diag_op = previous() # type: DiagToken
consume(LBoxParenToken, "Expect '[' after diag_op and before expression.")
expr = expression()
consume(RBoxParenToken, "Expect ']' after expression.")
return Diag(diag_op, expr)
def unary():
if match(MinusToken):
operator = previous() # type: OperatorToken
right = unary()
out_expr = Unary(operator, right)
else:
out_expr = primary()
if match(TransToken, InvToken):
matop = previous() # type: Union[TransToken, InvToken]
return MatOp(out_expr, matop)
else:
return out_expr
def multiplication():
expr = unary()
while match(StarToken):
operator = previous() # type: OperatorToken
right = unary()
expr = Binary(expr, operator, right)
return expr
def addition():
expr = multiplication()
while match(PlusToken, MinusToken):
operator = previous() # type: OperatorToken
right = multiplication()
expr = Binary(expr, operator, right)
return expr
def expression():
return addition()
return expression()
def print_ast(ast: ASTNode):
ast_printer = ASTPrinter()
return ast_printer.print_ast(ast)
# Interpreter
class Interpreter(VisitorBase, metaclass=ABCMeta):
def __init__(self, namespace: Dict[str, Any]):
"""
Initialises interpreter.
:param namespace: Dictionary mapping names to Python objects that are used to evaluate
expression. For example for a SymGP Constant named 'A', we would have the entry:
namespace['A'] = Constant('A')
For Kernels, we have to append '_kern' to the Kernel name to distinguish it from matrix
symbols.
We assume in all the 'visit*' functions below that all the required objects have been defined
previously.
"""
self._ns = namespace
def interpret(self, expr: ASTNode):
return self.evaluate(expr)
def evaluate(self, expr: ASTNode):
return expr.accept(self)
def visit_binary(self, expr: Binary):
left = self.evaluate(expr.left)
right = self.evaluate(expr.right)
if expr.operator == '+':
return left + right
elif expr.operator == '-':
return left - right
elif expr.operator == '*':
return left * right
else:
return None
def visit_unary(self, expr: Unary):
right = self.evaluate(expr.right)
if expr.operator == '-':
return -right
else:
return None
def visit_literal(self, expr: Literal):
return self._ns[expr.value]
def visit_kernel_literal(self, expr: KernelLiteral):
from symgp.kernels import Kernel
arg1, arg2 = self._ns[expr.arg1], self._ns[expr.arg2]
kern = self._ns[expr.name + '_kern'] # type: Kernel
return kern(arg1, arg2)
def visit_grouping(self, expr: Grouping):
return self.evaluate(expr.expr)
def visit_diag(self, expr: Diag):
from symgp.superexpressions.supermatbase import SuperMatBase
from symgp.superexpressions import SuperDiagMat, SuperBlockDiagMat
arg = self.evaluate(expr.expr) # type: SuperMatBase
if expr.diag_op == 'diag':
return SuperDiagMat(arg)
elif expr.diag_op == 'blkdiag' or expr.diag_op == 'blockdiag':
return SuperBlockDiagMat(arg)
else:
return None
def visit_matop(self, expr: MatOp):
from symgp.superexpressions import SuperMatTranspose, SuperMatInverse
arg = self.evaluate(expr.expr)
trans_ops = [".T", "'", "^t", "^T", "^{t}", '^{T}']
inv_ops = [".I", "^-1", "^{-1}"]
if expr.mat_op in trans_ops:
return SuperMatTranspose(arg)
elif expr.mat_op in inv_ops:
return SuperMatInverse(arg)
else:
return None
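# Illustrative sketch (added comments, not part of the original module): the
# printer visits an AST built from the node classes above. Token construction
# is omitted because the token classes are defined elsewhere in this file.
#
#   ast = Grouping(KernelLiteral('K', 'x1', 'x2'))
#   print_ast(ast)   # -> "( group ( K x1 x2))"
#
# The Interpreter follows the same accept/visit flow, but needs a namespace
# mapping 'x1', 'x2' and 'K_kern' to SymGP objects before interpret(ast) works.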
``` |
{
"source": "jnack007/Django_spools",
"score": 2
} |
#### File: Django_spools/spools/views.py
```python
from django.views.generic import TemplateView, ListView
from .models import Spool
from django.db.models import Q
# Create your views here.
class HomePageView(TemplateView):
template_name = 'home.html'
class SearchResultsView(ListView):
model = Spool
template_name = 'search_results.html'
def get_queryset(self):
query = self.request.GET.get('q')
object_list = Spool.objects.filter(
Q(tag=query) | Q(level=query)
)
return object_list
# to add an or filter see below example:
# Q(tag='AS-L00-050') | Q(level='Level 00')
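# Example request flow (illustrative sketch; the actual URL route depends on
# urls.py, which is not shown here): a request such as GET /search/?q=AS-L00-050
# makes get_queryset run Spool.objects.filter(Q(tag='AS-L00-050') | Q(level='AS-L00-050')).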
``` |
{
"source": "jnadro/pybgfx",
"score": 2
} |
#### File: pybgfx/pybgfx/bgfx_tests.py
```python
import ctypes
import bgfx
import unittest
class TestImport(unittest.TestCase):
def test_module(self):
self.assertEqual(bgfx.__author__, '<NAME>')
self.assertEqual(bgfx.__license__, 'BSD 2-clause')
self.assertEqual(bgfx.__status__, 'Development')
class TestEnums(unittest.TestCase):
def test_bgfx_renderer_type(self):
self.assertEqual(type(bgfx.BGFX_RENDERER_TYPE_NOOP), ctypes.c_int)
self.assertEqual(bgfx.BGFX_RENDERER_TYPE_NOOP.value, 0)
self.assertEqual(bgfx.BGFX_RENDERER_TYPE_DIRECT3D9.value, 1)
self.assertEqual(bgfx.BGFX_RENDERER_TYPE_DIRECT3D11.value, 2)
self.assertEqual(bgfx.BGFX_RENDERER_TYPE_DIRECT3D12.value, 3)
self.assertEqual(bgfx.BGFX_RENDERER_TYPE_GNM.value, 4)
self.assertEqual(bgfx.BGFX_RENDERER_TYPE_METAL.value, 5)
self.assertEqual(bgfx.BGFX_RENDERER_TYPE_OPENGLES.value, 6)
self.assertEqual(bgfx.BGFX_RENDERER_TYPE_OPENGL.value, 7)
self.assertEqual(bgfx.BGFX_RENDERER_TYPE_VULKAN.value, 8)
self.assertEqual(bgfx.BGFX_RENDERER_TYPE_COUNT.value, 9)
class TestInit(unittest.TestCase):
def test_init(self):
init = bgfx.bgfx_init_t()
bgfx.init_ctor(ctypes.pointer(init))
result = bgfx.init(ctypes.pointer(init))
self.assertEqual(result, True)
bgfx.shutdown()
class TestAPI(unittest.TestCase):
def setUp(self):
init = bgfx.bgfx_init_t()
bgfx.init_ctor(ctypes.pointer(init))
bgfx.init(ctypes.pointer(init))
def tearDown(self):
bgfx.shutdown()
def test_get_supported_renderers(self):
renderers = (bgfx.bgfx_renderer_type * bgfx.BGFX_RENDERER_TYPE_COUNT.value)(-1, -1, -1, -1, -1, -1, -1, -1, -1)
num_renderers = bgfx.get_supported_renderers(bgfx.BGFX_RENDERER_TYPE_COUNT.value, ctypes.cast(renderers, ctypes.POINTER(bgfx.bgfx_renderer_type)))
self.assertGreater(num_renderers, 0)
self.assertLessEqual(num_renderers, bgfx.BGFX_RENDERER_TYPE_COUNT.value)
for i in range(0, bgfx.BGFX_RENDERER_TYPE_COUNT.value):
renderer_type = renderers[i]
if i < num_renderers:
self.assertGreaterEqual(renderer_type, 0)
self.assertLess(renderer_type, bgfx.BGFX_RENDERER_TYPE_COUNT.value)
else:
self.assertEqual(renderer_type, -1)
def test_get_renderer_name(self):
self.assertEqual(bgfx.get_renderer_name(0), b'Noop')
self.assertEqual(bgfx.get_renderer_name(1), b'Direct3D 9')
self.assertEqual(bgfx.get_renderer_name(2), b'Direct3D 11')
self.assertEqual(bgfx.get_renderer_name(3), b'Direct3D 12')
self.assertEqual(bgfx.get_renderer_name(4), b'GNM')
self.assertEqual(bgfx.get_renderer_name(5), b'Noop')
self.assertEqual(bgfx.get_renderer_name(6), b'OpenGL 2.1')
self.assertEqual(bgfx.get_renderer_name(7), b'OpenGL 2.1')
self.assertEqual(bgfx.get_renderer_name(8), b'Vulkan')
def test_get_renderer_type(self):
renderer_type = bgfx.get_renderer_type()
self.assertNotEqual(renderer_type, 0)
def test_get_caps(self):
caps = bgfx.get_caps()
# check against null pointer
# see: https://docs.python.org/2/library/ctypes.html#pointers
self.assertNotEqual(caps, ctypes.POINTER(bgfx.caps)())
# get the actual object
caps = caps.contents
print("Renderer Type: ", bgfx.get_renderer_name(caps.rendererType))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "jnafolayan/unilag-nav-system",
"score": 3
} |
#### File: jnafolayan/unilag-nav-system/Button.py
```python
from Entity import Entity
class Button(Entity):
def __init__(self, text, width, height, fill, text_color, font_size, action):
super().__init__()
self.text = text
self.width = width
self.height = height
self.fill = fill
self.text_color = text_color
self.font_size = font_size
self.action = action
def draw(self, arcade):
arcade.draw_lrtb_rectangle_outline(self.x, self.x + self.width, self.y + self.height, self.y, (0,0,0))
arcade.draw_text(self.text, self.x + 10, self.y + 10, self.text_color, self.font_size)
def on_mouse_press(self):
super().on_mouse_press()
self.action()
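# Usage sketch (added for illustration; the sizes, colours and callback below
# are placeholder values, and x/y positioning comes from the Entity base class):
#
#   btn = Button('Find route', 140, 40, (255, 255, 255), (0, 0, 0), 14,
#                lambda: print('clicked'))
#   btn.x, btn.y = 20, 20
#   # the window then forwards events: btn.draw(arcade), btn.on_mouse_press(), ...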
```
#### File: jnafolayan/unilag-nav-system/Entity.py
```python
class Entity:
"""
" This is the base class for everything that will be
" rendered onto the screen.
"""
def __init__(self):
self.type = ''
self.app = None
self.mouse_over = False
self.mouse_down = False
self.x = 0
self.y = 0
def set_app(self, app):
self.app = app
def on_mouse_motion(self):
self.mouse_over = True
def on_mouse_leave(self):
self.mouse_over = False
def on_mouse_press(self):
self.mouse_down = True
def on_mouse_release(self):
self.mouse_down = False
def update(self, dt, arcade):
pass
def draw(self, arcade):
pass
```
#### File: jnafolayan/unilag-nav-system/findpath.py
```python
from Node import Node
def find_path(start, end):
closed_nodes = {}
open_nodes = [start]
# ensure the start node has no cost
start.cost = 0
while open_nodes:
node = open_nodes.pop(0)
if node.key in closed_nodes:
continue
neighbors = node.neighbors
for neighbor, weight in neighbors:
new_cost = node.cost + weight
if new_cost < neighbor.cost:
neighbor.cost = new_cost
neighbor.parent = node
open_nodes.append(neighbor)
closed_nodes[node.key] = node
path = []
node = end
while node and node != start:
path.append(node.key)
node = node.parent
path.append(start.key)
return path[-1::-1]
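# Usage sketch (illustrative only): the Node class lives in Node.py, which is
# not shown here. The loop above assumes each node starts with a large (e.g.
# infinite) cost, exposes `neighbors` as (node, weight) pairs and has a unique
# `key`. A typical call would then be:
#
#   route = find_path(nodes['ENTRANCE'], nodes['LIBRARY'])
#   # -> list of node keys from the start node to the destination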
``` |
{
"source": "JNagasava/Polynomial-Interpolation",
"score": 4
} |
#### File: Polynomial-Interpolation/interpolation/graphs.py
```python
from matplotlib import pyplot as plt
def create_graph(V, color, label):
"""
    Set up the graph information: x values, y values, color and label name.
Parameters
----------
V : dict
V contains X and Y values.
color : str
color name.
label : str
label name
Returns
-------
dict :
Returns a dict = {'x': X, 'y': Y, 'color': color, 'label': label}
"""
x, y = list(V.keys())
return {
'x': V[x],
'y': V[y],
'color': color,
'label': label
}
def plot_graph(graphs, title=None, xlabel='x', ylabel='y'):
"""
    Plot graphs using the matplotlib library
    Parameters
----------
graphs : list
List of created graphs
title : str
title of graph
xlabel : str
name of x axis
ylabel : str
name of y axis
"""
for g in graphs:
plt.plot(g['x'], g['y'], g['color'], label=g['label'])
    if title is not None:
        # Apply the title when one is provided (the parameter was previously unused).
        plt.title(title)
    plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.legend()
plt.show()
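if __name__ == "__main__":
    # Small demo added for illustration (not part of the original module):
    # plot a sine curve together with a coarse sample of the same curve.
    # create_graph expects a two-key dict; on Python 3.7+ the insertion order
    # of {'x': ..., 'y': ...} guarantees x is read before y.
    import numpy as np
    xs = np.linspace(0, 2 * np.pi, 100)
    g1 = create_graph({'x': xs, 'y': np.sin(xs)}, 'b', 'sin(x)')
    g2 = create_graph({'x': xs[::10], 'y': np.sin(xs[::10])}, 'r', 'samples')
    plot_graph([g1, g2], title='Plot demo', xlabel='x', ylabel='y')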
```
#### File: interpolation/methods/linear_system.py
```python
import numpy as np
def swap_rows(Z, a, b):
"""
Swap two rows (a, b) from Z matrix (np.array)
Parameters
----------
Z : np.array
matrix
a : int
index row from Z
b : int
index row from Z
"""
temp = np.copy(Z[a])
Z[a] = Z[b]
Z[b] = temp
def partial_pivoting(Z, row):
"""
Partial pivoting of Z matrix, starting at row param
Parameters
----------
Z : np.array
matrix
row : int
index row from Z
Returns
-------
pivot : int
pivot is the max value from Z(row:,row)
"""
pivot_row = np.argmax(np.abs(Z[row:, row])) + row
swap_rows(Z, row, pivot_row)
pivot = Z[row, row]
return pivot
def solve_sys(X, Y):
"""
Solve a linear system using Gauss Elimination
Parameters
----------
X : list
list of x values
Y : list
list of y values
Returns
-------
list
        returns the solution vector of the linear system (X, Y)
"""
Z = np.copy(X)
Z = np.hstack([Z, np.transpose(np.array([Y]))])
for j in range(Z.shape[0] - 1):
pivot = partial_pivoting(Z, j)
for i in range(j + 1, Z.shape[0]):
if Z[i, j] != 0 :
m = pivot / Z[i, j]
Z[i, j:] = Z[j, j:] - (m * Z[i, j:])
A = np.zeros((X.shape[0], 1))
for k in range(Z.shape[0] - 1, -1, -1):
A[k] = (Z[k, Z.shape[1]-1] - (Z[k, Z.shape[1]-2:k:-1] @ A[A.shape[0]:k:-1])) / Z[k, k]
return np.ndarray.tolist(np.transpose(A))[0]
def vandermond(X):
"""
    Create a Vandermonde matrix (n x n) from the x values
Parameters
----------
X : list
list of x values
Returns
-------
np.array
vandermond matrix
"""
n = len(X)
V = np.zeros((n, n))
for i in range(n):
V[i, :] = [X[i]**k for k in range(n)]
return V
def linsys(X, Y):
"""
Polynomial Interpolation using Gauss Elimination
Parameters
----------
X : list
list of X values
Y : list
list of Y values
Returns
-------
function
function of polynomial interpolation (using linear system)
"""
V = vandermond(X)
A = solve_sys(V, Y)
def f(x):
return sum([a*(x**p) for p, a in enumerate(A)])
return f
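if __name__ == "__main__":
    # Quick check added for illustration: interpolating three points that lie
    # on y = x**2 should reproduce the parabola, so this prints a value that is
    # numerically 2.25.
    f = linsys([0.0, 1.0, 2.0], [0.0, 1.0, 4.0])
    print(f(1.5))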
```
#### File: interpolation/methods/newton.py
```python
import numpy as np
def newton(X, Y):
"""
Polynomial Interpolation using Newton's method
Parameters
------------
X : list
list of x values
Y : list
list of y values
Returns
-------
function
function of polynomial interpolation (using newton's method)
"""
n = len(X)
K = np.zeros((n, n))
K[:, 0] = Y
for k in range(1, n):
K[k:, k] = [(K[i, k - 1] - K[i - 1, k - 1]) / (X[i] - X[i - k]) for i in range(k, K.shape[0])]
D = np.ndarray.tolist(np.diag(K))
def f(x):
A = np.ones((n))
A[1: n] = [(x - X[k]) for k in range(n - 1)]
return sum(D[i] * np.prod(A[: i + 1]) for i in range(n))
return f
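if __name__ == "__main__":
    # Quick check added for illustration: the divided-difference interpolant
    # through (0, 0), (1, 1), (2, 4) is x**2, so this prints a value that is
    # numerically 2.25.
    f = newton([0.0, 1.0, 2.0], [0.0, 1.0, 4.0])
    print(f(1.5))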
``` |
{
"source": "jnak/imgix-python",
"score": 3
} |
#### File: imgix-python/imgix/urlbuilder.py
```python
import re
from .urlhelper import UrlHelper
from .constants import DOMAIN_PATTERN, SRCSET_TARGET_WIDTHS
SRCSET_DPR_TARGET_RATIOS = range(1, 6)
class UrlBuilder(object):
"""
Create imgix URLs
The URL builder can be reused to create URLs for any images on the
provided domain.
Parameters
----------
domain : str
Domain to use while creating imgix URLs.
use_https : bool
If `True`, create HTTPS imgix image URLs. (default `True`)
sign_key : str or None
When provided, this key will be used to sign the generated image URLs.
You can read more about URL signing on our docs:
https://docs.imgix.com/setup/securing-images
include_library_param : bool
If `True`, each created URL is suffixed with 'ixlib' parameter
indicating the library used for generating the URLs. (default `True`)
Methods
-------
validate_domain(domain)
Returns true if the supplied string parameter pattern matches a valid
domain name accepted by imgix
create_url(path, params=None)
Create URL with the supplied path and `params` parameters dict.
create_srcset(path, params=None)
Create srcset attribute value with the supplied path and
`params` parameters dict.
Will generate a fixed-width DPR srcset if a width OR height and aspect
ratio are passed in as parameters. Otherwise will generate a srcset
with width-descriptor pairs.
"""
def __init__(
self,
domain,
use_https=True,
sign_key=None,
include_library_param=True):
self.validate_domain(domain)
self._domain = domain
self._sign_key = sign_key
self._use_https = use_https
self._include_library_param = include_library_param
def validate_domain(self, domain):
"""
Returns true if the supplied string parameter pattern matches a valid
domain name accepted by imgix
Parameters
----------
domain : str
Returns
-------
bool
"""
err_str = str(
'Domain must be passed in as fully-qualified domain names and ' +
'should not include a protocol or any path element, i.e. ' +
'"example.imgix.net".')
if re.match(DOMAIN_PATTERN, domain) is None:
raise ValueError(err_str)
def create_url(self, path, params=None):
"""
Create URL with supplied path and `params` parameters dict.
Parameters
----------
path : str
params : dict
Dictionary specifying URL parameters. Non-imgix parameters are
added to the URL unprocessed. For a complete list of imgix
supported parameters, visit https://docs.imgix.com/apis/url .
(default None)
Returns
-------
str
imgix URL
"""
if not params:
params = {}
domain = self._domain
scheme = "https" if self._use_https else "http"
url_obj = UrlHelper(
domain,
path,
scheme,
sign_key=self._sign_key,
include_library_param=self._include_library_param,
params=params)
return str(url_obj)
def create_srcset(self, path, params=None):
"""
Create srcset attribute value with the supplied path and
`params` parameters dict.
Will generate a fixed-width DPR srcset if a width OR height and aspect
ratio are passed in as parameters. Otherwise will generate a srcset
with width-descriptor pairs.
Parameters
----------
path : str
params : dict
Dictionary specifying URL parameters. Non-imgix parameters are
added to the URL unprocessed. For a complete list of imgix
supported parameters, visit https://docs.imgix.com/apis/url .
(default None)
Returns
-------
str
srcset attribute value
"""
if not params:
params = {}
width = params['w'] if 'w' in params else None
height = params['h'] if 'h' in params else None
aspect_ratio = params['ar'] if 'ar' in params else None
if (width or (height and aspect_ratio)):
return self._build_srcset_DPR(path, params)
else:
return self._build_srcset_pairs(path, params)
def _build_srcset_pairs(self, path, params):
srcset = ''
for i in range(len(SRCSET_TARGET_WIDTHS)):
current_width = SRCSET_TARGET_WIDTHS[i]
current_params = params
current_params['w'] = current_width
srcset += self.create_url(path, current_params) \
+ ' ' + str(current_width) + 'w,\n'
return srcset[0:-2]
def _build_srcset_DPR(self, path, params):
srcset = ''
for i in range(len(SRCSET_DPR_TARGET_RATIOS)):
current_ratio = SRCSET_DPR_TARGET_RATIOS[i]
current_params = params
current_params['dpr'] = i+1
srcset += self.create_url(path, current_params) \
+ ' ' + str(current_ratio) + 'x,\n'
return srcset[0:-2]
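# Usage sketch (added for illustration; "example.imgix.net" is a placeholder
# domain and the exact query-string ordering may vary):
#
#   builder = UrlBuilder("example.imgix.net")
#   builder.create_url("/photo.png", {"w": 400})
#       # -> an https URL on example.imgix.net with w=400 (plus the ixlib param)
#   builder.create_srcset("/photo.png", {"w": 400})
#       # -> a fixed-width DPR srcset with 1x..5x entries
#   builder.create_srcset("/photo.png")
#       # -> a width-descriptor srcset built from SRCSET_TARGET_WIDTHS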
``` |
{
"source": "Jnalis/frappe-health-care",
"score": 2
} |
#### File: doctype/practitioner/practitioner.py
```python
from frappe.model.document import Document
class Practitioner(Document):
def before_save(self):
self.practitioner_full_name = f'{self.first_name} {self.second_name or ""}'
``` |
{
"source": "jnamika/pyfunctor",
"score": 3
} |
#### File: src/pyfunctor/functor.py
```python
import sys
import types
from functools import reduce
from itertools import chain
from operator import itemgetter
class Functor:
'''Functor(value) -> new Functor object
Alias for Functor: F
Example 1 (pipeline operator):
>>> x = Functor('abc')
>>> y = x >> str.upper >> (lambda x: x + 'def')
>>> run(y)
'ABCdef'
Example 2 (with block):
>>> with Functor(0):
... def f(x):
... return x + 10
... def b(x):
... return x * 2
... def c(x):
... print(x)
20
'''
def __init__(self, x):
self.value = x
self.fs = []
@classmethod
def fmap(cls, f):
return f
@classmethod
def lift(cls, f):
def _f(*margs):
args = (m.run() for m in margs)
return cls.fmap(f)(*args)
return _f
def run(self):
        '''F.run() -> calculated value'''
return reduce(lambda x, f: f(x), self.fs, self.value)
def _composition(self, f):
functor = self.__class__(self.value)
functor.fs = self.fs + [self.fmap(f)]
return functor
def __rshift__(self, f):
return self._composition(f)
def __rlshift__(self, f):
return self._composition(f)
def __call__(self):
return self.run()
def __eq__(self, x):
return (self.__class__ == x.__class__ and self.fs == x.fs and
self.value == x.value)
def __enter__(self):
def call(self, f):
def _(*args, **kwd):
return f(*args, **kwd)
_._src_location = _location(1)[:2]
self._slots.append(_)
return _
self._start = _location(1)[1]
self._functor = self.__class__(None)
self._functor._slots = []
self._functor.call = types.MethodType(call, self._functor)
return self._functor
def __exit__(self, extype, exval, tb):
if extype is None:
filename, end, f_locals = _location(1)
fs = set()
for f in chain(f_locals.values(), self._functor._slots):
if isinstance(f, types.FunctionType):
if hasattr(f, '_src_location'):
fn, n = f._src_location
else:
code = f.__code__
fn, n = code.co_filename, code.co_firstlineno
if fn == filename and self._start <= n <= end:
fs.add((n, f))
functor = self
for n, f in sorted(fs, key=itemgetter(0)):
functor = functor._composition(f)
self._functor.value = functor.run()
return False
F = Functor
def run(x):
    '''run(functor) -> calculated value'''
return x.run()
class Lift:
'''Lift(func) -> new function lifted into a functor's context.
Alias for Lift: lift
Example:
>>> x = (lambda x: x + 1) << F(3)
>>> y = (lambda x: x ** 2) << F(5)
>>> z = lift(lambda x, y: x + y)(x, y)
>>> z()
29'''
def __init__(self, f):
self.f = f
def __call__(self, *margs):
cls = margs[0].__class__
_f = lambda _: cls.lift(self.f)(*margs)
functor = cls(None)
functor.fs.append(_f)
return functor
lift = Lift
def _location(depth=0):
frame = sys._getframe(depth + 1)
return frame.f_code.co_filename, frame.f_lineno, frame.f_locals
class Curry:
'''Curry(func) -> new curried function.
Curry(func, n) -> new curried function with an argument index.
Curry(func).name -> new curried function with a keyword argument.
Curry(func, keyword='name') -> it is same as Curry(func).name
This converts an uncurried function to a curried function.
Alias for Curry: curry, c_
The following laws are satisfied:
c_(f)(x)(*args, **kwd) == f(x, *args, **kwd)
c_(f, n)(x)(*args, **kwd) == f(*(list(args[:n])+[x]+list(args[n:])), **kwd)
c_(f).name(x)(*args, **kwd) == f(*args, **dict(name=x, **kwd)))
c_(f, keyword='name')(x)(*args, **kwd) == f(*args, **dict(name=x, **kwd)))
Example 1:
>>> run(F(range(10)) >> c_(map)(lambda x: x * 2)
... >> c_(filter)(lambda x: x < 7)
... >> c_(sorted).key(lambda x: -x))
[6, 4, 2, 0]
Example 2 (decorator):
>>> with F(range(10)) as box:
... @c_(map)
... def f(x):
... y = x % 3
... z = x + y
... return x + y + z
... @c_(sorted, keyword='key')
... def g(x):
... return (x % 7, x % 3, x)
>>> box.value
[0, 14, 8, 16, 10, 18, 4, 12, 6, 20]
'''
def __init__(self, func, index=0, keyword=None):
self._func = func
self._argument_index = index
self._keyword_argument = keyword
def __call__(self, x):
def _(*args, **kwd):
if self._keyword_argument is None:
args = list(args)
args.insert(self._argument_index, x)
else:
kwd = kwd.copy()
kwd[self._keyword_argument] = x
return self._func(*args, **kwd)
_._src_location = _location(1)[:2]
return _
def __getattr__(self, keyword):
self._keyword_argument = keyword
return self
curry = Curry
c_ = Curry
def call(f):
def _(*args, **kwd):
return f(*args, **kwd)
_._src_location = _location(1)[:2]
return _
``` |
{
"source": "jnana-cetana/XMeme",
"score": 2
} |
#### File: api/endpoints/memes.py
```python
from typing import Any, List
from fastapi import APIRouter, WebSocket, WebSocketDisconnect, BackgroundTasks, Depends, status
from fastapi.responses import Response
from sqlalchemy.orm import Session
from starlette.status import HTTP_404_NOT_FOUND
from app import crud, schemas
from app.api import deps, tasks
from app.core.manager import manager
router = APIRouter()
@router.get(
"",
response_model=List[schemas.Meme],
status_code=status.HTTP_200_OK
)
def read_memes(
db: Session = Depends(deps.get_db),
skip: int = 0,
limit: int = 100
) -> Any:
"""
Retrieve latest memes.
"""
memes = crud.meme.get_multi_latest(db=db, skip=skip, limit=limit)
return memes
@router.post(
"",
response_model=schemas.Meme,
response_model_include={"id"},
status_code=status.HTTP_201_CREATED
)
async def create_meme(
*,
db: Session = Depends(deps.get_db),
meme_in: schemas.MemeCreate,
background_tasks: BackgroundTasks
) -> Any:
"""
Create new meme.
"""
meme = crud.meme.create(db=db, obj_in=meme_in)
background_tasks.add_task(tasks.send_message, meme)
return meme
@router.patch(
"/{id}"
)
def update_meme(
*,
db: Session = Depends(deps.get_db),
id: int,
meme_in: schemas.MemeUpdate
) -> Any:
"""
Update a meme.
"""
meme = crud.meme.get(db=db, id=id)
if not meme:
return Response(status_code=status.HTTP_404_NOT_FOUND)
meme = crud.meme.update(db=db, db_obj=meme, obj_in=meme_in)
return Response(status_code=status.HTTP_204_NO_CONTENT)
@router.delete(
"/{id}"
)
def delete_meme(
*,
db: Session = Depends(deps.get_db),
id: int
) -> Any:
"""
Delete a meme.
"""
meme = crud.meme.get(db=db, id=id)
if not meme:
return Response(status_code=status.HTTP_404_NOT_FOUND)
meme = crud.meme.remove(db=db, id=id)
return Response(status_code=status.HTTP_200_OK)
@router.get(
"/{id}",
response_model=schemas.Meme,
status_code=status.HTTP_200_OK
)
def read_meme_by_id(
*,
db: Session = Depends(deps.get_db),
id: int
) -> Any:
"""
Get a specific meme by id.
"""
meme = crud.meme.get(db=db, id=id)
if not meme:
return Response(status_code=status.HTTP_404_NOT_FOUND)
return meme
@router.websocket(
"/ws/{client_id}"
)
async def websocket_endpoint(websocket: WebSocket, client_id: str):
"""
Realtime Meme Feed
"""
await manager.connect(websocket)
await manager.broadcast(dict(id="XMeme Admin", message=f"Client #{client_id} joined the chat."))
try:
while True:
data = await websocket.receive_json()
await manager.broadcast(data)
except WebSocketDisconnect:
manager.disconnect(websocket)
await manager.broadcast(dict(id="XMeme Admin", message=f"Client #{client_id} left the chat."))
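# Usage sketch (illustrative only): how these endpoints might be exercised with
# FastAPI's TestClient. The "/memes" prefix is an assumption -- the real prefix
# depends on how this router is mounted in the application, which is not shown.
#
#   from fastapi.testclient import TestClient
#   client = TestClient(app)
#   r = client.post("/memes", json={...})                   # create_meme -> {"id": ...}
#   client.get("/memes?skip=0&limit=10")                    # read_memes
#   client.patch(f"/memes/{r.json()['id']}", json={...})    # update_meme -> 204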
```
#### File: app/api/tasks.py
```python
from fastapi.encoders import jsonable_encoder
from app.schemas import Meme
from app.core.manager import manager
async def send_message(message: Meme):
obj_data = jsonable_encoder(message)
await manager.broadcast(obj_data)
``` |
{
"source": "JnaneshPrabhu/easy-mailer",
"score": 3
} |
#### File: easy-mailer/easy_mailer/send_mail.py
```python
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.base import MIMEBase
from email import encoders
import logging
logger = logging.getLogger()
def appendAttachments(file,msg,filename):
# open the file to be sent
attachment = open(file, "rb")
# instance of MIMEBase and named as p
p = MIMEBase('application', 'octet-stream')
# To change the payload into encoded form
p.set_payload((attachment).read())
# encode into base64
encoders.encode_base64(p)
p.add_header('Content-Disposition', "attachment; filename= %s" % filename)
# attach the instance 'p' to instance 'msg'
msg.attach(p)
return msg
def send_mail(username, password, recipient_list, subject, message = '', files = [], filenames = []):
"""
    Sends an email through Outlook's SMTP server using the input parameters described below.
Args:
username: required
type: str
The email id of outlook from which one needs to send an email.
password: <PASSWORD>
type: str
Password of outlook email for authentication.
        recipient_list: required
            type: list
            The recipient email addresses, enclosed in a list.
subject: required
type: str
The subject line of the mailer
message: optional
type: str
            Any text to be displayed in the body of the mailer.
files: optional
type: Any
            Attachments to be uploaded in the mailer; provide the absolute local path of each attachment. Note that mail size restrictions still apply.
filenames: optional
type: List of str
Applicable only when files is not empty.
            Pass a list containing the names the attachments should display in the mail. By default the processed absolute path names are shown.
Returns:
A mail is sent to intended recipients. Can be used to automate sending of mails/reports.
Raises:
KeyError: To be updated.
"""
msg = MIMEMultipart()
msg['From'] = username
msg['To'] = ', '.join(recipient_list)
msg['Subject'] = subject
if message != '':
msg.attach(MIMEText(message))
if len(files) > 0:
if len(filenames) != len(files):
            logger.error('The filenames and files list do not match in length. Please check your input parameters and try again')
            print('The filenames and files list do not match in length. Please check your input parameters and try again')
else:
for i in range(0,len(files)):
msg = appendAttachments(files[i],msg,filenames[i])
#Setting the threshold of logger to DEBUG
logger.setLevel(logging.DEBUG)
logger.info('Sending mail')
try:
mailServer = smtplib.SMTP('smtp-mail.outlook.com', 587)
mailServer.ehlo()
mailServer.starttls()
mailServer.ehlo()
mailServer.login(username, password)
mailServer.sendmail(username, recipient_list, msg.as_string())
mailServer.close()
logger.info('Mail sent.')
except Exception as e:
logger.error(e)
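# Example call (illustrative sketch only; the address, password and file paths
# below are placeholders, not real values):
#
#   send_mail(
#       username='sender@outlook.com',
#       password='app-password',
#       recipient_list=['person@example.com'],
#       subject='Daily report',
#       message='Report attached.',
#       files=['/tmp/report.pdf'],
#       filenames=['report.pdf'],
#   )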
``` |
{
"source": "jnankin/phaxio-python",
"score": 2
} |
#### File: swagger_client/models/country.py
```python
from pprint import pformat
from six import iteritems
import re
class Country(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, name=None, alpha2=None, country_code=None, price_per_page=None, send_support=None, receive_support=None):
"""
Country - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'name': 'str',
'alpha2': 'str',
'country_code': 'int',
'price_per_page': 'int',
'send_support': 'str',
'receive_support': 'str'
}
self.attribute_map = {
'name': 'name',
'alpha2': 'alpha2',
'country_code': 'country_code',
'price_per_page': 'price_per_page',
'send_support': 'send_support',
'receive_support': 'receive_support'
}
self._name = name
self._alpha2 = alpha2
self._country_code = country_code
self._price_per_page = price_per_page
self._send_support = send_support
self._receive_support = receive_support
@property
def name(self):
"""
Gets the name of this Country.
:return: The name of this Country.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""
Sets the name of this Country.
:param name: The name of this Country.
:type: str
"""
self._name = name
@property
def alpha2(self):
"""
Gets the alpha2 of this Country.
:return: The alpha2 of this Country.
:rtype: str
"""
return self._alpha2
@alpha2.setter
def alpha2(self, alpha2):
"""
Sets the alpha2 of this Country.
:param alpha2: The alpha2 of this Country.
:type: str
"""
self._alpha2 = alpha2
@property
def country_code(self):
"""
Gets the country_code of this Country.
:return: The country_code of this Country.
:rtype: int
"""
return self._country_code
@country_code.setter
def country_code(self, country_code):
"""
Sets the country_code of this Country.
:param country_code: The country_code of this Country.
:type: int
"""
self._country_code = country_code
@property
def price_per_page(self):
"""
Gets the price_per_page of this Country.
:return: The price_per_page of this Country.
:rtype: int
"""
return self._price_per_page
@price_per_page.setter
def price_per_page(self, price_per_page):
"""
Sets the price_per_page of this Country.
:param price_per_page: The price_per_page of this Country.
:type: int
"""
self._price_per_page = price_per_page
@property
def send_support(self):
"""
Gets the send_support of this Country.
:return: The send_support of this Country.
:rtype: str
"""
return self._send_support
@send_support.setter
def send_support(self, send_support):
"""
Sets the send_support of this Country.
:param send_support: The send_support of this Country.
:type: str
"""
self._send_support = send_support
@property
def receive_support(self):
"""
Gets the receive_support of this Country.
:return: The receive_support of this Country.
:rtype: str
"""
return self._receive_support
@receive_support.setter
def receive_support(self, receive_support):
"""
Sets the receive_support of this Country.
:param receive_support: The receive_support of this Country.
:type: str
"""
self._receive_support = receive_support
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
``` |
{
"source": "JNAnnis/Reports337",
"score": 4
} |
#### File: JNAnnis/Reports337/Report_05_Updated.py
```python
import numpy as np
import matplotlib.pyplot as plt
def cell_auto():
"""
    Method to create a 2 x 2 subplot containing 4 plots, each with a different
    cellular automaton. The cellular automata have the following characteristics:
- A one dimensional grid with each cell having just two neighbors, one
on each side.
    - Four possible cell states: 0, 1, 2, 3, each represented by a
different color.
- Periodic edge conditions: the rightmost and leftmost cells are
neighbors.
- The transition rule is that the state of each cell is updated based
on the sum of the current cell state and those of its neighbors.
    All cellular automata change every time the function is run.
Parameters:
None
Returns:
None
"""
nGen = 25 # number of generations to apply transition rule to
nCells = 25 # number of cells
colors = np.array([[1, 0, 0], [0, 1, 1], [0.5, 0, 1], [1, 1, 0]],
dtype = float) # red, cyan, purple, yellow
plt.figure(figsize = (10, 10))
nRow = 2
nCol = 2
for j in range(1, nRow*nCol + 1):
cellState = np.empty((nGen, nCells), dtype = int)
cellState[0] = np.random.randint(4, size = nCells)
rule = np.random.randint(4, size = 10) # transition rule
for i in range(1, nGen):
sumCells = (cellState[i-1] + np.roll(cellState[i-1], -1)
+ np.roll(cellState[i-1], 1))
cellState[i] = rule[sumCells]
cellColor = colors[cellState]
plt.subplot(nRow, nCol, j)
plt.subplots_adjust(wspace = 0.5)
plt.imshow(cellColor, interpolation = 'None')
plt.title(str(j), fontsize = 16)
plt.xlabel(str(rule), fontsize = 16)
plt.xticks([])
plt.yticks([])
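# Example (added sketch): calling cell_auto() builds the 2 x 2 figure of random
# automata; outside interactive backends a trailing plt.show() is needed:
#
#   cell_auto()
#   plt.show()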
``` |
{
"source": "jnarhan/Breast_Cancer",
"score": 2
} |
#### File: src/helper_modules/jn_bc_helper.py
```python
import os
import gc
import csv
import sys
import time
import shutil
import itertools
import collections
import numpy as np
from scipy import misc
import keras.callbacks as cb
from keras.utils import np_utils
from matplotlib import pyplot as plt
def pprint(msg):
print '-' * len(msg)
print msg
print '-' * len(msg)
# Copy of D. Dittenhafer's loading and balancing by removal.
# Balances the data set by removing images from over-represented classes
def load_meta(metaFile, patho_idx, file_idx, balanceByRemoval = False, verbose = False):
bcMetaFile = {}
bcCounts = collections.defaultdict(int)
with open(metaFile, 'r') as csvfile:
bcCSV = csv.reader(csvfile)
headers = bcCSV.next()
for row in bcCSV:
patho = row[ patho_idx].lower()
bcMetaFile[ row[file_idx]] = patho
bcCounts[patho] += 1
if verbose:
pprint('Before Balancing')
for k in bcCounts:
print '{0:10}: {1}'.format(k, bcCounts[k])
if balanceByRemoval:
balanceViaRemoval(bcMetaFile, bcCounts, factor=1.0)
if verbose:
pprint('After Balancing')
for k in bcCounts:
print '{0:10}: {1}'.format(k, bcCounts[k])
return bcMetaFile, bcCounts
# Minor addition to only retain meta data on images that exist
def clean_meta(meta_data, imgPath):
print 'Number of entries in incoming meta_data: {}'.format(len(meta_data))
found = 0
not_found = 0
for i, fn in enumerate(meta_data.keys()):
filepath = os.path.join(imgPath, fn)
if os.path.exists(filepath):
found += 1
else:
del meta_data[fn]
not_found += 1
print 'Images found: {}'.format(found)
print 'Images missing: {}'.format(not_found)
print 'Number of entries of outgoing meta_data: {}'.format(len(meta_data))
return meta_data
def balanceViaRemoval(meta, counts, depth = 0, factor = 1.50):
if(depth >= 2):
return
# First get mean items per category
num_obs = len(meta)
num_classes = len(counts)
avgC = num_obs / num_classes
theshold = avgC * factor
if depth == 0:
print "balanceViaRemoval.avgC: " + str(avgC)
print "balanceViaRemoval.theshold: " + str(theshold)
# Determine categories for balancing.
toBeBalanced = []
for c in counts.keys():
if counts[c] > theshold:
toBeBalanced.append(c)
# iterate over categories to be balanced and do balancing.
for b in toBeBalanced:
candidatesForRemoval = []
for f in meta.keys():
if meta[f] == b:
candidatesForRemoval.append(f)
np.random.shuffle(candidatesForRemoval)
candidatesForRemoval = candidatesForRemoval[avgC:]
for c in candidatesForRemoval:
del meta[c]
counts[b] = avgC
balanceViaRemoval(meta, counts, depth + 1, factor)
def get_clsCnts(y_data, cats):
ys = np.ravel(y_data)
labels = reverseDict(cats)
bcCounts = collections.defaultdict(int)
for lab in ys:
bcCounts[lab] += 1
try:
for key, value in labels.items():
bcCounts[value] = bcCounts.pop(key)
except KeyError:
pass
return bcCounts
# Alternative to balancing by over-sampling of minority cases through synthetic augmentation
def balanceViaSmote(cls_cnts, meta_info, data_dir, aug_dir, catagories,
datagen, X_data, Y_data, imgResize = None, seed=None, verbose=False):
aug_imgs = []
if seed:
np.random.seed(seed)
max_class_key = max(cls_cnts, key=cls_cnts.get)
max_class_val = cls_cnts[ max_class_key ]
for key, value in cls_cnts.items():
if key == max_class_key:
pass
else:
grow_by = max_class_val - value
imgs = {k:v for k, v in meta_info.items() if v == key}
# take a random selection of grow_by size, with replacement
key_indxs = np.random.choice(imgs.keys(), size=grow_by, replace=True)
for k1 in key_indxs:
aug_imgs.append({k:v for k,v in imgs.items() if k == k1})
save_dir = aug_dir + key + '/'
# Overwrite folder and contents if folder exists:
if os.path.exists(save_dir):
shutil.rmtree(save_dir)
os.makedirs(save_dir)
# Load randomly selected images of given catagory into memory
aug_X = list()
aug_Y = list()
for i in aug_imgs:
img_x, img_y = load_data(i, data_dir, catagories, imgResize=imgResize)
aug_X.append(img_x)
aug_Y.append(img_y)
# Generate augmented images
aug_X = np.reshape(aug_X, (len(aug_X), aug_X[0].shape[0], aug_X[0].shape[1], aug_X[0].shape[2]))
for x_batch, y_batch in datagen.flow(aug_X, aug_Y, batch_size=len(aug_X), seed=seed,
save_to_dir= save_dir,
save_prefix= key + '_aug',
save_format= 'png'):
X_data = np.concatenate(
(X_data, np.reshape(x_batch, (len(x_batch), x_batch.shape[2], x_batch.shape[3]))))
Y_data = np.concatenate((Y_data, np.reshape(y_batch, (len(y_batch), y_batch.shape[2]))))
break
if verbose:
bcCounts = get_clsCnts(Y_data, catagories)
pprint('After Balancing')
for k in bcCounts:
print '{0:10}: {1}'.format(k, bcCounts[k])
return X_data, Y_data
def bcLabels(items):
labels = {}
for i, item in enumerate(items):
labels[item] = i
return labels
def reverseDict(d):
ndxBC = {}
for k in d:
ndxBC[d[k]] = k
return ndxBC
# Adapted from <NAME>
def load_data(metaData, imgPath, categories, imgSize = (255,255), imgResize = None,
verbose = True, verboseFreq = 200):
total = len(metaData)
x, y = imgSize
if imgResize is not None:
x, y = imgResize
# Allocate containers for the data
X_data = np.zeros( [total, x, y])
Y_data = np.zeros( [total, 1], dtype=np.int8)
# Load images based on meta_data:
for i, fn in enumerate( metaData.keys()):
filepath = os.path.join(imgPath, fn)
if os.path.exists(filepath):
img = misc.imread(filepath, flatten=True)
else:
img = None
print "Not Found: " + filepath
if imgResize is not None:
img = misc.imresize(img, imgResize)
gc.collect()
X_data[i] = img
Y_data[i] = categories[ metaData[fn].lower()]
X_data = X_data.astype('float32')
X_data /= float(255)
return X_data, Y_data
def prep_data(data, labels):
print 'Prep data for NNs ...'
X_train, X_test, y_train, y_test = data
# one-hot encoding of output i.e int to binary matrix rep:
y_train = np_utils.to_categorical(zip(*y_train)[0], len(labels))
y_test = np_utils.to_categorical(zip(*y_test)[0], len(labels))
channel, width, height = (1, X_train[0].shape[0], X_train[0].shape[1])
# CNN require [channel e.g grayscale = 1][width][height]
X_train = np.reshape(X_train, (len(X_train), channel, width, height))
X_test = np.reshape(X_test, (len(X_test), channel, width, height))
print 'Data Prepped for Neural Nets.'
return [X_train, X_test, y_train, y_test]
class LossHistory(cb.Callback):
def on_train_begin(self, logs={}):
self.losses = []
self.acc = []
def on_epoch_end(self, epoch, logs={}):
epoch_tr_loss = logs.get('loss')
epoch_val_loss = logs.get('val_loss')
self.losses.append([epoch_tr_loss, epoch_val_loss])
epoch_tr_acc = logs.get('acc')
epoch_val_acc = logs.get('val_acc')
self.acc.append([epoch_tr_acc, epoch_val_acc])
def run_network(data, model, aug=False, dataGen=None, earlyStop=False, epochs=20, batch=256, seed=7):
try:
if aug and (dataGen is None):
raise ValueError('Attempting to augment data without providing inline data generator.')
start_time = time.time()
cbs = []
X_train, X_test, y_train, y_test = data
history = LossHistory()
cbs.append(history)
if earlyStop:
earlyStopping = cb.EarlyStopping(monitor='val_loss', min_delta=0, patience=1, verbose=2, mode='auto')
cbs.append(earlyStopping)
print 'Training model...'
if not aug:
model.fit(X_train, y_train, epochs=epochs, batch_size=batch,
callbacks=cbs, validation_data=(X_test, y_test), verbose=2)
else:
model.fit_generator(dataGen.flow(X_train, y_train, batch_size=batch, seed=seed),
steps_per_epoch=len(X_train) / batch,
epochs=epochs,
callbacks=cbs,
validation_data=(X_test, y_test), verbose=2)
print "Training duration : {0}".format(time.time() - start_time)
score = model.evaluate(X_test, y_test, batch_size=16, verbose=2) # Evaluate the model
print "Network's test score [loss, accuracy]: {0}".format(score)
print 'CNN Error: {:.2f}%'.format(100 - score[1] * 100)
return model, history.losses, history.acc, score
except ValueError as err:
print 'Error: {}'.format(err)
sys.exit(1)
except KeyboardInterrupt:
print 'KeyboardInterrupt'
return model, history.losses
def predict(model, images):
return model.predict_classes(images, verbose=2)
def plot_losses(losses, acc):
fig = plt.figure()
ax = fig.add_subplot(221)
ax.plot(losses)
ax.set_title('Model Loss')
ax.set_ylabel('loss')
ax.set_xlabel('epoch')
ax.legend(['train', 'test'], loc='upper left')
ax = fig.add_subplot(222)
ax.plot(acc)
ax.set_title('Model Accuracy')
ax.set_ylabel('accuracy')
ax.set_xlabel('epoch')
ax.legend(['train', 'test'], loc='upper left')
def save_model(dir_path, model, name):
curr_dir = os.getcwd()
os.chdir(dir_path)
with open(name + "_model.yaml", "w") as yaml_file:
yaml_file.write(model.to_yaml())
model.save_weights(name + "_weights.hd5", overwrite=True)
print ("Model and Weights Saved to Disk")
os.chdir(curr_dir)
# From: http://scikit-learn.org/stable/auto_examples/model_selection/
# plot_confusion_matrix.html#sphx-glr-auto-examples-model-selection-plot-confusion-matrix-py
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, '{0:.4f}'.format(cm[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
def cat_stats(matrix, TP_idx=3):
matrix = matrix.flatten()
TP = matrix[TP_idx]
if TP_idx == 3:
TN = matrix[0]
FP = matrix[1]
FN = matrix[2]
elif TP_idx == 0:
TN = matrix[3]
FP = matrix[2]
FN = matrix[1]
else:
print ('ERROR: Uncertain how to interpret confusion matrix')
# Refers to the test's ability to correctly detect patients who do have the condition.
# When it’s actually yes, how often does it predict yes:
Sensitivity = TP / float(TP + FN)
# Refers to the test's ability to correctly detect patients without a condition.
    # When it’s actually no, how often does it predict no.
Specificity = TN / float(FP + TN)
    # The proportion of positive test results that are true positives.
# When it predicts yes, how often is it correct.
PPV = TP / float(TP + FP)
    # The proportion of negative test results that are true negatives.
    # When it predicts no, how often is it correct.
NPV = TN / float(TN + FN)
# Reaches its best at 1 and worst at 0.
F1 = float((Sensitivity * PPV) / (Sensitivity + PPV)) * 2
# Overall, how often is the classifier correct.
Class_Acc = (TP + TN) / float(TP + FN + FP + TN)
return {'Sensitivity': round(Sensitivity * 100, 2), 'Specificity': round(Specificity * 100, 2),
'PPV': round(PPV * 100, 2), 'NPV': round(NPV * 100, 2), 'F1': round(F1, 2),
'Accuracy': round(Class_Acc * 100, 2)}
if __name__ == '__main__':
# Add test cases here
test_mat = np.array([[228, 40],
[37, 257]])
print cat_stats(test_mat)
```
#### File: src/models/dwdii_bc_model_helper.py
```python
__author__ = '<NAME>'
import collections
import csv
import os
import random
import sys
import gc
import itertools
from decimal import *
from scipy import misc
from scipy import ndimage
import numpy as np
from scipy import misc
from scipy import ndimage
import cv2
import matplotlib.cm as cm
import matplotlib.pyplot as plt
NDX_NAME = 0
NDX_TYPE = 1
NDX_ABTYPE = 2
NDX_SCANNER = 3
NDX_SUBFOLDER = 4
NDX_PATHOLOGY = 5
def load_training_metadata(metadataFile,
balanceViaRemoval = False,
verbose=False,
exclude = ['unproven', 'pathology', 'benign_without_callback'],
normalVsAbnormal=False):
""" Loads the designated meta data optionally balancing the data by removing heavily weighted category entries.
3 result sets are returned:
1) Dictionary where key = filename and value = label (normal, benign, malignant)
    2) Dictionary where key = filename and value = the sub folder (from the meta data) containing the image
3) Dictionary where key = label (normal, benign, etc) and value = count of images in category.
:param metadataFile:
:param balanceViaRemoval:
:param verbose:
:param exclude:
:return:
"""
# Load the existing CSV so we can skip what we've already worked on
abnormalList = ["benign", "malignant"]
bcDict = {}
bcMetaDict = {}
bcCounts = collections.defaultdict(int)
with open(metadataFile, 'r') as csvfile:
bcCsv = csv.reader(csvfile)
headers = bcCsv.next()
for row in bcCsv:
subfld = row[NDX_SUBFOLDER]
patho = row[NDX_PATHOLOGY].lower()
if patho == "":
patho = "normal"
if patho in exclude:
pass
else:
if normalVsAbnormal and (patho in abnormalList):
patho = "abnormal"
# Load into our result sets
bcDict[row[0]] = patho
bcMetaDict[row[0]] = (subfld)
bcCounts[patho] += 1
if verbose:
print "Raw Balance"
print "----------------"
for e in bcCounts:
print e, bcCounts[e]
if balanceViaRemoval:
balanaceViaRemoval(bcCounts, bcDict, factor=1.0)
if verbose:
print
print "After Balancing"
print "----------------"
for e in bcCounts:
print e, bcCounts[e]
return bcDict, bcMetaDict, bcCounts
def balanaceViaRemoval(emoCounts, emoDict, depth = 0, factor = 1.50):
if(depth >= 2):
return
# First get mean items per category
sum = len(emoDict)
avgE = sum / len(emoCounts)
theshold = avgE * factor
if depth == 0:
print "balanaceViaRemoval.avgE: " + str(avgE)
print "balanaceViaRemoval.theshold: " + str(theshold)
# Determine categories for balancing.
toBeBalanced = []
for e in emoCounts.keys():
if emoCounts[e] > theshold:
toBeBalanced.append(e)
# iterate over categories to be balanced and do balancing.
for b in toBeBalanced:
candidatesForRemoval = []
for f in emoDict.keys():
if emoDict[f] == b:
candidatesForRemoval.append(f)
random.shuffle(candidatesForRemoval)
candidatesForRemoval = candidatesForRemoval[avgE:]
for c in candidatesForRemoval:
del emoDict[c]
emoCounts[b] = avgE
balanaceViaRemoval(emoCounts, emoDict, depth + 1, factor)
def bcNumerics():
emoNdx = {}
emoNdx["normal"] = 0
emoNdx["benign"] = 1
emoNdx["malignant"] = 2
return emoNdx
def numericBC():
emoNdx = bcNumerics()
ndxEmo = {}
for k in emoNdx:
ndxEmo[emoNdx[k]] = k
return ndxEmo
def bcNormVsAbnormNumerics():
emoNdx = {}
emoNdx["normal"] = 0
emoNdx["abnormal"] = 1
return emoNdx
def reverseDict(d):
ndxEmo = {}
for k in d:
ndxEmo[d[k]] = k
return ndxEmo
def load_data(metadataFile,
imagesPath,
categories = bcNumerics(),
verbose=True,
verboseFreq = 200,
maxData = None,
imgSize = (350, 350),
imgResize = None,
thesePathos = None,
normalVsAbnormal = False):
"""Helper function to load the training/test data"""
show = False
# Load the CSV meta data
emoMetaData, bcDetaDict, bcCounts = load_training_metadata(metadataFile, True, verbose=verbose, normalVsAbnormal=normalVsAbnormal)
total = len(emoMetaData)
ndx = 0
x, y = imgSize
if imgResize is not None:
x, y = imgResize
if maxData is not None:
total = maxData
# Allocate containers for the data
X_data = np.zeros([total, x, y])
Y_data = np.zeros([total, 1], dtype=np.int8)
# load the image bits based on what's in the meta data
for k in emoMetaData.keys():
if thesePathos is None or emoMetaData[k] in thesePathos:
# Load the file
filepath = os.path.join(imagesPath, bcDetaDict[k][0], k)
#filepath = filepath + ".png"
if(os.path.exists(filepath)):
img = misc.imread(filepath, flatten = True) # flatten = True?
else:
img = None
print "Not Found: " + filepath
# Only accept images that were loaded
if img is not None:
# Verbose status
if verbose and ndx % verboseFreq == 0:
msg = "{0:.4f}: {1}\r\n".format(ndx / Decimal(total), k)
sys.stdout.writelines(msg)
# Resize if desired.
if imgResize is not None:
img = misc.imresize(img, imgResize)
gc.collect()
if show:
plt.imshow(img, cmap=cm.gray)
plt.show()
X_data[ndx] = img
rawEmotion = emoMetaData[k]
emotionKey = rawEmotion.lower()
emotionNdx = categories[emotionKey]
Y_data[ndx] = emotionNdx
ndx += 1
if maxData is not None and maxData <= ndx:
break
Y_data = Y_data[:ndx]
X_data = X_data[:ndx]
X_data = X_data.astype('float32')
X_data /= 255.0
return X_data, Y_data
def to_categorical(y, nb_classes=None):
'''Convert class vector (integers from 0 to nb_classes) to binary class matrix, for use with categorical_crossentropy.
# Arguments
y: class vector to be converted into a matrix
nb_classes: total number of classes
# Returns
A binary matrix representation of the input.
'''
if not nb_classes:
nb_classes = np.max(y)+1
Y = np.zeros((len(y), nb_classes))
for i in range(len(y)):
Y[i, y[i]] = 1.
return Y
def getNameParts(name):
parts = name.split(".")
sideParts = parts[1].split("_")
case = parts[0]
side = sideParts[0]
return case, side
def splitTrainTestValSets(metadataFile, valCsv, testCsv, trainCsv, valSize = 100, trainPct = 0.80, seed = 20275):
"""Generates 3 CSV files containing the meta data split from the source meta data file. First a Numpy
random shuffle is performed on the data loaded from the metadataFile.
:param metadataFile: the path to the source CSV file
:param valCsv: The path to the output CSV to be overwritten by the new validation meta data.
:param testCsv: The path to the output CSV to be overwritten by the new test meta data.
:param trainCsv: The path to the output CSV to be overwritten by the new train meta data.
:param valSize: The number of data rows to pull out for validation purposes
:param trainPct: Of the remaining data rows after the validation rows have been removed, the percent of
                     data to separate for training purposes. After the training data is extracted, the final
remaining data is saved to the test data set.
"""
caseSides = {}
with open(metadataFile, 'r') as csvfile:
bcCsv = csv.reader(csvfile)
headers = bcCsv.next()
headers = bcCsv.next()
for row in bcCsv:
case, side = getNameParts(row[NDX_NAME])
key = "{0}-{1}".format(case, side)
# build list of case-sides
caseSides[key] = (case, side)
# Split the keys up
csKeys = caseSides.keys()
# Shuffle
np.random.seed(seed)
np.random.shuffle(csKeys)
valKeys = csKeys[0 : valSize]
    remainingKeys = csKeys[valSize:]
trainNdx = int(round(len(remainingKeys) * trainPct))
trainKeys = remainingKeys[0 : trainNdx]
    testKeys = remainingKeys[trainNdx:]
# split the actual meta data
with open(metadataFile, 'r') as csvfile:
with open(valCsv, 'wb') as valfile:
with open(testCsv, 'wb') as testfile:
with open(trainCsv, 'wb') as trainfile:
bcCsv = csv.reader(csvfile)
valCsv = csv.writer(valfile)
testCsv = csv.writer(testfile)
trainCsv = csv.writer(trainfile)
headers = bcCsv.next()
headers = bcCsv.next()
valCsv.writerow(headers)
testCsv.writerow(headers)
trainCsv.writerow(headers)
for row in bcCsv:
case, side = getNameParts(row[NDX_NAME])
key = "{0}-{1}".format(case, side)
if(key in valKeys):
valCsv.writerow(row)
elif (key in testKeys):
testCsv.writerow(row)
elif (key in trainKeys):
trainCsv.writerow(row)
return trainKeys, testKeys, valKeys
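# Example usage (sketch; the CSV paths are placeholders):
#   trainKeys, testKeys, valKeys = splitTrainTestValSets(
#       "meta.csv", "val.csv", "test.csv", "train.csv", valSize=100, trainPct=0.80)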
# for k in csKeys:
def load_mias_labeldata(metadataFile, skip_lines=102):
ld = {}
with open(metadataFile, 'r') as csvfile:
emoCsv = csv.reader(csvfile, delimiter=' ')
# skip first 104 lines of description info
for i in range(0, skip_lines):
emoCsv.next()
for row in emoCsv:
            if len(row) >= 3:
ld[row[0]] = [row[2]]
if row[2] != "NORM":
ld[row[0]].append(row[3])
return ld
# From: http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html#sphx-glr-auto-examples-model-selection-plot-confusion-matrix-py
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, '{0:.4f}'.format(cm[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
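# Example usage (sketch; cm would come from sklearn.metrics.confusion_matrix on the
# predicted vs. true labels):
#   plot_confusion_matrix(cm, classes=["normal", "benign", "malignant"], normalize=True)
#   plt.show()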
def cleanDataSet(csvFile, imageRoot):
data = []
with open(csvFile, 'r') as csvfile:
bcCsv = csv.reader(csvfile)
headers = bcCsv.next()
for row in bcCsv:
name = row[NDX_NAME]
subfld = row[NDX_SUBFOLDER]
fullName = os.path.join(imageRoot, subfld, name)
if os.path.exists(fullName):
data.append(row)
else:
print "Not found: " + fullName
with open(csvFile + "2.csv", 'wb') as file:
dataCsv = csv.writer(file)
dataCsv.writerow(headers)
for row in data:
dataCsv.writerow(row)
def reflectY(img):
tx = [[1, 0], [0, -1]]
offset = [0, img.shape[0]]
img2 = ndimage.interpolation.affine_transform(img, tx, offset)
return img2
``` |
{
"source": "jnarowski/crypto-index-fund-bot",
"score": 3
} |
#### File: crypto-index-fund-bot/bot/commands.py
```python
import typing as t
from decimal import Decimal
from . import (
convert_stablecoins,
exchanges,
market_buy,
market_cap,
open_orders,
portfolio,
)
from .data_types import CryptoBalance, MarketBuyStrategy, SupportedExchanges
from .user import User
# TODO not really sure the best pattern for implementing the command/interactor pattern but we are going to give this a try
class PortfolioCommand:
@classmethod
def execute(cls, user: User) -> t.List[CryptoBalance]:
portfolio_target = market_cap.coins_with_market_cap(user)
external_portfolio = user.external_portfolio
user_portfolio = []
        # pull a raw binance portfolio from exchanges.py and add percentage allocations to it
for exchange in user.exchanges:
user_portfolio = exchanges.portfolio(exchange, user)
# TODO when we actually support multiple exchanges we'll need to do something like this
# user_portfolio = portfolio.merge_portfolio(user_portfolio, external_portfolio)
user_portfolio = portfolio.merge_portfolio(user_portfolio, external_portfolio)
user_portfolio = portfolio.add_price_to_portfolio(user_portfolio, user.purchasing_currency)
user_portfolio = portfolio.portfolio_with_allocation_percentages(user_portfolio)
user_portfolio = portfolio.add_missing_assets_to_portfolio(user, user_portfolio, portfolio_target)
user_portfolio = portfolio.add_percentage_target_to_portfolio(user_portfolio, portfolio_target)
# TODO https://github.com/python/typing/issues/760
# highest percentages first in the output table
user_portfolio.sort(key=lambda balance: balance["target_percentage"], reverse=True)
return user_portfolio
class BuyCommand:
# TODO we should break this up into smaller functions
@classmethod
def execute(cls, user: User, purchase_balance: t.Optional[Decimal] = None) -> t.Tuple[Decimal, t.List, t.List]:
if user.buy_strategy == MarketBuyStrategy.LIMIT and user.cancel_stale_orders:
open_orders.cancel_stale_open_orders(user, SupportedExchanges.BINANCE)
# TODO support multiple exchanges here
current_portfolio = exchanges.portfolio(SupportedExchanges.BINANCE, user)
if user.convert_stablecoins:
convert_stablecoins.convert_stablecoins(user, SupportedExchanges.BINANCE, current_portfolio)
# TODO we should wait for the stablecoin sells to clear and then refresh the portfolio
external_portfolio = user.external_portfolio
external_portfolio = portfolio.add_price_to_portfolio(external_portfolio, user.purchasing_currency)
current_portfolio = portfolio.merge_portfolio(current_portfolio, external_portfolio)
current_portfolio = portfolio.add_price_to_portfolio(current_portfolio, user.purchasing_currency)
current_portfolio = portfolio.portfolio_with_allocation_percentages(current_portfolio)
# TODO we should protect against specifying purchasing currency when in livemode
# also, I don't love that this parameter is passed in, feels odd
# TODO this needs to be adjusted for a particular exchange
if not purchase_balance:
purchase_balance = market_buy.purchasing_currency_in_portfolio(user, current_portfolio)
portfolio_target = market_cap.coins_with_market_cap(user)
sorted_market_buys = market_buy.calculate_market_buy_preferences(
portfolio_target, current_portfolio, deprioritized_coins=user.deprioritized_coins
)
market_buys = market_buy.determine_market_buys(user, sorted_market_buys, current_portfolio, portfolio_target, purchase_balance)
completed_orders = market_buy.make_market_buys(user, market_buys)
return (purchase_balance, market_buys, completed_orders)
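# Example usage (sketch; `user` would come from bot.user.user_from_env(), as in the tests below):
#   purchase_balance, market_buys, completed_orders = BuyCommand.execute(user)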
```
#### File: crypto-index-fund-bot/bot/market_buy.py
```python
import typing as t
from decimal import Decimal
from . import exchanges
from .data_types import (
CryptoBalance,
CryptoData,
MarketBuy,
MarketBuyStrategy,
SupportedExchanges,
)
from .user import User
from .utils import log
def calculate_market_buy_preferences(
target_index: t.List[CryptoData],
current_portfolio: t.List[CryptoBalance],
deprioritized_coins: t.List[str],
) -> t.List[CryptoData]:
"""
Buying priority:
    1. Buying what hasn't been deprioritized by the user
2. Buying what has > 1% of the market cap
3. Buying what's unique to this exchange
4. Buying something new, as opposed to getting closer to a new allocation
5. Buying whatever has dropped the most
6. Buying what has the most % delta from the target
Filter out coins that have exceeded their targets
"""
log.info("calculating market buy preferences", target_index=len(target_index), current_portfolio=len(current_portfolio))
coins_below_index_target: t.List[CryptoData] = []
# first, let's exclude all coins that we've exceeded target on
for coin_data in target_index:
current_percentage = next((balance["percentage"] for balance in current_portfolio if balance["symbol"] == coin_data["symbol"]), 0)
if current_percentage < coin_data["percentage"]:
coins_below_index_target.append(coin_data)
else:
log.debug("coin exceeding target, skipping", symbol=coin_data["symbol"], percentage=current_percentage, target=coin_data["percentage"])
sorted_by_largest_target_delta = sorted(
coins_below_index_target,
key=lambda coin_data: next((balance["percentage"] for balance in current_portfolio if balance["symbol"] == coin_data["symbol"]), Decimal(0))
- coin_data["percentage"],
)
# TODO think about grouping drops into tranches so the above sort isn't completely useless
sorted_by_largest_recent_drop = sorted(
sorted_by_largest_target_delta,
# TODO should we use 7d change vs 30?
key=lambda coin_data: coin_data["change_30d"],
)
# prioritize tokens we don't own yet
symbols_in_current_allocation = [item["symbol"] for item in current_portfolio]
sorted_by_unowned_coins = sorted(
sorted_by_largest_recent_drop, key=lambda coin_data: 1 if coin_data["symbol"] in symbols_in_current_allocation else 0
)
# prioritize tokens that make up > 1% of the market
# and either (a) we don't own or (b) our target allocation is off by a factor of 6
# why 6? It felt right based on looking at what I wanted out of my current allocation
def should_token_be_treated_as_unowned(coin_data: CryptoData) -> int:
if coin_data["percentage"] < 1:
return 1
current_percentage = next((balance["percentage"] for balance in current_portfolio if balance["symbol"] == coin_data["symbol"]), 0)
if current_percentage == 0:
return 0
current_allocation_delta = coin_data["percentage"] / current_percentage
if current_allocation_delta > 6:
return 0
else:
return 1
sorted_by_large_market_cap_coins = sorted(sorted_by_unowned_coins, key=should_token_be_treated_as_unowned)
# last, but not least, let's respect the user's preference for deprioritizing coins
sorted_by_deprioritized_coins = sorted(
sorted_by_large_market_cap_coins, key=lambda coin_data: 1 if coin_data["symbol"] in deprioritized_coins else 0
)
return sorted_by_deprioritized_coins
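# Note on the chained sorted() calls above (sketch with toy data, not part of the bot):
# Python's sort is stable, so the *last* key applied (user deprioritization) is the most
# significant and the earlier keys only break ties.
#   rows = [{"sym": "A", "delta": -3}, {"sym": "B", "delta": -1}, {"sym": "C", "delta": -2}]
#   step1 = sorted(rows, key=lambda r: r["delta"])        # A, C, B  (largest shortfall first)
#   step2 = sorted(step1, key=lambda r: r["sym"] == "A")  # C, B, A  (deprioritized "A" moves last)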
def purchasing_currency_in_portfolio(user: User, portfolio: t.List[CryptoBalance]) -> Decimal:
# ideally, we wouldn't need to have a reserve amount. However, FP math is challenging and it's easy
# to be off a cent or two. It's easier just to reserve $1 and not deal with it. Especially for a fun project.
reserve_amount = 1
total = sum([balance["usd_total"] for balance in portfolio if balance["symbol"] == user.purchasing_currency])
# TODO we need some sort of `max` overload to treat a decimal as a `SupportsLessThanT`
return max(total - reserve_amount, Decimal(0)) # type: ignore
def determine_market_buys(
user: User,
sorted_buy_preferences: t.List[CryptoData],
current_portfolio: t.List[CryptoBalance],
target_portfolio: t.List[CryptoData],
purchase_balance: Decimal,
) -> t.List[MarketBuy]:
"""
1. Is the asset currently trading?
2. Do we have the minimum purchase amount?
3. Are there open orders for the asset already?
"""
# binance fees are fixed based on account configuration (BNB amounts, etc) and cannot be pulled dynamically
# so we don't worry or calculate these as part of our buying preference calculation
# TODO we'll need to explore if this is different for other exchanges
# it doesn't look like this is specified in the API, and the minimum is different
# depending on if you are using the pro vs simple view. This is the purchasing minimum on binance
# but not on
exchange_purchase_minimum = exchanges.purchase_minimum(SupportedExchanges.BINANCE)
user_purchase_minimum = user.purchase_min
user_purchase_maximum = user.purchase_max
portfolio_total = sum(balance["usd_total"] for balance in current_portfolio)
if purchase_balance < exchange_purchase_minimum:
log.info("not enough USD to buy anything", purchase_balance=purchase_balance)
return []
log.info(
"enough purchase currency balance",
balance=purchase_balance,
exchange_minimum=exchange_purchase_minimum,
user_minimum=user_purchase_minimum,
)
purchase_total = purchase_balance
purchases = []
existing_orders = exchanges.open_orders(SupportedExchanges.BINANCE, user)
symbols_of_existing_orders = [order["symbol"] for order in existing_orders]
for coin in sorted_buy_preferences:
# TODO may make sense in the future to check the purchase amount and adjust the expected
if coin["symbol"] in symbols_of_existing_orders:
# TODO add current order information to logs
log.info("already have an open order for this coin", coin=coin)
continue
paired_symbol = coin["symbol"] + user.purchasing_currency
if not exchanges.can_buy_amount_in_exchange(paired_symbol):
continue
# round up the purchase amount to the total available balance if we don't have enough to buy two tokens
purchase_amount = purchase_total if purchase_total < exchange_purchase_minimum * 2 else user_purchase_minimum
# percentage is not expressed in a < 1 float, so we need to convert it
coin_portfolio_info = next((target for target in target_portfolio if target["symbol"] == coin["symbol"]))
target_amount = coin_portfolio_info["percentage"] / 100 * portfolio_total
# make sure purchase total will not overflow the target allocation
purchase_amount = min(purchase_amount, target_amount, user_purchase_maximum)
# make sure the floor purchase amount is at least the user-specific minimum
purchase_amount = max(purchase_amount, user_purchase_minimum)
# we need to at least buy the minimum that the exchange allows
purchase_amount = max(exchange_purchase_minimum, purchase_amount)
# TODO right now the minNotional filter is NOT respected since the user min is $30, which is normally higher than this value
# this is something we'll have to handle properly in the future
# minimum_token_quantity_in_exchange(paired_symbol)
# symbol_info = public_binance_client.get_symbol_info(paired_symbol)
# tick_size = next(f['minNotional'] for f in symbol_info['filters'] if f['filterType'] == 'PRICE_FILTER')
if purchase_amount > purchase_total:
log.info("not enough purchase currency balance for coin", amount=purchase_amount, balance=purchase_total, coin=coin["symbol"])
continue
log.info("adding purchase preference", symbol=coin["symbol"], amount=purchase_amount)
purchases.append(
{
"symbol": coin["symbol"],
# TODO should we include the paired symbol in this data structure?
# amount in purchasing currency, not a quantity of the symbol to purchase
"amount": purchase_amount,
}
)
purchase_total -= purchase_amount
if purchase_total <= 0:
break
return purchases
# https://www.binance.us/en/usercenter/wallet/money-log
def make_market_buys(user: User, market_buys: t.List[MarketBuy]) -> t.List:
if not market_buys:
return []
purchasing_currency = user.purchasing_currency
orders = []
# TODO consider executing limit orders based on the current market orders
# this could ensure we don't overpay for an asset with low liquidity
for buy in market_buys:
symbol = buy["symbol"]
amount = buy["amount"]
if user.buy_strategy == MarketBuyStrategy.LIMIT:
from . import limit_buy
limit_price = limit_buy.determine_limit_price(user, symbol, purchasing_currency)
order_quantity = Decimal(buy["amount"]) / limit_price
order = exchanges.limit_buy(
exchange=SupportedExchanges.BINANCE,
user=user,
purchasing_currency=purchasing_currency,
symbol=symbol,
quantity=order_quantity,
price=limit_price,
)
else: # market
order = exchanges.market_buy(
exchange=SupportedExchanges.BINANCE, user=user, symbol=symbol, purchasing_currency=purchasing_currency, amount=amount
)
orders.append(order)
# in testmode, or in the case of an error, the result is an empty dict
# remove this since it doesn't provide any useful information and is confusing to parse downstream
return list(filter(None, orders))
```
#### File: crypto-index-fund-bot/bot/utils.py
```python
from rich.traceback import install as install_rich_tracebacks
# install_rich_tracebacks(show_locals=True, width=200)
install_rich_tracebacks(width=200)
import logging
import typing as t
import structlog
from decouple import config
from structlog.threadlocal import wrap_dict
def setLevel(level):
level = getattr(logging, level.upper())
structlog.configure(
# context_class enables thread-local logging to avoid passing a log instance around
# https://www.structlog.org/en/21.1.0/thread-local.html
context_class=wrap_dict(dict),
wrapper_class=structlog.make_filtering_bound_logger(level),
cache_logger_on_first_use=True,
)
# TODO maybe round floats automatically? https://github.com/kiwicom/kiwi-structlog-config/blob/dc6bba731de956e0a76f148d0c77bd419bd95283/kw/structlog_config/processors.py#L16
log_level = config("LOG_LEVEL", default="WARN")
setLevel(log_level)
log = structlog.get_logger()
_cached_result = {}
def cached_result(key: str, func: t.Callable):
if in_django_environment():
from django.core.cache import cache
if cached_value := cache.get(key):
return cached_value
# use a 30m timeout by default for now
value = func()
cache.set(key, value, timeout=60 * 30)
return value
else:
# if no django, then setup a simple dict-based cache to avoid
# hitting the APIs too many times within a single process
global _cached_result
if key in _cached_result:
return _cached_result[key]
value = func()
_cached_result[key] = value
return value
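# Example usage (sketch; fetch_prices is a hypothetical expensive callable and the key is arbitrary):
#   prices = cached_result("coingecko_prices", lambda: fetch_prices())
# Inside Django the value is cached for 30 minutes; otherwise it lives in the
# process-local _cached_result dict for the life of the process.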
def in_django_environment():
    return config("DJANGO_SETTINGS_MODULE", default=None) is not None
def currency_format(value):
# https://stackoverflow.com/questions/320929/currency-formatting-in-python
import locale
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
return locale.currency(value, grouping=True)
def table_output_with_format(array_of_dicts, format):
if not array_of_dicts:
return None
if format == "md":
return markdown_table_output(array_of_dicts)
else:
return csv_table_output(array_of_dicts)
def markdown_table_output(array_of_dicts):
# TODO would be nice to add comma separators to money values
# note all table formats allow float formatting
from tabulate import tabulate
return tabulate(array_of_dicts, headers="keys", tablefmt="github", floatfmt=".2f")
def csv_table_output(array_of_dicts):
import csv
import sys
# TODO return CSV as a string
writer = csv.writer(sys.stdout)
writer.writerow(array_of_dicts[0].keys())
writer.writerows([row.values() for row in array_of_dicts])
```
#### File: crypto-index-fund-bot/test/test_buy_command.py
```python
import unittest
from decimal import Decimal
from unittest.mock import patch
import binance.client
import pytest
from bot.commands import BuyCommand
from bot.data_types import (
ExchangeOrder,
MarketBuyStrategy,
MarketIndexStrategy,
OrderTimeInForce,
OrderType,
SupportedExchanges,
)
from bot.user import user_from_env
@pytest.mark.vcr
class TestBuyCommand(unittest.TestCase):
PURCHASE_MIN = 25
# initial buys should prioritize coins that take up a large amount of the index first
@patch.object(binance.client.Client, "order_market_buy", side_effect=pytest.helpers.mocked_order_result)
@patch.object(binance.client.Client, "get_open_orders", return_value=[])
@patch("bot.exchanges.binance_portfolio", return_value=[])
def test_initial_buy(self, _binance_portfolio_mock, _open_order_mock, order_market_buy_mock):
from bot.commands import BuyCommand
user = user_from_env()
user.external_portfolio = []
user.purchase_min = self.PURCHASE_MIN
user.buy_strategy = MarketBuyStrategy.MARKET
assert user.external_portfolio == []
assert set(user.deprioritized_coins) == set(["DOGE", "XRP", "BNB"])
assert True == user.livemode
assert self.PURCHASE_MIN == user.purchase_min
assert MarketBuyStrategy.MARKET == user.buy_strategy
assert MarketIndexStrategy.MARKET_CAP == user.index_strategy
BuyCommand.execute(user=user, purchase_balance=Decimal(self.PURCHASE_MIN * 3))
# make sure the user minimum is respected
assert float(order_market_buy_mock.mock_calls[0].kwargs["quoteOrderQty"]) == self.PURCHASE_MIN
all_order_tokens = [mock_call.kwargs["symbol"] for mock_call in order_market_buy_mock.mock_calls]
# top market tokens should be prioritized
assert set(all_order_tokens) == set(["BTCUSD", "ETHUSD", "ADAUSD"])
@patch.object(binance.client.Client, "order_market_buy", side_effect=pytest.helpers.mocked_order_result)
@patch.object(binance.client.Client, "get_open_orders", return_value=[])
@patch("bot.exchanges.binance_portfolio", return_value=[])
def test_off_allocation_portfolio(self, _binance_portfolio_mock, _open_order_mock, order_market_buy_mock):
user = user_from_env()
user.purchase_min = self.PURCHASE_MIN
user.buy_strategy = MarketBuyStrategy.MARKET
user.external_portfolio = [ # type: ignore
{"symbol": "DOGE", "amount": Decimal("1000000")},
{"symbol": "ETH", "amount": Decimal("0.05")},
{"symbol": "BTC", "amount": Decimal("0.05")},
]
assert set(user.deprioritized_coins) == set(["DOGE", "XRP", "BNB"])
assert True == user.livemode
assert self.PURCHASE_MIN == user.purchase_min
assert MarketBuyStrategy.MARKET == user.buy_strategy
assert MarketIndexStrategy.MARKET_CAP == user.index_strategy
BuyCommand.execute(user=user, purchase_balance=Decimal(self.PURCHASE_MIN * 4))
all_order_tokens = [mock_call.kwargs["symbol"] for mock_call in order_market_buy_mock.mock_calls]
# top market tokens should be prioritized
assert set(all_order_tokens) == set(["BTCUSD", "ETHUSD", "ADAUSD", "SOLUSD"])
@patch(
"bot.exchanges.open_orders",
return_value=[
ExchangeOrder(
symbol="ADA",
trading_pair="ADAUSD",
quantity=Decimal("5.10000000"),
price=Decimal("2.0000"),
created_at=1631457393,
time_in_force=OrderTimeInForce("GTC"),
type=OrderType("BUY"),
id="259074455",
exchange=SupportedExchanges.BINANCE,
)
],
)
@patch("bot.exchanges.portfolio", return_value=[])
def test_cancelling_stale_orders(self, _mock_portfolio, _mock_open_orders):
user = user_from_env()
user.livemode = False
user.cancel_stale_orders = True
user.buy_strategy = MarketBuyStrategy.LIMIT
assert user.livemode == False
assert user.cancel_stale_orders == True
assert user.buy_strategy == MarketBuyStrategy.LIMIT
BuyCommand.execute(user=user)
def test_not_buying_open_orders(self):
pass
```
#### File: crypto-index-fund-bot/users/models.py
```python
import json
from django.db import models
from encrypted_model_fields.fields import EncryptedCharField
# for ensuring all floats are parsed as decimals
class CustomJSONDecoder(json.JSONDecoder):
def __init__(self, *args, **kwargs):
from decimal import Decimal
kwargs["parse_float"] = Decimal
super().__init__(*args, **kwargs)
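# Example (sketch): json.loads('{"amount": 0.05}', cls=CustomJSONDecoder)["amount"]
# yields Decimal('0.05') instead of a float, because parse_float is set to Decimal above.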
class User(models.Model):
# TODO these are not stored in `preferences` since we want to encrypt them in the future
# django requires an explicit field length; the key sizes here are probably much smaller
binance_api_key = EncryptedCharField(max_length=100, null=True)
binance_secret_key = EncryptedCharField(max_length=100, null=True)
external_portfolio = models.JSONField(default=dict, decoder=CustomJSONDecoder)
preferences = models.JSONField(default=dict)
name = models.CharField(max_length=100)
date_checked = models.DateTimeField(null=True)
def bot_user(self):
# copy all fields to the other instance of user currently used by the bot
# eventually, we'll want to merge the two but let's just get this working first
# TODO really terrible that we are using the same name here for both users
from bot.user import User as BotUser
bot_user = BotUser()
bot_user.binance_api_key = self.binance_api_key
bot_user.binance_secret_key = self.binance_secret_key
bot_user.external_portfolio = self.external_portfolio
for k, v in self.preferences.items():
setattr(bot_user, k, v)
return bot_user
``` |
{
"source": "jnascimentocode/REST-API-COM-PYTHON-E-FLASK",
"score": 2
} |
#### File: REST-API-COM-PYTHON-E-FLASK/resources/hotel.py
```python
from typing import ParamSpecArgs
from flask_restful import Resource, reqparse
from models.hotel import HotelModel
from flask_jwt_extended import jwt_required
from models.site import SiteModel
from resources.filtros import *
import sqlite3
path_params = reqparse.RequestParser()
path_params.add_argument('cidade', type=str)
path_params.add_argument('estrelas_min', type=float)
path_params.add_argument('estrelas_max', type=float)
path_params.add_argument('diaria_min', type=float)
path_params.add_argument('diaria_max', type=float)
path_params.add_argument('limit', type=float)
path_params.add_argument('offset', type=float)
class Hoteis(Resource):
def get(self):
connection = sqlite3.connect('banco.db')
cursor = connection.cursor()
dados = path_params.parse_args()
dados_validos = {chave:dados[chave] for chave in dados if dados[chave] is not None}
parametros = normalize_path_params(**dados_validos)
if not parametros.get('cidade'):
tupla = tuple([parametros[chave] for chave in parametros])
resultado = cursor.execute(consulta_sem_cidade, tupla)
else:
tupla = tuple([parametros[chave] for chave in parametros])
resultado = cursor.execute(consulta_com_cidade, tupla)
hoteis = []
for linha in resultado:
hoteis.append({
'hotel_id': linha[0],
'nome': linha[1],
'estrelas': linha[2],
'diaria': linha[3],
'cidade': linha[4],
'site_id': linha[5]
})
return {'hoteis': hoteis}
class Hotel(Resource):
argumentos = reqparse.RequestParser()
argumentos.add_argument('nome', type=str, required=True, help="The field 'nome' cannot be left blank")
argumentos.add_argument('estrelas', type=float, required=True, help="The field 'estrelas' cannot be left blank")
argumentos.add_argument('diaria')
argumentos.add_argument('cidade')
argumentos.add_argument('site_id', type=int, required=True, help="Every hotel needs to be linked with site")
def get(self, hotel_id):
hotel = HotelModel.find_hotel(hotel_id)
if hotel:
return hotel.json()
return {'message': 'Hotel not found.'}, 404
@jwt_required()
def post(self, hotel_id):
if HotelModel.find_hotel(hotel_id):
return {"message": "Hotel id '{}' already exists.".format(hotel_id)}, 400
dados = Hotel.argumentos.parse_args()
hotel = HotelModel(hotel_id, **dados)
if not SiteModel.find_by_id(dados.get('site_id')):
return {'message': 'The hotel must be associated to a valid site id'}, 400
try:
hotel.save_hotel()
except:
return {'message': 'An internal error occurred trying to save hotel.'}, 500
return hotel.json()
@jwt_required()
def put(self, hotel_id):
dados = Hotel.argumentos.parse_args()
hotel_encontrado = HotelModel.find_hotel(hotel_id)
if hotel_encontrado:
hotel_encontrado.update_hotel(**dados)
hotel_encontrado.save_hotel()
return hotel_encontrado.json(), 200
hotel = HotelModel(hotel_id, **dados)
try:
hotel.save_hotel()
except:
return {'message': 'An internal error occurred trying to save hotel.'}, 500
return hotel.json(), 201 #created
@jwt_required()
def delete(self, hotel_id):
hotel = HotelModel.find_hotel(hotel_id)
if hotel:
try:
hotel.delete_hotel()
except:
return {'message': 'An error occurred trying to delete hotel.'}, 500
return {'message': 'Hotel deleted.'}
return {'message': 'Hotel not found.'}, 404
``` |
{
"source": "jnascimentocode/sistema-cadastro-terminal-MySQL",
"score": 3
} |
#### File: Cadastro/interface/__init__.py
```python
from time import sleep
def validacao_dado(i):
while True:
try:
n = int(input(i))
except (ValueError, TypeError):
print('\033[0;31m VALOR INVÁLIDO. \033[m')
sleep(0.5)
else:
return n
def linha(tam=55):
return '_' * tam
def cabecalho(txt):
print(linha())
    print(f'\033[1;30m{txt.center(55)}\033[m')
print(linha())
def menu(lista):
c = 1
for item in lista:
print(f'\033[33m{c}\033[m - \033[34m{item}\033[m')
c += 1
print(linha())
opc = validacao_dado('\033[33m DIGITE A OPÇÃO: \033[m ')
return opc
def tratar_sexo(s):
while True:
sexo = s
if sexo not in 'MmFf':
print('FAVOR DIGITAR FORMATO VÁLIDO.')
else:
return sexo
``` |
{
"source": "jnasingleton/python_code_snippets",
"score": 3
} |
#### File: jnasingleton/python_code_snippets/append_pdfs.py
```python
from PyPDF2 import PdfFileMerger
def append_pdfs(pdfs, output_pdf):
merger = PdfFileMerger()
for pdf in pdfs:
merger.append(pdf)
    merger.write(output_pdf)
    merger.close()
pdfs = ['Part 103042018.pdf', 'Part 203042018.pdf']
output_pdf = 'result.pdf'
append_pdfs(pdfs, output_pdf)
``` |
{
"source": "jnatishay78/DS-Algo",
"score": 4
} |
#### File: DS-Algo/Insertion Sort/insertion_sort.py
```python
def insertionSort(arr):
for i in range(1, len(arr)):
m = arr[i]
j = i-1
while j >= 0 and m < arr[j] :
arr[j + 1] = arr[j]
j -= 1
arr[j + 1] = m
arr = [ 21, 12, 23, 15, 5 ]
insertionSort(arr)
for i in range(len(arr)):
print ("% d" % arr[i])
```
#### File: DS-Algo/Shell Sort/shell_sort.py
```python
def shellSort(a, num):
interval = num // 2
while interval > 0:
for i in range(interval, num):
temp = a[i]
j = i
while j >= interval and a[j - interval] > temp:
a[j] = a[j - interval]
j -= interval
a[j] = temp
interval //= 2
a = [ 3, 60, 35, 2, 45, 320, 5 ]
size = len(a)
shellSort(a, size)
print('Sorted Array:')
print(a)
``` |
{
"source": "jnatkins/dbt",
"score": 2
} |
#### File: adapters/postgres/connections.py
```python
from contextlib import contextmanager
import psycopg2
import dbt.exceptions
from dbt.adapters.base import Credentials
from dbt.adapters.sql import SQLConnectionManager
from dbt.contracts.connection import AdapterResponse
from dbt.logger import GLOBAL_LOGGER as logger
from dbt.helper_types import Port
from dataclasses import dataclass
from typing import Optional
@dataclass
class PostgresCredentials(Credentials):
host: str
user: str
port: Port
password: str # on postgres the password is mandatory
connect_timeout: int = 10
role: Optional[str] = None
search_path: Optional[str] = None
keepalives_idle: int = 0 # 0 means to use the default value
sslmode: Optional[str] = None
sslcert: Optional[str] = None
sslkey: Optional[str] = None
sslrootcert: Optional[str] = None
application_name: Optional[str] = 'dbt'
_ALIASES = {
'dbname': 'database',
'pass': 'password'
}
@property
def type(self):
return 'postgres'
def _connection_keys(self):
return ('host', 'port', 'user', 'database', 'schema', 'search_path',
'keepalives_idle', 'sslmode')
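# For reference (sketch): with dbt these credentials are normally supplied by a profiles.yml
# target roughly along these lines (key names follow the fields and _ALIASES above; values
# are illustrative):
#   type: postgres
#   host: localhost
#   user: dbt_user
#   pass: <password>
#   port: 5432
#   dbname: analytics   # alias for "database"
#   schema: public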
class PostgresConnectionManager(SQLConnectionManager):
TYPE = 'postgres'
@contextmanager
def exception_handler(self, sql):
try:
yield
except psycopg2.DatabaseError as e:
logger.debug('Postgres error: {}'.format(str(e)))
try:
self.rollback_if_open()
except psycopg2.Error:
logger.debug("Failed to release connection!")
pass
raise dbt.exceptions.DatabaseException(str(e).strip()) from e
except Exception as e:
logger.debug("Error running SQL: {}", sql)
logger.debug("Rolling back transaction.")
self.rollback_if_open()
if isinstance(e, dbt.exceptions.RuntimeException):
# during a sql query, an internal to dbt exception was raised.
# this sounds a lot like a signal handler and probably has
# useful information, so raise it without modification.
raise
raise dbt.exceptions.RuntimeException(e) from e
@classmethod
def open(cls, connection):
if connection.state == 'open':
logger.debug('Connection is already open, skipping open.')
return connection
credentials = cls.get_credentials(connection.credentials)
kwargs = {}
# we don't want to pass 0 along to connect() as postgres will try to
# call an invalid setsockopt() call (contrary to the docs).
if credentials.keepalives_idle:
kwargs['keepalives_idle'] = credentials.keepalives_idle
# psycopg2 doesn't support search_path officially,
# see https://github.com/psycopg/psycopg2/issues/465
search_path = credentials.search_path
if search_path is not None and search_path != '':
# see https://postgresql.org/docs/9.5/libpq-connect.html
kwargs['options'] = '-c search_path={}'.format(
search_path.replace(' ', '\\ '))
if credentials.sslmode:
kwargs['sslmode'] = credentials.sslmode
if credentials.sslcert is not None:
kwargs["sslcert"] = credentials.sslcert
if credentials.sslkey is not None:
kwargs["sslkey"] = credentials.sslkey
if credentials.sslrootcert is not None:
kwargs["sslrootcert"] = credentials.sslrootcert
if credentials.application_name:
kwargs['application_name'] = credentials.application_name
try:
handle = psycopg2.connect(
dbname=credentials.database,
user=credentials.user,
host=credentials.host,
password=<PASSWORD>,
port=credentials.port,
connect_timeout=credentials.connect_timeout,
**kwargs)
if credentials.role:
handle.cursor().execute('set role {}'.format(credentials.role))
connection.handle = handle
connection.state = 'open'
except psycopg2.Error as e:
logger.debug("Got an error when attempting to open a postgres "
"connection: '{}'"
.format(e))
connection.handle = None
connection.state = 'fail'
raise dbt.exceptions.FailedToConnectException(str(e))
return connection
def cancel(self, connection):
connection_name = connection.name
try:
pid = connection.handle.get_backend_pid()
except psycopg2.InterfaceError as exc:
# if the connection is already closed, not much to cancel!
if 'already closed' in str(exc):
logger.debug(
f'Connection {connection_name} was already closed'
)
return
# probably bad, re-raise it
raise
sql = "select pg_terminate_backend({})".format(pid)
logger.debug("Cancelling query '{}' ({})".format(connection_name, pid))
_, cursor = self.add_query(sql)
res = cursor.fetchone()
logger.debug("Cancel query '{}': {}".format(connection_name, res))
@classmethod
def get_credentials(cls, credentials):
return credentials
@classmethod
def get_response(cls, cursor) -> AdapterResponse:
message = str(cursor.statusmessage)
rows = cursor.rowcount
status_message_parts = message.split() if message is not None else []
        status_message_strings = [
            part
            for part in status_message_parts
            if not part.isdigit()
        ]
        code = ' '.join(status_message_strings)
return AdapterResponse(
_message=message,
code=code,
rows_affected=rows
)
``` |
{
"source": "jnatkins/dbt-snowflake",
"score": 2
} |
#### File: functional/adapter/test_incremental_unique_id.py
```python
import pytest
from dbt.tests.adapter.incremental.test_incremental_unique_id import BaseIncrementalUniqueKey
class TestUniqueKeySnowflake(BaseIncrementalUniqueKey):
pass
class TestUniqueKeyDeleteInsertSnowflake(BaseIncrementalUniqueKey):
@pytest.fixture(scope="class")
def project_config_update(self):
return {
"models": { "+incremental_strategy": "delete+insert" }
}
``` |
{
"source": "j-navesx/spbd",
"score": 3
} |
#### File: map-reduce/P1/map_p1_01.py
```python
from sys import stdin
import logging
def main():
stdin.readline()
for line in stdin:
line = line.strip()
line = line.split(',')
# Monitor_id:County_Code:State_Name
print(f"{line[2]}:{line[0]+line[1]}:{line[24]}")
if __name__ == '__main__':
main()
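# Example invocation (sketch): this mapper is written for a Hadoop-streaming style pipeline,
# e.g.  cat air_quality.csv | python map_p1_01.py | sort | python reduce_p1_01.py
# where the input CSV and the reducer script are placeholders for this exercise's files.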
```
#### File: map-reduce/P4/map_p4_01.py
```python
from sys import stdin
def main():
stdin.readline()
for line in stdin:
line = line.strip()
line = line.split(',')
# StateCode+CountyCode,Site,State,Latitude,Longitude
line[5] = float("{:.3f}".format(float(line[5])))
line[6] = float("{:.3f}".format(float(line[6])))
print(f"{line[0]+line[1]},{line[2]},{line[24]},{line[5]},{line[6]}")
if __name__ == '__main__':
main()
```
#### File: map-reduce/P4/map_p4_03.py
```python
from sys import stdin
def main():
for line in stdin:
line = line.strip()
state, distance = line.split(',')
# State,Distance
print(f"{state},{distance}")
if __name__ == '__main__':
main()
``` |
{
"source": "jnawjux/recommendation-case-study",
"score": 3
} |
#### File: jnawjux/recommendation-case-study/cold_start.py
```python
from __future__ import division
import pandas as pd
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from sklearn.cluster import KMeans
import pyspark
def ohe_columns(series, name):
ohe = OneHotEncoder(categories='auto')
ohe.fit(series)
cols = ohe.get_feature_names(name)
ohe = ohe.transform(series)
final_df = pd.DataFrame(ohe.toarray(), columns=cols)
return final_df
def add_clusters_to_users(n_clusters=8):
"""
    parameters: n_clusters - number of clusters for KMeans
    return: user dataframe with an added 'cluster' column
"""
# Get the user data
user_df = pd.read_csv('data/users.dat', sep='::', header=None
, names=['id', 'gender', 'age', 'occupation', 'zip'])
# OHE for clustering
my_cols = ['gender', 'age', 'occupation']
ohe_multi = OneHotEncoder(categories='auto')
ohe_multi.fit(user_df[my_cols])
ohe_mat = ohe_multi.transform(user_df[my_cols])
# Then KMeans cluster
    k_clusters = KMeans(n_clusters=n_clusters, random_state=42)
k_clusters.fit(ohe_mat)
preds = k_clusters.predict(ohe_mat)
# Add clusters to user df
user_df['cluster'] = preds
return user_df
def add_cluster_to_ratings(user_df):
"""
given user_df with clusters, add clusters to ratings data
parameters
---------
user_df: df with user data
returns
-------
ratings_df: ratings_df with cluster column
"""
# Read in ratings file
#Get ratings file - create Spark instance for loading JSON
spark = pyspark.sql.SparkSession.builder.getOrCreate()
sc = spark.sparkContext
ratings_df = spark.read.json('data/ratings.json').toPandas()
# Set up clusters
cluster_dict = {}
for k, v in zip(user_df['id'].tolist(), user_df['cluster'].tolist()):
cluster_dict[k] = v
# Add cluster to ratings
ratings_df['cluster'] = ratings_df['user_id'].apply(lambda x: cluster_dict[x])
return ratings_df
def get_cold_start_rating(user_id, movie_id):
"""
Given user_id and movie_id, return a predicted rating
parameters
----------
user_id, movie_id
returns
-------
movie rating (float)
If current user, current movie = average rating of movie by cluster
If current user, NOT current movie = average rating for cluster
If NOT current user, and current movie = average for the movie
If NOT current user, and NOT current movie = average for all ratings
"""
if user_id in u_info['id'].tolist():
if movie_id in ratings_df['movie_id'].tolist():
cluster = u_info.loc[u_info['id'] == user_id]['cluster'].tolist()[0]
cluster_df = ratings_df.loc[(ratings_df['cluster'] == cluster)]['rating']
cluster_avg = cluster_df.mean()
else:
cluster = u_info.loc[u_info['id'] == user_id]['cluster'].tolist()[0]
cluster_rating = ratings_df.loc[ratings_df['cluster'] == cluster]['rating'].tolist()
if len(cluster_rating) > 1:
cluster_avg = sum(cluster_rating)/len(cluster_rating)
else:
cluster_avg = cluster_rating[0]
return cluster_avg
else:
if movie_id in ratings_df['movie_id'].tolist():
movie_rating = ratings_df.loc[ratings_df['movie_id'] == movie_id]['rating'].tolist()
if len(movie_rating) > 1:
movie_avg = sum(movie_rating)/len(movie_rating)
else:
movie_avg = movie_rating[0]
return movie_avg
else:
return ratings_df['rating'].mean()
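# Example usage (sketch; the ids are illustrative, and the module-level u_info / ratings_df
# are assumed to have been built first):
#   u_info = add_clusters_to_users(n_clusters=8)
#   ratings_df = add_cluster_to_ratings(u_info)
#   get_cold_start_rating(user_id=42, movie_id=1193)   # falls through the cases above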
``` |
{
"source": "jnazaren/CS105-DataProject",
"score": 3
} |
#### File: CS105-DataProject/MachineLearning/MultiDimensionalLearning.py
```python
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from tqdm import tqdm
def compute_cost(x_val_mat, y_val_mat, z_val_mat, theta, beta):
m = z_val_mat.size
H = (theta * x_val_mat) + (beta * y_val_mat)
S = np.sum(np.asarray(H - z_val_mat)**2, axis=1)[0]
return S/(2*m) # J (cost) value
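# The value returned above is the standard least-squares cost (sketch):
#   J(theta, beta) = (1 / (2m)) * sum_i (theta . x_i + beta . y_i - z_i)^2
# where x_i and y_i are the polynomial feature columns assembled in learn().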
def gradient_descent(x_val_mat, y_val_mat, z_val_mat, theta, beta, iterations, alpha_t, alpha_b):
print("Performing gradient descent...")
m = z_val_mat.size
for i in tqdm(range(0, iterations), desc="Percentage Completed"):
# ------- gradient descent for theta: ----------
HY = ((theta * x_val_mat) + (beta * y_val_mat)) - z_val_mat
SA = HY * x_val_mat.transpose()
SA *= alpha_t / m
theta = theta - SA
# ------- gradient descent for beta: ----------
HY = ((theta * x_val_mat) + (beta * y_val_mat)) - z_val_mat
SA = HY * y_val_mat.transpose()
SA *= alpha_b / m
beta = beta - SA
# ------- check for overshoot: ----------
if np.any(np.isnan(theta)):
raise ValueError("Smaller theta learning rate needed!")
if np.any(np.isnan(beta)):
raise ValueError("Smaller beta learning rate needed!")
return theta, beta
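# The updates above are alternating gradient steps (sketch):
#   theta <- theta - (alpha_t / m) * (H - Z) X^T
#   beta  <- beta  - (alpha_b / m) * (H - Z) Y^T
# where H = theta X + beta Y and X, Y are the (degree + 1) x m feature matrices.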
def learn(iterations=150000, theta_learning_rate=0.01, beta_learning_rate=0.01):
succeeded = False
infile = input("Name of formatted data file: ")
xvals, yvals, zvals = np.loadtxt(infile, delimiter=",", unpack=True)
x_degree = int(input("Degree of x polynomial: "))
y_degree = int(input("Degree of y polynomial: "))
theta = "0 " * x_degree + "0"
theta = np.matrix(theta)
beta = "0 " * y_degree + "0"
beta = np.matrix(beta)
x_vals = [np.ones(len(xvals))]
y_vals = [np.ones(len(yvals))]
for d in range(1, x_degree+1):
x_vals = np.append(x_vals, [xvals**d], axis=0)
for d in range(1, y_degree+1):
y_vals = np.append(y_vals, [yvals**d], axis=0)
x_val_mat = np.matrix(x_vals)
y_val_mat = np.matrix(y_vals)
z_val_mat = np.matrix(zvals)
cost = compute_cost(x_val_mat, y_val_mat, z_val_mat, theta, beta)
print("Initial cost: " + str(cost))
theta_new = theta
beta_new = beta
while not succeeded:
try:
print("Current theta learning rate: " + str(theta_learning_rate))
print("Current beta learning rate: " + str(beta_learning_rate))
result = gradient_descent(x_val_mat, y_val_mat, z_val_mat, theta, beta,
iterations, theta_learning_rate, beta_learning_rate)
theta_new = np.asarray(result[0])[0]
beta_new = np.asarray(result[1])[0]
print("\nTheta values found: " + str(theta_new))
print("Beta values found: " + str(beta_new))
succeeded = True
except ValueError as e:
if "theta" in str(e):
print("Theta learning rate too large, trying a smaller one...")
theta_learning_rate /= 10
elif "beta" in str(e):
print("Beta learning rate too large, trying a smaller one...")
beta_learning_rate /= 10
else:
print("UNIDENTIFIED ERROR\nEXITING...")
return
cost_new = compute_cost(x_val_mat, y_val_mat, z_val_mat, theta_new, beta_new)
print("\nFinal cost: " + str(cost_new))
# TODO change these parameters as needed - this is IMPORTANT!!
x = np.arange(0, max(xvals) + 10, 100)
y = np.arange(0, max(yvals) + 1, 0.2)
x_template, y_template = np.meshgrid(x, y)
function = theta_new[0]
for l in range(1, len(theta_new)):
function += theta_new[l]*(x_template**l)
for l in range(0, len(beta_new)):
function += beta_new[l]*(y_template**l)
print("\nPlotting data...")
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(xvals, yvals, zvals, c='g', marker='o')
surf = ax.plot_surface(x_template, y_template, function,
rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, antialiased=True)
surf.set_alpha(0.75)
fig.colorbar(surf, shrink=0.5, aspect=5)
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.autoscale()
plt.show()
return theta_new, beta_new
if __name__ == "__main__":
learn()
``` |
{
"source": "jnbek/aur_tools",
"score": 2
} |
#### File: aur_tools/fs_notify_crap/run_gamin.py
```python
import gamin
import sys
import time
def callback(path, event):
print "Got callback: %s, %s" % (path, event)
def main():
mon = gamin.WatchMonitor()
mon.watch_directory("/home/jnbek/tmp", callback)
time.sleep(1)
# ret = mon.event_pending()
# if ret > 0:
# ret = mon.handle_one_event()
# ret = mon.handle_events()
while 1:
mon.handle_events()
time.sleep(1)
# mon.stop_watch(".")
# del mon
# main
if __name__ == '__main__':
main()
``` |
{
"source": "jnbellinger/lta",
"score": 2
} |
#### File: lta/doc/IceCubeSyncSite.py
```python
from __future__ import absolute_import, division, print_function
import json
import multiprocessing
import re
from argparse import ArgumentParser
from subprocess import Popen, PIPE
from gfal2 import Gfal2Context, GError
import rucio.rse.rsemanager as rsemgr
from rucio.client.didclient import DIDClient
from rucio.client.replicaclient import ReplicaClient
from rucio.common.exception import DataIdentifierAlreadyExists
from rucio.common.exception import RucioException
from rucio.common.exception import FileAlreadyExists
from rucio.common.exception import DuplicateRule
from rucio.common.exception import InvalidObject
from rucio.client import RuleClient
import gfal2
DEFAULT_SCOPE = 'exp'
DEBUG_FLAG = False
DEFAULT_LIMIT = 10
DEFAULT_ORIGIN_RSE = 'WIPAC-ORIGINAL2'
#Default dataset like IceCube/2016/filtered/level2pass2/0101/Run00127347
class RunSync(object):
"""
    Synchronize the replicas of a given run at WIPAC-ORIGINAL with
    the corresponding Rucio site.
"""
def __init__(self, run, originrse=DEFAULT_ORIGIN_RSE, destrse=None, scope=DEFAULT_SCOPE,
check=True, lifetime=None, dry_run=False, container=None):
"""
        :param run: Name of the run (dataset) to synchronize with Rucio.
        :param originrse: Origin RSE holding the original replicas.
"""
self.run = run
self.originrse = originrse
self.destrse = destrse
self.scope = scope
self.check = check
self.lifetime = lifetime
self.dry_run = dry_run
self.container = container
self.rucio_datasets = {}
self.run_files = {}
self.existent_replica_files = {}
self.url = ''
self.gfal = Gfal2Context()
self.run_Number = None
self.get_run_Number()
self.files_storage = {}
self.get_global_url()
self.didc = DIDClient()
self.repc = ReplicaClient()
self.rulesClient = RuleClient()
# Right now obtaining the Metadata from the storage at WIPAC
# Hopefully in the future from JADE # TODO
self.get_run_Files()
self.get_rucio_metadata()
self.update_run_Files()
self.get_files_metadata()
def update_run_Files(self):
"""
        Update the run files with only the files that have not been registered
"""
for f in self.existent_replica_files:
file_name = f.split('/')[-1:][0]
if file_name in self.run_files:
print("File: %s already registered. Skipping it" % file_name)
self.run_files.pop(file_name)
def get_files_metadata(self):
for f in self.run_files:
if self.run + '/' + f not in self.existent_replica_files:
self.obtain_metadata(f)
print("Metadat initialization done")
def obtain_metadata(self, filename):
"""
        Get the size and adler32 checksum of a file from the GridFTP server
"""
url = self.get_file_url(filename)
print("checking metadata for url %s" % url)
try:
size = self.gfal.stat(str(url)).st_size
adler32 = self.gfal.checksum(str(url), 'adler32')
print("got size and adler 32checksum of file: pfn=%s size=%s checksum=%s"% (url, size, adler32))
self.run_files[filename] = {'size':size, 'adler32':adler32, 'name': self.run + '/' + filename}
except GError:
print("no file found at %s" % url)
return False
def get_file_url(self, filename):
return self.url + '/' + self.run + '/' + filename
def get_global_url(self):
"""
Return the base path of the rucio url
"""
print("Getting parameters for rse %s" % self.originrse)
rse = rsemgr.get_rse_info(self.originrse)
proto = rse['protocols'][0]
schema = proto['scheme']
prefix = proto['prefix'] + self.scope.replace('.', '/')
if schema == 'srm':
prefix = proto['extended_attributes']['web_service_path'] + prefix
url = schema + '://' + proto['hostname']
if proto['port'] != 0:
url = url + ':' + str(proto['port'])
self.url = url + prefix
print("Determined base url %s" % self.url)
def get_run_Number(self):
"""
        Obtain the run number from the full run path, e.g. IceCube/2016/filtered/level2pass2/0101/Run00127347
"""
print("Obtaining run number out of run(dataset): %s" % self.run)
self.run_Number = self.run.split("/")[-1]
print("Run number (dataset): %s" % self.run_Number)
def get_run_Files(self):
"""
        Get the list of files for a given run from the storage
"""
self.run_url = self.url + '/' + self.run
print("Listin files from url : %s" % self.run_url)
run_files = []
try:
run_files = self.gfal.listdir(str(self.run_url))
except GError:
print("No files found at %s" % str(self.run_url))
print("Files found in storage:")
count = 0
for f in run_files:
if len(f) > 3:
if count < 5000:
self.run_files[f] = {}
count = count + 1
else:
break
def get_rucio_metadata(self):
"""
Gets the list of datasets at the Rucio RSE, the files, and the metadata.
"""
print("Initializing Rucio... getting the list of blocks and files at %s"
% self.originrse)
registered_datasets = self.repc.list_datasets_per_rse(self.originrse)
for dataset in registered_datasets:
self.rucio_datasets[dataset] = {}
replica_info = self.repc.list_replicas([{"scope": self.scope,
"name": '/'+self.run_Number}],
rse_expression="rse=%s" % self.originrse)
replica_files = set()
for file_info in replica_info:
name = file_info['name']
if self.originrse in file_info['rses']:
replica_files.add(name)
self.existent_replica_files = replica_files
print("Rucio initialization done.")
def register(self):
"""
Create the container, the datasets and attach them to the container.
"""
print("Registering...")
self.register_dataset(self.run_Number)
self.register_replicas(self.run_files)
self.register_container(self.container)
self.attach_dataset_to_container(self.run_Number, self.container)
self.add_replica_rule(dataset=self.run_Number, destRSE=self.destrse)
def register_container(self, container):
"""
Registering the container
"""
print("Registering the container %s with scope: %s" % (container,self.scope))
if container is None:
print ('No container added, not registering any container')
return
if self.dry_run:
print ('Dry run only, not registering the container')
return
try:
self.didc.add_container(scope=self.scope, name=container, lifetime=self.lifetime)
except DataIdentifierAlreadyExists:
print("Container %s already exists" % container)
except InvalidObject:
print("Problem with container name: %s" % container)
def attach_dataset_to_container(self, dataset, container):
"""
Attaching the dataset to a container
"""
print("Attaching dataset %s, to container: %s" % (dataset, container))
if container is None:
print ('No container added, not registering dataset in container')
return
if self.dry_run:
print ('Dry run only, not attaching dataset container')
return
try:
self.didc.attach_dids(scope=self.scope, name=container,
dids=[{'scope': self.scope, 'name': '/'+dataset}])
except RucioException:
print("dataset already attached to container")
return
def register_dataset(self, run):
"""
Registering a dataset in the rucio database
"""
print("registering dataset %s"% run)
if self.dry_run:
print(' Dry run only. Not creating dataset.')
return
try:
self.didc.add_dataset(scope=self.scope, name=run, lifetime=self.lifetime)
except DataIdentifierAlreadyExists:
print(" Dataset %s already exists" % run)
def register_replicas(self, replicas):
"""
Register file replica.
"""
if not replicas:
return
print("registering files in Rucio: %s" % ", ".join([replicas[filemd]['name'] for filemd in replicas]))
if self.dry_run:
print(' Dry run only. Not registering files.')
return
try:
self.repc.add_replicas(rse=self.originrse, files=[{
'scope': self.scope,
'name': replicas[filemd]['name'],
'adler32': replicas[filemd]['adler32'],
'bytes': replicas[filemd]['size'],
} for filemd in replicas])
print("Adding files to dataset: %s" % self.run_Number)
except InvalidObject:
print("Problem with file name does not match pattern")
for filemd in replicas:
try:
self.didc.attach_dids(scope=self.scope, name=self.run_Number, dids=[{
'scope': self.scope,
'name': replicas[filemd]['name']}])
except FileAlreadyExists:
print("File already attached")
def add_replica_rule(self, destRSE, dataset):
"""
Create a replication rule for one dataset "Run" at an RSE
"""
print("Creating replica rule for dataset %s at rse: %s" % (dataset, destRSE))
if self.dry_run:
print(' Dry run only. Not creating rules')
return
if destRSE:
try:
self.rulesClient.add_replication_rule([{"scope":self.scope,"name":"/"+dataset}],copies=1, rse_expression=destRSE)
except DuplicateRule:
print('Rule already exists')
def sync_one_dataset(dataset, originrse, destrse, scope, check, dry_run, container):
"""
    Helper function for RunSync
"""
instance = RunSync(
run=dataset,
originrse=originrse,
destrse=destrse,
scope=scope,
check=check,
dry_run=dry_run,
container=container,
)
instance.register()
def getDatasetListFromFile(datasetFile):
with open(datasetFile) as f:
content = f.readlines()
content = [x[9:].strip() for x in content]
return content
def main():
"""
Main function.
"""
parser = ArgumentParser(description="Given a Dataset (Run), like 2016/filtered/level2pass2/0101/Run00127347 "
"register files in Rucio in case they are not there")
parser.add_argument('--scope', dest='scope', help='scope of the dataset (default %s).'
% DEFAULT_SCOPE, default=DEFAULT_SCOPE)
parser.add_argument('--originrse', dest='originrse', help='Origin RSE the default is WIPAC-ORIGINAL', default=DEFAULT_ORIGIN_RSE)
    parser.add_argument('--destrse', dest='destrse', help='Destination RSE where the files should be replicated')
parser.add_argument('--nocheck', dest='check', action='store_false',
help='do not check size and checksum of files replicas on storage.')
parser.add_argument('--limit', dest='limit', default=DEFAULT_LIMIT, type=int,
help="limit on the number of datasets to attempt sync. default %s. -1 for unlimited" % DEFAULT_LIMIT)
parser.add_argument('--pool', dest='pool', default=5, type=int,
help="number of helper processes to use.")
parser.add_argument('--dryrun', dest='dry_run', action='store_true',
help='do not change anything in rucio, checking only')
parser.add_argument('--dataset', dest='dataset', action='append',
help='specific dataset or runs to sync')
parser.add_argument('--container', dest='container', action='store', help='Container to attach the dataset to')
parser.add_argument('--datasetFile', dest='datasetFile', action='store', help='File with the list of runs')
options = parser.parse_args()
pool = multiprocessing.Pool(options.pool)
datasets = options.dataset
datasetFile = options.datasetFile
limit = options.limit
count = 0
futures = []
if datasetFile:
if datasets:
            datasets.extend(getDatasetListFromFile(datasetFile))
else:
datasets = getDatasetListFromFile(datasetFile)
for dataset in datasets:
count += 1
if limit > 0 and count >= limit:
break
future = pool.apply_async(sync_one_dataset,(dataset, options.originrse, options.destrse,options.scope, options.check, options.dry_run, options.container))
futures.append((dataset, future))
pool.close()
for dataset, future in futures:
future.get()
print("Finished processing dataset %s" % dataset)
if __name__ == '__main__':
main()
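# Example invocation (sketch; the destination RSE and container names are illustrative):
#   python IceCubeSyncSite.py \
#       --dataset IceCube/2016/filtered/level2pass2/0101/Run00127347 \
#       --originrse WIPAC-ORIGINAL2 --destrse DESY-DCACHE --container IceCube2016 --dryrun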
```
#### File: lta/lta/crypto.py
```python
import hashlib
from typing import Dict
import zlib
# Adapted from sha512sum below; smh .tobytes()
def adler32sum(filename: str) -> str:
"""Compute the adler32 checksum of the data in the specified file."""
value = 1
b = bytearray(128*1024)
mv = memoryview(b)
with open(filename, 'rb', buffering=0) as f:
# Known issue with MyPy: https://github.com/python/typeshed/issues/2166
for n in iter(lambda: f.readinto(mv), 0): # type: ignore
value = zlib.adler32(mv[:n].tobytes(), value)
return ("%08X" % (value & 0xffffffff)).lower()
# Adapted from: https://stackoverflow.com/a/44873382
def sha512sum(filename: str) -> str:
"""Compute the SHA512 hash of the data in the specified file."""
h = hashlib.sha512()
b = bytearray(128*1024)
mv = memoryview(b)
with open(filename, 'rb', buffering=0) as f:
# Known issue with MyPy: https://github.com/python/typeshed/issues/2166
for n in iter(lambda: f.readinto(mv), 0): # type: ignore
h.update(mv[:n])
return h.hexdigest()
# A combination of the adler32sum and sha512sum above; so we read the data once
def lta_checksums(filename: str) -> Dict[str, str]:
"""Compute the adler32 and SHA512 hash of the data in the specified file."""
value = 1
h = hashlib.sha512()
b = bytearray(128*1024)
mv = memoryview(b)
with open(filename, 'rb', buffering=0) as f:
# Known issue with MyPy: https://github.com/python/typeshed/issues/2166
for n in iter(lambda: f.readinto(mv), 0): # type: ignore
value = zlib.adler32(mv[:n].tobytes(), value)
h.update(mv[:n])
return {
"adler32": ("%08X" % (value & 0xffffffff)).lower(),
"sha512": h.hexdigest(),
}
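# Example usage (sketch; "payload.zip" is a placeholder path):
#   checksums = lta_checksums("payload.zip")
#   checksums["adler32"]  # 8-character lowercase hex string
#   checksums["sha512"]   # 128-character hex digest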
```
#### File: lta/lta/joiner.py
```python
import os
from typing import List
def join_smart(items: List[str]) -> str:
"""Join paths together the way Node.js does it."""
if not items:
return "."
abnormal_path = os.path.sep.join(items)
normal_path = os.path.normpath(abnormal_path)
if(items[-1].endswith(os.path.sep)):
normal_path += os.path.sep
return normal_path
def join_smart_url(items: List[str]) -> str:
"""Join URL items together."""
if not items:
return ""
base = items[0]
if(base.endswith(os.path.sep)):
base = base[:-1]
items = items[1:]
items_str = join_smart(items)
if(items_str.startswith(os.path.sep)):
items_str = items_str[1:]
return "/".join([base, items_str])
```
#### File: lta/lta/lta_const.py
```python
def drain_semaphore_filename(component: str) -> str:
"""Obtain the canonical drain semaphore filename for the specified component name."""
return f".lta-{component}-drain"
def pid_filename(component: str) -> str:
"""Obtain the canonical pid filename for the specified component name."""
return f".lta-{component}-pid"
```
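For illustration, the helpers above simply interpolate the component name:
```python
from lta.lta_const import drain_semaphore_filename, pid_filename

print(drain_semaphore_filename("picker"))  # .lta-picker-drain
print(pid_filename("picker"))              # .lta-picker-pid
```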
#### File: lta/lta/picker.py
```python
import asyncio
import json
import logging
import sys
from typing import Any, Dict, List, Optional, Tuple
from binpacking import to_constant_volume # type: ignore
from rest_tools.client import RestClient
from rest_tools.server import from_environment
import wipac_telemetry.tracing_tools as wtt
from .component import COMMON_CONFIG, Component, now, status_loop, work_loop
from .log_format import StructuredFormatter
from .lta_types import BundleType, TransferRequestType
Logger = logging.Logger
# maximum number of Metadata UUIDs to supply to LTA DB for bulk_create
CREATE_CHUNK_SIZE = 1000
EXPECTED_CONFIG = COMMON_CONFIG.copy()
EXPECTED_CONFIG.update({
"FILE_CATALOG_PAGE_SIZE": "1000",
"FILE_CATALOG_REST_TOKEN": None,
"FILE_CATALOG_REST_URL": None,
"MAX_BUNDLE_SIZE": "107374182400", # 100 GiB
"WORK_RETRIES": "3",
"WORK_TIMEOUT_SECONDS": "30",
})
class Picker(Component):
"""
Picker is a Long Term Archive component.
A Picker is responsible for choosing the files that need to be bundled
and sent to remote archival destinations. It requests work from the
LTA REST API and then queries the file catalog to determine which files
to add to the LTA REST API.
"""
def __init__(self, config: Dict[str, str], logger: Logger) -> None:
"""
Create a Picker component.
config - A dictionary of required configuration values.
logger - The object the picker should use for logging.
"""
super(Picker, self).__init__("picker", config, logger)
self.file_catalog_page_size = int(config["FILE_CATALOG_PAGE_SIZE"])
self.file_catalog_rest_token = config["FILE_CATALOG_REST_TOKEN"]
self.file_catalog_rest_url = config["FILE_CATALOG_REST_URL"]
self.max_bundle_size = int(config["MAX_BUNDLE_SIZE"])
self.work_retries = int(config["WORK_RETRIES"])
self.work_timeout_seconds = float(config["WORK_TIMEOUT_SECONDS"])
def _do_status(self) -> Dict[str, Any]:
"""Picker has no additional status to contribute."""
return {}
def _expected_config(self) -> Dict[str, Optional[str]]:
"""Picker provides our expected configuration dictionary."""
return EXPECTED_CONFIG
@wtt.spanned()
async def _do_work(self) -> None:
"""Perform a work cycle for this component."""
self.logger.info("Starting work on TransferRequests.")
work_claimed = True
while work_claimed:
work_claimed = await self._do_work_claim()
work_claimed &= not self.run_once_and_die
self.logger.info("Ending work on TransferRequests.")
@wtt.spanned()
async def _do_work_claim(self) -> bool:
"""Claim a transfer request and perform work on it."""
# 1. Ask the LTA DB for the next TransferRequest to be picked
# configure a RestClient to talk to the LTA DB
lta_rc = RestClient(self.lta_rest_url,
token=self.lta_rest_token,
timeout=self.work_timeout_seconds,
retries=self.work_retries)
self.logger.info("Asking the LTA DB for a TransferRequest to work on.")
pop_body = {
"claimant": f"{self.name}-{self.instance_uuid}"
}
response = await lta_rc.request('POST', f'/TransferRequests/actions/pop?source={self.source_site}&dest={self.dest_site}', pop_body)
self.logger.info(f"LTA DB responded with: {response}")
tr = response["transfer_request"]
if not tr:
self.logger.info("LTA DB did not provide a TransferRequest to work on. Going on vacation.")
return False
# process the TransferRequest that we were given
try:
await self._do_work_transfer_request(lta_rc, tr)
except Exception as e:
self.logger.info(f"There was an error while processing the transfer request: {e}")
self.logger.info("Will now attempt to send the transfer request to 'quarantined' status.")
await self._quarantine_transfer_request(lta_rc, tr, f"{e}")
self.logger.info("Done sending the transfer request to 'quarantined' status, will end work cycle.")
return False
# if we were successful at processing work, let the caller know
return True
@wtt.spanned()
async def _do_work_transfer_request(self,
lta_rc: RestClient,
tr: TransferRequestType) -> None:
self.logger.info(f"Processing TransferRequest: {tr}")
# configure a RestClient to talk to the File Catalog
fc_rc = RestClient(self.file_catalog_rest_url,
token=self.file_catalog_rest_token,
timeout=self.work_timeout_seconds,
retries=self.work_retries)
# figure out which files need to go
source = tr["source"]
dest = tr["dest"]
path = tr["path"]
# query the file catalog for the source files
self.logger.info(f"Asking the File Catalog about files in {source}:{path}")
query_dict = {
"locations.site": {
"$eq": source
},
"locations.path": {
"$regex": f"^{path}"
},
"logical_name": {
"$regex": f"^{path}"
},
}
query_json = json.dumps(query_dict)
page_start = 0
catalog_files = []
fc_response = await fc_rc.request('GET', f'/api/files?query={query_json}&keys=uuid&limit={self.file_catalog_page_size}&start={page_start}')
num_files = len(fc_response["files"])
self.logger.info(f'File Catalog returned {num_files} file(s) to process.')
catalog_files.extend(fc_response["files"])
while num_files == self.file_catalog_page_size:
self.logger.info(f'Paging File Catalog. start={page_start}')
page_start += num_files
fc_response = await fc_rc.request('GET', f'/api/files?query={query_json}&keys=uuid&limit={self.file_catalog_page_size}&start={page_start}')
num_files = len(fc_response["files"])
self.logger.info(f'File Catalog returned {num_files} file(s) to process.')
catalog_files.extend(fc_response["files"])
# if we didn't get any files, this is bad mojo
if not catalog_files:
await self._quarantine_transfer_request(lta_rc, tr, "File Catalog returned zero files for the TransferRequest")
return
# create a packing list by querying the File Catalog for size information
num_catalog_files = len(catalog_files)
self.logger.info(f'Processing {num_catalog_files} UUIDs returned by the File Catalog.')
packing_list = []
for catalog_file in catalog_files:
catalog_file_uuid = catalog_file["uuid"]
catalog_record = await fc_rc.request('GET', f'/api/files/{catalog_file_uuid}')
file_size = catalog_record["file_size"]
# 0: uuid 1: size
packing_list.append((catalog_file_uuid, file_size))
# divide the packing list into an array of packing specifications
packing_spec = to_constant_volume(packing_list, self.max_bundle_size, 1) # 1: size
# for each packing list, we create a bundle in the LTA DB
self.logger.info(f"Creating {len(packing_spec)} new Bundles in the LTA DB.")
for spec in packing_spec:
self.logger.info(f"Packing specification contains {len(spec)} files.")
bundle_uuid = await self._create_bundle(lta_rc, {
"type": "Bundle",
# "uuid": unique_id(), # provided by LTA DB
"status": self.output_status,
"reason": "",
# "create_timestamp": right_now, # provided by LTA DB
# "update_timestamp": right_now, # provided by LTA DB
"request": tr["uuid"],
"source": source,
"dest": dest,
"path": path,
"file_count": len(spec),
})
await self._create_metadata_mapping(lta_rc, spec, bundle_uuid)
@wtt.spanned()
async def _create_bundle(self,
lta_rc: RestClient,
bundle: BundleType) -> Any:
self.logger.info('Creating new bundle in the LTA DB.')
create_body = {
"bundles": [bundle]
}
result = await lta_rc.request('POST', '/Bundles/actions/bulk_create', create_body)
uuid = result["bundles"][0]
return uuid
@wtt.spanned()
async def _create_metadata_mapping(self,
lta_rc: RestClient,
spec: List[Tuple[str, int]],
bundle_uuid: str) -> None:
self.logger.info(f'Creating {len(spec)} Metadata mappings between the File Catalog and pending bundle {bundle_uuid}.')
slice_index = 0
NUM_UUIDS = len(spec)
for i in range(slice_index, NUM_UUIDS, CREATE_CHUNK_SIZE):
slice_index = i
create_slice = spec[slice_index:slice_index+CREATE_CHUNK_SIZE]
create_body = {
"bundle_uuid": bundle_uuid,
"files": [x[0] for x in create_slice], # 0: uuid
}
result = await lta_rc.request('POST', '/Metadata/actions/bulk_create', create_body)
self.logger.info(f'Created {result["count"]} Metadata documents linking to pending bundle {bundle_uuid}.')
@wtt.spanned()
async def _quarantine_transfer_request(self,
lta_rc: RestClient,
tr: TransferRequestType,
reason: str) -> None:
self.logger.error(f'Sending TransferRequest {tr["uuid"]} to quarantine: {reason}.')
right_now = now()
patch_body = {
"status": "quarantined",
"reason": f"BY:{self.name}-{self.instance_uuid} REASON:{reason}",
"work_priority_timestamp": right_now,
}
try:
await lta_rc.request('PATCH', f'/TransferRequests/{tr["uuid"]}', patch_body)
except Exception as e:
self.logger.error(f'Unable to quarantine TransferRequest {tr["uuid"]}: {e}.')
def runner() -> None:
"""Configure a Picker component from the environment and set it running."""
# obtain our configuration from the environment
config = from_environment(EXPECTED_CONFIG)
# configure structured logging for the application
structured_formatter = StructuredFormatter(
component_type='Picker',
component_name=config["COMPONENT_NAME"], # type: ignore[arg-type]
ndjson=True)
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setFormatter(structured_formatter)
root_logger = logging.getLogger(None)
root_logger.setLevel(logging.NOTSET)
root_logger.addHandler(stream_handler)
logger = logging.getLogger("lta.picker")
# create our Picker service
picker = Picker(config, logger) # type: ignore[arg-type]
# let's get to work
picker.logger.info("Adding tasks to asyncio loop")
loop = asyncio.get_event_loop()
loop.create_task(status_loop(picker))
loop.create_task(work_loop(picker))
def main() -> None:
"""Configure a Picker component from the environment and set it running."""
runner()
asyncio.get_event_loop().run_forever()
if __name__ == "__main__":
main()
```
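The bundling step in `_do_work_transfer_request` hands its packing list to `binpacking.to_constant_volume`, passing the tuple index of the file size. A standalone sketch of that step with invented UUIDs, sizes, and a small bundle limit so the split is easy to see (the exact grouping depends on the bin-packing heuristic):
```python
# Standalone sketch of the bin-packing step in Picker._do_work_transfer_request.
# UUIDs and sizes are invented; the bundle limit is shrunk so the split is visible.
from binpacking import to_constant_volume

MAX_BUNDLE_SIZE = 100  # the real component defaults to 100 GiB, expressed in bytes
packing_list = [
    ("uuid-a", 60),  # 0: uuid 1: size
    ("uuid-b", 55),
    ("uuid-c", 40),
    ("uuid-d", 30),
]
packing_spec = to_constant_volume(packing_list, MAX_BUNDLE_SIZE, 1)  # 1: size
for spec in packing_spec:
    total = sum(size for _, size in spec)
    print(f"bundle of {len(spec)} file(s), {total} bytes:", [uuid for uuid, _ in spec])
```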
#### File: lta/lta/site_move_verifier.py
```python
import asyncio
import logging
import os
from subprocess import PIPE, run
import sys
from typing import Any, Dict, List, Optional
from rest_tools.client import RestClient
from rest_tools.server import from_environment
import wipac_telemetry.tracing_tools as wtt
from .component import COMMON_CONFIG, Component, now, status_loop, work_loop
from .crypto import sha512sum
from .joiner import join_smart
from .log_format import StructuredFormatter
from .lta_types import BundleType
from .rest_server import boolify
Logger = logging.Logger
EXPECTED_CONFIG = COMMON_CONFIG.copy()
EXPECTED_CONFIG.update({
"DEST_ROOT_PATH": None,
"USE_FULL_BUNDLE_PATH": "FALSE",
"WORK_RETRIES": "3",
"WORK_TIMEOUT_SECONDS": "30",
})
MYQUOTA_ARGS = ["/usr/bin/myquota", "-G"]
OLD_MTIME_EPOCH_SEC = 30 * 60 # 30 MINUTES * 60 SEC_PER_MIN
def as_nonempty_columns(s: str) -> List[str]:
"""Split the provided string into columns and return the non-empty ones."""
cols = s.split(" ")
nonempty = list(filter(discard_empty, cols))
return nonempty
def discard_empty(s: str) -> bool:
"""Return true if the provided string is non-empty."""
if s:
return True
return False
def parse_myquota(s: str) -> List[Dict[str, str]]:
"""Split the provided string into columns and return the non-empty ones."""
results = []
lines = s.split("\n")
keys = as_nonempty_columns(lines[0])
for i in range(1, len(lines)):
if lines[i]:
values = as_nonempty_columns(lines[i])
quota_dict = {}
for j in range(0, len(keys)):
quota_dict[keys[j]] = values[j]
results.append(quota_dict)
return results
class SiteMoveVerifier(Component):
"""
SiteMoveVerifier is a Long Term Archive component.
A SiteMoveVerifier is responsible for verifying that a transfer to a
destination site has completed successfully. The transfer service is
queried as to the status of its work. The SiteMoveVerifier then
calculates the checksum of the file to verify that the contents have
been copied faithfully.
"""
def __init__(self, config: Dict[str, str], logger: Logger) -> None:
"""
Create a SiteMoveVerifier component.
config - A dictionary of required configuration values.
logger - The object the site_move_verifier should use for logging.
"""
super(SiteMoveVerifier, self).__init__("site_move_verifier", config, logger)
self.dest_root_path = config["DEST_ROOT_PATH"]
self.use_full_bundle_path = boolify(config["USE_FULL_BUNDLE_PATH"])
self.work_retries = int(config["WORK_RETRIES"])
self.work_timeout_seconds = float(config["WORK_TIMEOUT_SECONDS"])
def _do_status(self) -> Dict[str, Any]:
"""Provide additional status for the SiteMoveVerifier."""
quota = []
stdout = self._execute_myquota()
if stdout:
quota = parse_myquota(stdout)
return {"quota": quota}
def _expected_config(self) -> Dict[str, Optional[str]]:
"""Provide expected configuration dictionary."""
return EXPECTED_CONFIG
@wtt.spanned()
async def _do_work(self) -> None:
"""Perform a work cycle for this component."""
self.logger.info("Starting work on Bundles.")
work_claimed = True
while work_claimed:
work_claimed = await self._do_work_claim()
work_claimed &= not self.run_once_and_die
self.logger.info("Ending work on Bundles.")
@wtt.spanned()
async def _do_work_claim(self) -> bool:
"""Claim a bundle and perform work on it."""
# 1. Ask the LTA DB for the next Bundle to be verified
# configure a RestClient to talk to the LTA DB
lta_rc = RestClient(self.lta_rest_url,
token=self.lta_rest_token,
timeout=self.work_timeout_seconds,
retries=self.work_retries)
self.logger.info("Asking the LTA DB for a Bundle to verify.")
pop_body = {
"claimant": f"{self.name}-{self.instance_uuid}"
}
response = await lta_rc.request('POST', f'/Bundles/actions/pop?source={self.source_site}&dest={self.dest_site}&status={self.input_status}', pop_body)
self.logger.info(f"LTA DB responded with: {response}")
bundle = response["bundle"]
if not bundle:
self.logger.info("LTA DB did not provide a Bundle to verify. Going on vacation.")
return False
# process the Bundle that we were given
try:
await self._verify_bundle(lta_rc, bundle)
except Exception as e:
await self._quarantine_bundle(lta_rc, bundle, f"{e}")
raise e
# if we were successful at processing work, let the caller know
return True
@wtt.spanned()
async def _quarantine_bundle(self,
lta_rc: RestClient,
bundle: BundleType,
reason: str) -> None:
"""Quarantine the supplied bundle using the supplied reason."""
self.logger.error(f'Sending Bundle {bundle["uuid"]} to quarantine: {reason}.')
right_now = now()
patch_body = {
"status": "quarantined",
"reason": f"BY:{self.name}-{self.instance_uuid} REASON:{reason}",
"work_priority_timestamp": right_now,
}
try:
await lta_rc.request('PATCH', f'/Bundles/{bundle["uuid"]}', patch_body)
except Exception as e:
self.logger.error(f'Unable to quarantine Bundle {bundle["uuid"]}: {e}.')
@wtt.spanned()
async def _verify_bundle(self, lta_rc: RestClient, bundle: BundleType) -> bool:
"""Verify the provided Bundle with the transfer service and update the LTA DB."""
# get our ducks in a row
bundle_id = bundle["uuid"]
if self.use_full_bundle_path:
bundle_name = bundle["bundle_path"]
else:
bundle_name = os.path.basename(bundle["bundle_path"])
bundle_path = join_smart([self.dest_root_path, bundle_name])
# we'll compute the bundle's checksum
self.logger.info(f"Computing SHA512 checksum for bundle: '{bundle_path}'")
checksum_sha512 = sha512sum(bundle_path)
self.logger.info(f"Bundle '{bundle_path}' has SHA512 checksum '{checksum_sha512}'")
# now we'll compare the bundle's checksum
if bundle["checksum"]["sha512"] != checksum_sha512:
self.logger.info(f"SHA512 checksum at the time of bundle creation: {bundle['checksum']['sha512']}")
self.logger.info(f"SHA512 checksum of the file at the destination: {checksum_sha512}")
self.logger.info("These checksums do NOT match, and the Bundle will NOT be verified.")
right_now = now()
patch_body: Dict[str, Any] = {
"status": "quarantined",
"reason": f"BY:{self.name}-{self.instance_uuid} REASON:Checksum mismatch between creation and destination: {checksum_sha512}",
"work_priority_timestamp": right_now,
}
self.logger.info(f"PATCH /Bundles/{bundle_id} - '{patch_body}'")
await lta_rc.request('PATCH', f'/Bundles/{bundle_id}', patch_body)
return False
# update the Bundle in the LTA DB
self.logger.info("Destination checksum matches bundle creation checksum; the bundle is now verified.")
patch_body = {
"status": self.output_status,
"reason": "",
"update_timestamp": now(),
"claimed": False,
}
self.logger.info(f"PATCH /Bundles/{bundle_id} - '{patch_body}'")
await lta_rc.request('PATCH', f'/Bundles/{bundle_id}', patch_body)
return True
@wtt.spanned()
def _execute_myquota(self) -> Optional[str]:
"""Run the myquota command to determine disk usage at the site."""
completed_process = run(MYQUOTA_ARGS, stdout=PIPE, stderr=PIPE)
# if our command failed
if completed_process.returncode != 0:
self.logger.info(f"Command to check quota failed: {completed_process.args}")
self.logger.info(f"returncode: {completed_process.returncode}")
self.logger.info(f"stdout: {str(completed_process.stdout)}")
self.logger.info(f"stderr: {str(completed_process.stderr)}")
return None
# otherwise, we succeeded
return completed_process.stdout.decode("utf-8")
def runner() -> None:
"""Configure a SiteMoveVerifier component from the environment and set it running."""
# obtain our configuration from the environment
config = from_environment(EXPECTED_CONFIG)
# configure structured logging for the application
structured_formatter = StructuredFormatter(
component_type='SiteMoveVerifier',
component_name=config["COMPONENT_NAME"], # type: ignore[arg-type]
ndjson=True)
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setFormatter(structured_formatter)
root_logger = logging.getLogger(None)
root_logger.setLevel(logging.NOTSET)
root_logger.addHandler(stream_handler)
logger = logging.getLogger("lta.site_move_verifier")
# create our SiteMoveVerifier service
site_move_verifier = SiteMoveVerifier(config, logger) # type: ignore[arg-type]
# let's get to work
site_move_verifier.logger.info("Adding tasks to asyncio loop")
loop = asyncio.get_event_loop()
loop.create_task(status_loop(site_move_verifier))
loop.create_task(work_loop(site_move_verifier))
def main() -> None:
"""Configure a SiteMoveVerifier component from the environment and set it running."""
runner()
asyncio.get_event_loop().run_forever()
if __name__ == "__main__":
main()
```
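The heart of `_verify_bundle` is recomputing the destination file's SHA-512 with `lta.crypto.sha512sum` and comparing it to the checksum recorded when the bundle was created. A standalone sketch of that comparison, with a throwaway local file standing in for a real bundle:
```python
# Standalone sketch of the checksum comparison done in _verify_bundle.
# The "bundle" here is a throwaway local file; paths and values are illustrative.
from lta.crypto import sha512sum

with open("fake-bundle.zip", "wb") as f:
    f.write(b"pretend this is a bundle archive")

recorded_at_creation = sha512sum("fake-bundle.zip")  # what the bundler stored
recomputed_at_dest = sha512sum("fake-bundle.zip")    # what the verifier computes

if recorded_at_creation == recomputed_at_dest:
    print("checksums match: bundle would move to the output status")
else:
    print("checksum mismatch: bundle would be quarantined")
```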
#### File: lta/transfer/globus.py
```python
import os
import subprocess
import logging
from typing import Any, cast, Dict, Optional
from rest_tools.server import from_environment
EMPTY_STRING_SENTINEL_VALUE = "517c094b-739a-4a01-9d61-8d29eee99fda"
PROXY_CONFIG: Dict[str, Optional[str]] = {
"GLOBUS_PROXY_DURATION": "72",
"GLOBUS_PROXY_PASSPHRASE": EMPTY_STRING_SENTINEL_VALUE,
"GLOBUS_PROXY_VOMS_ROLE": EMPTY_STRING_SENTINEL_VALUE,
"GLOBUS_PROXY_VOMS_VO": EMPTY_STRING_SENTINEL_VALUE,
"GLOBUS_PROXY_OUTPUT": EMPTY_STRING_SENTINEL_VALUE,
}
logger = logging.getLogger('globus')
class SiteGlobusProxy(object):
"""
Manage site-wide globus proxy.
:param duration: proxy duration (optional, default 72 hours)
"""
def __init__(self, duration: Optional[int] = None):
"""Create a SiteGlobusProxy object."""
# load what we can from the environment
self.cfg = from_environment(PROXY_CONFIG)
# remove anything optional that wasn't specified
cfg_keys = list(self.cfg.keys())
for key in cfg_keys:
if self.cfg[key] == EMPTY_STRING_SENTINEL_VALUE:
del self.cfg[key]
# ensure duration is converted to an integer value
if "GLOBUS_PROXY_DURATION" in self.cfg:
self.cfg["GLOBUS_PROXY_DURATION"] = int(self.cfg["GLOBUS_PROXY_DURATION"])
# ensure we have at least an empty string for passphrase
if "GLOBUS_PROXY_PASSPHRASE" not in self.cfg:
self.cfg["GLOBUS_PROXY_PASSPHRASE"] = ""
# override the duration if specified during construction
if duration:
self.cfg['GLOBUS_PROXY_DURATION'] = duration
def set_duration(self, d: str) -> None:
"""Set the duration."""
self.cfg['GLOBUS_PROXY_DURATION'] = d
def set_passphrase(self, p: str) -> None:
"""Set the passphrase."""
self.cfg['GLOBUS_PROXY_PASSPHRASE'] = p
def set_voms_role(self, r: str) -> None:
"""Set the voms role."""
self.cfg['GLOBUS_PROXY_VOMS_ROLE'] = r
def set_voms_vo(self, vo: str) -> None:
"""Set the voms VO."""
self.cfg['GLOBUS_PROXY_VOMS_VO'] = vo
def update_proxy(self) -> None:
"""Update the proxy."""
logger.info('duration: %r', self.cfg['GLOBUS_PROXY_DURATION'])
if subprocess.call(['grid-proxy-info', '-e', '-valid', f'{self.cfg["GLOBUS_PROXY_DURATION"]}:0'],
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL):
# proxy needs updating
if 'GLOBUS_PROXY_VOMS_VO' in self.cfg and self.cfg['GLOBUS_PROXY_VOMS_VO']:
cmd = ['voms-proxy-init']
if 'GLOBUS_PROXY_VOMS_ROLE' in self.cfg and self.cfg['GLOBUS_PROXY_VOMS_ROLE']:
vo = self.cfg['GLOBUS_PROXY_VOMS_VO']
role = self.cfg['GLOBUS_PROXY_VOMS_ROLE']
cmd.extend(['-voms', '{0}:/{0}/Role={1}'.format(vo, role)])
else:
cmd.extend(['-voms', cast(str, self.cfg['GLOBUS_PROXY_VOMS_VO'])])
else:
cmd = ['grid-proxy-init']
cmd.extend(['-pwstdin', '-valid', f'{int(self.cfg["GLOBUS_PROXY_DURATION"])+1}:0'])
if 'GLOBUS_PROXY_OUTPUT' in self.cfg and self.cfg['GLOBUS_PROXY_OUTPUT']:
cmd.extend(['-out', cast(str, self.cfg['GLOBUS_PROXY_OUTPUT'])])
inputbytes = (cast(str, self.cfg['GLOBUS_PROXY_PASSPHRASE'])+'\n').encode('utf-8')
p = subprocess.run(cmd, input=inputbytes, capture_output=True, timeout=60, check=False)
logger.info('proxy cmd: %r', p.args)
logger.info('stdout: %s', p.stdout)
logger.info('stderr: %s', p.stderr)
if 'GLOBUS_PROXY_VOMS_VO' in self.cfg and self.cfg['GLOBUS_PROXY_VOMS_VO']:
for line in p.stdout.decode('utf-8').split('\n'):
if line.startswith('Creating proxy') and line.endswith('Done'):
break # this is a good proxy
else:
raise Exception('voms-proxy-init failed')
elif p.returncode > 0:
raise Exception('grid-proxy-init failed')
def get_proxy(self) -> Any:
"""Get the proxy location."""
if 'GLOBUS_PROXY_OUTPUT' in self.cfg and self.cfg['GLOBUS_PROXY_OUTPUT']:
return self.cfg['GLOBUS_PROXY_OUTPUT']
        with open(os.devnull, 'w') as devnull:
            return subprocess.check_output(['grid-proxy-info', '-path'],
                                           stderr=devnull).decode('utf-8').strip()
```
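A hedged usage sketch of `SiteGlobusProxy`; it assumes the Globus/VOMS command-line tools (`grid-proxy-init`, `grid-proxy-info`, optionally `voms-proxy-init`) are on `PATH`, the import path is inferred from the file layout, and the passphrase is a placeholder:
```python
# Usage sketch; requires the Globus/VOMS command-line tools on PATH.
# The import path, passphrase, and duration are assumptions for illustration.
from lta.transfer.globus import SiteGlobusProxy

proxy = SiteGlobusProxy(duration=24)           # proxy lifetime in hours
proxy.set_passphrase("not-a-real-passphrase")  # fed to grid-proxy-init on stdin
proxy.update_proxy()                           # no-op if a valid proxy already exists
print(proxy.get_proxy())                       # path to the proxy certificate
```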
#### File: lta/resources/make_transfer_request.py
```python
import asyncio
from rest_tools.client import RestClient # type: ignore
from rest_tools.server import from_environment # type: ignore
import sys
EXPECTED_CONFIG = {
'LTA_REST_TOKEN': None,
'LTA_REST_URL': None
}
async def main():
    # make sure we were given source, destination, and path
    if len(sys.argv) < 4:
print("Usage: make_transfer_request.py <source_site> <dest_site> <path>")
return
# construct the TransferRequest body
request_body = {
"source": sys.argv[1],
"dest": sys.argv[2],
"path": sys.argv[3],
}
# configure a RestClient from the environment
config = from_environment(EXPECTED_CONFIG)
rc = RestClient(config["LTA_REST_URL"], token=config["LTA_REST_TOKEN"])
# attempt to post the TransferRequest to the LTA DB
try:
response = await rc.request("POST", "/TransferRequests", request_body)
print(response)
except Exception as e:
print(e)
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
```
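The script takes its REST endpoint and token from the environment plus three positional arguments. A sketch of a driver invocation with placeholder values, assuming it is run from the repository root:
```python
# Driver sketch with placeholder endpoint, token, and path.
import os
import subprocess

env = dict(
    os.environ,
    LTA_REST_URL="https://lta.example.org/",  # placeholder endpoint
    LTA_REST_TOKEN="not-a-real-token",        # placeholder token
)
subprocess.run(
    ["python", "resources/make_transfer_request.py",
     "WIPAC", "NERSC", "/data/exp/IceCube/2013/filtered/PFFilt/1109"],
    env=env,
    check=True,
)
```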
#### File: lta/resources/test_data_helper.py
```python
import asyncio
import os
from secrets import token_hex
import sys
from uuid import uuid4
from rest_tools.client import RestClient # type: ignore
from rest_tools.server import from_environment # type: ignore
from lta.crypto import sha512sum
EXPECTED_CONFIG = {
'FAKE_CHECKSUM': "False",
'FILE_CATALOG_REST_TOKEN': None,
'FILE_CATALOG_REST_URL': None,
'LTA_REST_TOKEN': None,
'LTA_REST_URL': None,
}
async def add_catalog(site, path):
# configure a RestClient from the environment
config = from_environment(EXPECTED_CONFIG)
rc = RestClient(config["FILE_CATALOG_REST_URL"], token=config["FILE_CATALOG_REST_TOKEN"])
# for each (dirpath, dirnames, filenames) tuple in the walk
for root, dirs, files in os.walk(path):
# don't recurse into deeper subdirectories
if root != path:
continue
# for each file in our directory
for data_file in files:
# determine the logical name of the file
logical_name = os.path.join(root, data_file)
# create a catalog record for it
file_record = {
"uuid": str(uuid4()),
"logical_name": logical_name,
"checksum": {
"sha512": token_hex(64),
},
"locations": [
{
"site": f"{site}",
"path": logical_name,
}
],
"file_size": os.path.getsize(logical_name),
}
# if we're being pedantic about real checksums in test data
if config["FAKE_CHECKSUM"] != "True":
file_record["checksum"]["sha512"] = sha512sum(logical_name)
# add the file to the File Catalog
try:
print(f"POST /api/files - {logical_name}")
response = await rc.request("POST", "/api/files", file_record)
except Exception as e:
# whoopsy daisy...
print(e)
async def clear_catalog():
# configure a RestClient from the environment
config = from_environment(EXPECTED_CONFIG)
rc = RestClient(config["FILE_CATALOG_REST_URL"], token=config["FILE_CATALOG_REST_TOKEN"])
# while there are still files
clearing = True
while clearing:
try:
# get a list of up to 50 files
response = await rc.request("GET", "/api/files?start=0&limit=50")
files = response["files"]
# for each file that we found
for x in files:
# remove it from the file catalog
uuid = x["uuid"]
logical_name = x["logical_name"]
print(f"DELETE /api/files/{uuid} - {logical_name}")
response2 = await rc.request("DELETE", f"/api/files/{uuid}")
# if we didn't get any files back, we're done
if len(files) < 1:
clearing = False
except Exception as e:
# whoopsy daisy...
clearing = False
print(e)
async def clear_lta_bundles():
# configure a RestClient from the environment
config = from_environment(EXPECTED_CONFIG)
rc = RestClient(config["LTA_REST_URL"], token=config["LTA_REST_TOKEN"])
# while there are still bundles
clearing = True
while clearing:
try:
# get a list of all the bundles in the LTA DB
response = await rc.request("GET", "/Bundles")
results = response["results"]
# for each bundle that we found
for uuid in results:
# remove it from the LTA DB
print(f"DELETE /Bundles/{uuid}")
response2 = await rc.request("DELETE", f"/Bundles/{uuid}")
# if we didn't get any files back, we're done
if len(results) < 1:
clearing = False
except Exception as e:
# whoopsy daisy...
clearing = False
print(e)
async def clear_lta_transfer_requests():
# configure a RestClient from the environment
config = from_environment(EXPECTED_CONFIG)
rc = RestClient(config["LTA_REST_URL"], token=config["LTA_REST_TOKEN"])
# while there are still transfer requests
clearing = True
while clearing:
try:
# get a list of up to 50 transfer requests
# technically a lie; the LTA DB honors neither start nor limit
response = await rc.request("GET", "/TransferRequests?start=0&limit=50")
results = response["results"]
# for each file that we found
for x in results:
# remove it from the file catalog
uuid = x["uuid"]
print(f"DELETE /TransferRequests/{uuid}")
response2 = await rc.request("DELETE", f"/TransferRequests/{uuid}")
# if we didn't get any files back, we're done
if len(results) < 1:
clearing = False
except Exception as e:
# whoopsy daisy...
clearing = False
print(e)
async def main():
# make sure we were given a subcommand
if len(sys.argv) < 2:
print("Usage: test_data_helper.py [add-catalog <site> <path> | clear-catalog | clear-lta-transfer-requests]")
return
# obtain the subcommand
subcommand = sys.argv[1]
# if we're adding files to the catalog
if subcommand == "add-catalog":
if len(sys.argv) >= 4:
await add_catalog(sys.argv[2], sys.argv[3])
else:
print(f"test_data_helper.py: Subcommand '{subcommand}' missing <site> and <path> arguments")
return
# if we're clearing files from the catalog
elif subcommand == "clear-catalog":
await clear_catalog()
# if we're clearing bundles from the LTA DB
elif subcommand == "clear-lta-bundles":
await clear_lta_bundles()
# if we're clearing transfer requests from the LTA DB
elif subcommand == "clear-lta-transfer-requests":
await clear_lta_transfer_requests()
# otherwise, what the heck is the user trying to do?
else:
print(f"test_data_helper.py: Unknown subcommand '{subcommand}'")
return
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
```
#### File: lta/tests/test_component.py
```python
from asyncio import Future
from unittest.mock import call, MagicMock
from uuid import uuid1
import pytest # type: ignore
import requests
from tornado.web import HTTPError # type: ignore
from lta.component import patch_status_heartbeat, status_loop, work_loop
from lta.picker import main, Picker
from .test_util import AsyncMock, ObjectLiteral
@pytest.fixture
def config():
"""Supply a stock Picker component configuration."""
return {
"COMPONENT_NAME": "testing-picker",
"FILE_CATALOG_REST_TOKEN": "<PASSWORD>-file-catalog-rest-token",
"FILE_CATALOG_REST_URL": "http://kVj74wBA1AMTDV8zccn67pGuWJqHZzD7iJQHrUJKA.com/",
"HEARTBEAT_PATCH_RETRIES": "3",
"HEARTBEAT_PATCH_TIMEOUT_SECONDS": "30",
"HEARTBEAT_SLEEP_DURATION_SECONDS": "60",
"LTA_REST_TOKEN": "fake-lta-rest-token",
"LTA_REST_URL": "http://RmMNHdPhHpH2ZxfaFAC9d2jiIbf5pZiHDqy43rFLQiM.com/",
"WORK_RETRIES": "3",
"WORK_SLEEP_DURATION_SECONDS": "60",
"WORK_TIMEOUT_SECONDS": "30"
}
def test_always_succeed():
"""Succeed with flying colors."""
assert True
def xtest_constructor_missing_config():
"""Fail with a TypeError if a configuration object isn't provided."""
with pytest.raises(TypeError):
Picker()
def xtest_constructor_missing_logging():
"""Fail with a TypeError if a logging object isn't provided."""
with pytest.raises(TypeError):
config = {
"PAN_GALACTIC_GARGLE_BLASTER": "Yummy"
}
Picker(config)
def xtest_constructor_config_missing_values(mocker):
"""Fail with a ValueError if the configuration object is missing required configuration variables."""
config = {
"PAN_GALACTIC_GARGLE_BLASTER": "Yummy"
}
logger_mock = mocker.MagicMock()
with pytest.raises(ValueError):
Picker(config, logger_mock)
def xtest_constructor_config_poison_values(config, mocker):
"""Fail with a ValueError if the configuration object is missing required configuration variables."""
picker_config = config.copy()
picker_config["LTA_REST_URL"] = None
logger_mock = mocker.MagicMock()
with pytest.raises(ValueError):
Picker(picker_config, logger_mock)
def xtest_constructor_config(config, mocker):
"""Test that a Picker can be constructed with a configuration object and a logging object."""
logger_mock = mocker.MagicMock()
p = Picker(config, logger_mock)
assert p.file_catalog_rest_url == "http://kVj74wBA1AMTDV8zccn67pGuWJqHZzD7iJQHrUJKA.com/"
assert p.heartbeat_sleep_duration_seconds == 60
assert p.lta_rest_url == "http://RmMNHdPhHpH2ZxfaFAC9d2jiIbf5pZiHDqy43rFLQiM.com/"
assert p.name == "testing-picker"
assert p.work_sleep_duration_seconds == 60
assert p.logger == logger_mock
def xtest_constructor_config_sleep_type_int(config, mocker):
"""Ensure that sleep seconds can also be provided as an integer."""
logger_mock = mocker.MagicMock()
p = Picker(config, logger_mock)
assert p.file_catalog_rest_url == "http://kVj74wBA1AMTDV8zccn67pGuWJqHZzD7iJQHrUJKA.com/"
assert p.heartbeat_sleep_duration_seconds == 60
assert p.lta_rest_url == "http://RmMNHdPhHpH2ZxfaFAC9d2jiIbf5pZiHDqy43rFLQiM.com/"
assert p.name == "testing-picker"
assert p.work_sleep_duration_seconds == 60
assert p.logger == logger_mock
def xtest_constructor_state(config, mocker):
"""Verify that the Picker has a reasonable state when it is first constructed."""
logger_mock = mocker.MagicMock()
p = Picker(config, logger_mock)
assert p.last_work_begin_timestamp is p.last_work_end_timestamp
@pytest.mark.asyncio
async def xtest_patch_status_heartbeat_connection_error(config, mocker):
"""
Verify Picker behavior when status heartbeat patches fail.
The Picker will change state to indicate that its connection to LTA is
not OK, and it will log an error, if the PATCH call results in a
ConnectionError being raised.
"""
patch_mock = mocker.patch("rest_tools.client.RestClient.request")
patch_mock.side_effect = requests.exceptions.HTTPError
logger_mock = mocker.MagicMock()
p = Picker(config, logger_mock)
assert p.lta_ok is False
p.lta_ok = True
assert p.lta_ok is True
await patch_status_heartbeat(p)
assert p.lta_ok is False
logger_mock.error.assert_called()
@pytest.mark.asyncio
async def xtest_patch_status_heartbeat_patch_call(config, mocker):
"""
Verify Picker behavior when status heartbeat patches succeed.
Test that the Picker calls the proper URL for the PATCH /status/{component}
route, and on success (200), updates its internal status to say that the
connection to LTA is OK.
"""
patch_mock = mocker.patch("rest_tools.client.RestClient.request")
patch_mock.return_value = Future()
patch_mock.return_value.set_result(ObjectLiteral(
status_code=200
))
logger_mock = mocker.MagicMock()
p = Picker(config, logger_mock)
assert p.lta_ok is False
retVal = await patch_status_heartbeat(p)
assert p.lta_ok is True
assert retVal is True
patch_mock.assert_called_with("PATCH", "/status/picker", mocker.ANY)
logger_mock.assert_not_called()
@pytest.mark.asyncio
async def xtest_patch_status_heartbeat_patch_call_data(config, mocker):
"""
Verify Picker behavior when status heartbeat patches succeed.
Test that the Picker provides proper status data to the
PATCH /status/{component} route.
"""
patch_mock = mocker.patch("rest_tools.client.RestClient.request")
patch_mock.return_value = Future()
patch_mock.return_value.set_result(ObjectLiteral(
status_code=200
))
logger_mock = mocker.MagicMock()
picker_config = config.copy()
picker_config["PICKER_NAME"] = "special-picker-name"
p = Picker(picker_config, logger_mock)
assert p.lta_ok is False
retVal = await patch_status_heartbeat(p)
assert p.lta_ok is True
assert retVal is True
patch_mock.assert_called_with(mocker.ANY, mocker.ANY, {
"special-picker-name": {
"timestamp": mocker.ANY,
"file_catalog_ok": False,
"last_work_begin_timestamp": mocker.ANY,
"last_work_end_timestamp": mocker.ANY,
"lta_ok": False
}
})
logger_mock.assert_not_called()
@pytest.mark.asyncio
async def xtest_patch_status_heartbeat_patch_call_4xx(config, mocker):
"""
Verify Picker behavior when status heartbeat patches fail.
The Picker will change state to indicate that its connection to LTA is
not OK, and that it will log an error, if the PATCH call results in a
4xx series response.
"""
patch_mock = mocker.patch("rest_tools.client.RestClient.request")
patch_mock.side_effect = requests.exceptions.HTTPError("400 Bad Request")
logger_mock = mocker.MagicMock()
p = Picker(config, logger_mock)
assert p.lta_ok is False
p.lta_ok = True
assert p.lta_ok is True
await patch_status_heartbeat(p)
assert p.lta_ok is False
logger_mock.error.assert_called()
@pytest.mark.asyncio
async def xtest_status_loop(config, mocker):
"""Ensure the status loop will loop."""
# NOTE: The Exception() is a hack to get around the infinite loop in status_loop()
patch_mock = mocker.patch("lta.picker.patch_status_heartbeat", new_callable=AsyncMock)
patch_mock.side_effect = [True, Exception()]
sleep_mock = mocker.patch("asyncio.sleep", new_callable=AsyncMock)
sleep_mock.side_effect = [None, None]
logger_mock = mocker.MagicMock()
p = Picker(config, logger_mock)
# NOTE: This is a hack to get around the infinite loop in status_loop()
try:
await status_loop(p)
assert False, "This should have exited with an Exception"
except Exception:
pass
patch_mock.assert_called_with(p)
sleep_mock.assert_called_with(60)
@pytest.mark.asyncio
async def xtest_work_loop(config, mocker):
"""Ensure the work loop will loop."""
# NOTE: The Exception() is a hack to get around the infinite loop in work_loop()
run_mock = mocker.patch("lta.picker.Picker.run", new_callable=AsyncMock)
run_mock.side_effect = [None, Exception()]
sleep_mock = mocker.patch("asyncio.sleep", new_callable=AsyncMock)
sleep_mock.side_effect = [None, None]
logger_mock = mocker.MagicMock()
picker_config = config.copy()
picker_config["WORK_SLEEP_DURATION_SECONDS"] = "300"
p = Picker(picker_config, logger_mock)
# NOTE: This is a hack to get around the infinite loop in work_loop()
try:
await work_loop(p)
assert False, "This should have exited with an Exception"
except Exception:
pass
run_mock.assert_called()
sleep_mock.assert_called_with(300)
@pytest.mark.asyncio
async def xtest_script_main(config, mocker, monkeypatch):
"""
Verify Picker component behavior when run as a script.
Test to make sure running the Picker as a script does the setup work
that we expect and then launches the picker service.
"""
for key in config.keys():
monkeypatch.setenv(key, config[key])
mock_event_loop = mocker.patch("asyncio.get_event_loop")
mock_root_logger = mocker.patch("logging.getLogger")
mock_status_loop = mocker.patch("lta.picker.status_loop")
mock_work_loop = mocker.patch("lta.picker.work_loop")
main()
mock_event_loop.assert_called()
mock_root_logger.assert_called()
mock_status_loop.assert_called()
mock_work_loop.assert_called()
@pytest.mark.asyncio
async def xtest_picker_logs_configuration(mocker):
"""Test to make sure the Picker logs its configuration."""
logger_mock = mocker.MagicMock()
picker_config = {
"FILE_CATALOG_REST_TOKEN": "<PASSWORD>-fake-file-catalog-rest-token",
"FILE_CATALOG_REST_URL": "logme-http://kVj74wBA1AMTDV8zccn67pGuWJqHZzD7iJQHrUJKA.com/",
"HEARTBEAT_PATCH_RETRIES": "1",
"HEARTBEAT_PATCH_TIMEOUT_SECONDS": "20",
"HEARTBEAT_SLEEP_DURATION_SECONDS": "30",
"LTA_REST_TOKEN": "<PASSWORD>-lta-rest-token",
"LTA_REST_URL": "logme-http://RmMNHdPhHpH2ZxfaFAC9d2jiIbf5pZiHDqy43rFLQiM.com/",
"PICKER_NAME": "logme-testing-picker",
"WORK_RETRIES": "5",
"WORK_SLEEP_DURATION_SECONDS": "70",
"WORK_TIMEOUT_SECONDS": "90"
}
Picker(picker_config, logger_mock)
EXPECTED_LOGGER_CALLS = [
call("Picker 'logme-testing-picker' is configured:"),
call('FILE_CATALOG_REST_TOKEN = logme-fake-file-catalog-rest-token'),
call('FILE_CATALOG_REST_URL = logme-http://kVj74wBA1AMTDV8zccn67pGuWJqHZzD7iJQHrUJKA.com/'),
call('HEARTBEAT_PATCH_RETRIES = 1'),
call('HEARTBEAT_PATCH_TIMEOUT_SECONDS = 20'),
call('HEARTBEAT_SLEEP_DURATION_SECONDS = 30'),
        call('LTA_REST_TOKEN = logme-fake-lta-rest-token'),
call('LTA_REST_URL = logme-http://RmMNHdPhHpH2ZxfaFAC9d2jiIbf5pZiHDqy43rFLQiM.com/'),
call('PICKER_NAME = logme-testing-picker'),
call('WORK_RETRIES = 5'),
call('WORK_SLEEP_DURATION_SECONDS = 70'),
call('WORK_TIMEOUT_SECONDS = 90')
]
logger_mock.info.assert_has_calls(EXPECTED_LOGGER_CALLS)
@pytest.mark.asyncio
async def xtest_picker_run(config, mocker):
"""Test the Picker does the work the picker should do."""
logger_mock = mocker.MagicMock()
p = Picker(config, logger_mock)
p._do_work = AsyncMock()
await p.run()
p._do_work.assert_called()
@pytest.mark.asyncio
async def xtest_picker_run_exception(config, mocker):
"""Test an error doesn't kill the Picker."""
logger_mock = mocker.MagicMock()
p = Picker(config, logger_mock)
p.last_work_end_timestamp = None
p._do_work = AsyncMock()
p._do_work.side_effect = [Exception("bad thing happen!")]
await p.run()
p._do_work.assert_called()
assert p.last_work_end_timestamp
@pytest.mark.asyncio
async def xtest_picker_do_work_pop_exception(config, mocker):
"""Test that _do_work raises when the RestClient can't pop."""
logger_mock = mocker.MagicMock()
lta_rc_mock = mocker.patch("rest_tools.client.RestClient.request", new_callable=AsyncMock)
lta_rc_mock.side_effect = HTTPError(500, "LTA DB on fire. Again.")
p = Picker(config, logger_mock)
with pytest.raises(HTTPError):
await p._do_work()
lta_rc_mock.assert_called_with("POST", '/TransferRequests/actions/pop?source=WIPAC', {'picker': 'testing-picker'})
@pytest.mark.asyncio
async def xtest_picker_do_work_no_results(config, mocker):
"""Test that _do_work goes on vacation when the LTA DB has no work."""
logger_mock = mocker.MagicMock()
lta_rc_mock = mocker.patch("rest_tools.client.RestClient.request", new_callable=AsyncMock)
lta_rc_mock.return_value = {
"results": []
}
p = Picker(config, logger_mock)
await p._do_work()
lta_rc_mock.assert_called_with("POST", '/TransferRequests/actions/pop?source=WIPAC', {'picker': 'testing-picker'})
@pytest.mark.asyncio
async def xtest_picker_do_work_yes_results(config, mocker):
"""Test that _do_work processes each TransferRequest it gets from the LTA DB."""
logger_mock = mocker.MagicMock()
lta_rc_mock = mocker.patch("rest_tools.client.RestClient.request", new_callable=AsyncMock)
lta_rc_mock.return_value = {
"results": [
{
"one": 1
},
{
"two": 2
},
{
"three": 3
}
]
}
dwtr_mock = mocker.patch("lta.picker.Picker._do_work_transfer_request", new_callable=AsyncMock)
p = Picker(config, logger_mock)
await p._do_work()
lta_rc_mock.assert_called_with("POST", '/TransferRequests/actions/pop?source=WIPAC', {'picker': 'testing-picker'})
dwtr_mock.assert_called_with(mocker.ANY, {"three": 3})
@pytest.mark.asyncio
async def xtest_picker_do_work_transfer_request_fc_exception(config, mocker):
"""Test that _do_work_transfer_request raises an exception if the File Catalog has an error."""
logger_mock = mocker.MagicMock()
p = Picker(config, logger_mock)
lta_rc_mock = MagicMock()
tr = {
"source": "WIPAC:/tmp/this/is/just/a/test",
"dest": [
"DESY:/tmp/this/is/just/a/test",
"NERSC:/tmp/this/is/just/a/test"
]
}
fc_rc_mock = mocker.patch("rest_tools.client.RestClient.request", new_callable=AsyncMock)
fc_rc_mock.side_effect = HTTPError(500, "LTA DB on fire. Again.")
with pytest.raises(HTTPError):
await p._do_work_transfer_request(lta_rc_mock, tr)
fc_rc_mock.assert_called_with("GET", '/api/files?query={"locations.site": {"$eq": "WIPAC"}, "locations.path": {"$regex": "^/tmp/this/is/just/a/test"}}')
@pytest.mark.asyncio
async def xtest_picker_do_work_transfer_request_fc_no_results(config, mocker):
"""Test that _do_work_transfer_request raises an exception when the LTA DB refuses to create an empty list."""
logger_mock = mocker.MagicMock()
p = Picker(config, logger_mock)
lta_rc_mock = mocker.MagicMock()
lta_rc_mock.request = AsyncMock()
lta_rc_mock.request.side_effect = HTTPError(400, reason="files field is empty")
tr = {
"source": "WIPAC:/tmp/this/is/just/a/test",
"dest": [
"DESY:/tmp/this/is/just/a/test",
"NERSC:/tmp/this/is/just/a/test"
]
}
fc_rc_mock = mocker.patch("rest_tools.client.RestClient.request", new_callable=AsyncMock)
fc_rc_mock.return_value = {
"files": []
}
with pytest.raises(HTTPError):
await p._do_work_transfer_request(lta_rc_mock, tr)
fc_rc_mock.assert_called_with("GET", '/api/files?query={"locations.site": {"$eq": "WIPAC"}, "locations.path": {"$regex": "^/tmp/this/is/just/a/test"}}')
lta_rc_mock.request.assert_called_with("POST", '/Files/actions/bulk_create', {'files': []})
@pytest.mark.asyncio
async def xtest_picker_do_work_transfer_request_fc_yes_results(config, mocker):
"""Test that _do_work_transfer_request processes each file it gets back from the File Catalog."""
logger_mock = mocker.MagicMock()
lta_rc_mock = mocker.MagicMock()
lta_rc_mock.request = AsyncMock()
lta_rc_mock.request.return_value = {
"files": [uuid1().hex, uuid1().hex, uuid1().hex]
}
tr = {
"uuid": "a2647c96-b12a-4fb4-a9c3-3c527b771f6f",
"source": "WIPAC:/tmp/this/is/just/a/test",
"dest": [
"DESY:/tmp/this/is/just/a/test",
"NERSC:/tmp/this/is/just/a/test"
]
}
fc_rc_mock = mocker.patch("rest_tools.client.RestClient.request", new_callable=AsyncMock)
fc_rc_mock.return_value = {
"_links": {
"parent": {
"href": "/api"
},
"self": {
"href": "/api/files"
}
},
"files": [
{"logical_name": "/data/exp/IceCube/2013/filtered/PFFilt/1109/PFFilt_PhysicsFiltering_Run00123231_Subrun00000000_00000000.tar.bz2",
"uuid": "65983278-3322-4754-9e5a-1f1c1e118fbc"},
{"logical_name": "/data/exp/IceCube/2013/filtered/PFFilt/1109/PFFilt_PhysicsFiltering_Run00123231_Subrun00000000_00000001.tar.bz2",
"uuid": "aaee52f2-f903-43d3-b5da-2e19880e1312"},
{"logical_name": "/data/exp/IceCube/2013/filtered/PFFilt/1109/PFFilt_PhysicsFiltering_Run00123231_Subrun00000000_00000002.tar.bz2",
"uuid": "a336aa2b-83d8-4056-8fc1-1a0b72bce7c4"}]
}
dwcf_mock = mocker.patch("lta.picker.Picker._do_work_catalog_file", new_callable=AsyncMock)
dwcf_mock.return_value = [{}, {}, {}]
p = Picker(config, logger_mock)
await p._do_work_transfer_request(lta_rc_mock, tr)
fc_rc_mock.assert_called_with("GET", '/api/files?query={"locations.site": {"$eq": "WIPAC"}, "locations.path": {"$regex": "^/tmp/this/is/just/a/test"}}')
lta_rc_mock.request.assert_called_with("DELETE", '/TransferRequests/a2647c96-b12a-4fb4-a9c3-3c527b771f6f')
dests = [('DESY', '/tmp/this/is/just/a/test'), ('NERSC', '/tmp/this/is/just/a/test')]
third_file = {"logical_name": "/data/exp/IceCube/2013/filtered/PFFilt/1109/PFFilt_PhysicsFiltering_Run00123231_Subrun00000000_00000002.tar.bz2",
"uuid": "a336aa2b-83d8-4056-8fc1-1a0b72bce7c4"}
dwcf_mock.assert_called_with(lta_rc_mock, tr, mocker.ANY, dests, third_file)
@pytest.mark.asyncio
async def xtest_picker_do_work_catalog_file_fc_exception(config, mocker):
"""Test that _do_work_catalog_file raises an exception if the File Catalog has an error."""
logger_mock = mocker.MagicMock()
p = Picker(config, logger_mock)
lta_rc_mock = MagicMock()
lta_rc_mock.request = AsyncMock()
lta_rc_mock.request.side_effect = HTTPError(500, "hey! this shouldn't get called in this test!")
tr = {
"source": "WIPAC:/tmp/this/is/just/a/test",
"dest": [
"DESY:/tmp/this/is/just/a/test",
"NERSC:/tmp/this/is/just/a/test"
]
}
fc_rc_mock = MagicMock()
fc_rc_mock.request = AsyncMock()
fc_rc_mock.request.side_effect = HTTPError(500, "File Catalog on fire. Again.")
dests = [('DESY', '/tmp/this/is/just/a/test'), ('NERSC', '/tmp/this/is/just/a/test')]
catalog_file = {"logical_name": "/data/exp/IceCube/2013/filtered/PFFilt/1109/PFFilt_PhysicsFiltering_Run00123231_Subrun00000000_00000002.tar.bz2",
"uuid": "a336aa2b-83d8-4056-8fc1-1a0b72bce7c4"}
with pytest.raises(HTTPError):
await p._do_work_catalog_file(lta_rc_mock, tr, fc_rc_mock, dests, catalog_file)
fc_rc_mock.request.assert_called_with("GET", '/api/files/a336aa2b-83d8-4056-8fc1-1a0b72bce7c4')
lta_rc_mock.request.assert_not_called()
# @pytest.mark.asyncio
# async def xtest_picker_do_work_catalog_file_fc_no_result(config, mocker):
# normally we'd write a test here, but it would be the same as the last one
# except it'd be a 404 instead of a 500 that prompted the HTTPError
# so, imagine the last test, but with a 404; ahhh, coverage bliss.
@pytest.mark.asyncio
async def xtest_picker_do_work_catalog_file_fc_yes_result(config, mocker):
"""Test that _do_work_catalog_file returns File objects for both destinations."""
logger_mock = mocker.MagicMock()
p = Picker(config, logger_mock)
lta_rc_mock = MagicMock()
lta_rc_mock.request = AsyncMock()
lta_rc_mock.request.side_effect = HTTPError(500, "hey! this shouldn't get called in this test!")
tr = {
"uuid": "c9a23a20-92d6-49eb-a63e-0f73ac632146",
"source": "WIPAC:/tmp/this/is/just/a/test",
"dest": [
"DESY:/tmp/this/is/just/a/test",
"NERSC:/tmp/this/is/just/a/test"
]
}
fc_rc_mock = MagicMock()
fc_rc_mock.request = AsyncMock()
catalog_record = {
"_id": "5b6df684e1382307f078be02",
"logical_name": "/data/exp/IceCube/2013/filtered/PFFilt/1109/PFFilt_PhysicsFiltering_Run00123231_Subrun00000000_00000002.tar.bz2",
"uuid": "a336aa2b-83d8-4056-8fc1-1a0b72bce7c4",
"checksum": {"sha512": "ae7c1639aeaacbd69b8540a117e71a6a92b5e4eff0d7802150609daa98d99fd650f8285e26af23f97f441f3047afbce88ad54bb3feb4fe243a429934d0ee4211"},
"locations": [{"path": "/data/exp/IceCube/2013/filtered/PFFilt/1109/PFFilt_PhysicsFiltering_Run00123231_Subrun00000000_00000002.tar.bz2",
"site": "WIPAC"}],
"file_size": 104136149,
"meta_modify_date": "2018-10-30 17:28:22.757029",
"final_analysis_sample": {"collection_tag": "bae45fdd-8e26-47a2-92cc-75b96c105c64"}
}
fc_rc_mock.request.return_value = catalog_record
dests = [('DESY', '/tmp/this/is/just/a/test'), ('NERSC', '/tmp/this/is/just/a/test')]
catalog_file = {"logical_name": "/data/exp/IceCube/2013/filtered/PFFilt/1109/PFFilt_PhysicsFiltering_Run00123231_Subrun00000000_00000002.tar.bz2",
"uuid": "a336aa2b-83d8-4056-8fc1-1a0b72bce7c4"}
bulk_create = await p._do_work_catalog_file(lta_rc_mock, tr, fc_rc_mock, dests, catalog_file)
fc_rc_mock.request.assert_called_with("GET", '/api/files/a336aa2b-83d8-4056-8fc1-1a0b72bce7c4')
lta_rc_mock.request.assert_not_called()
assert bulk_create == [
{
"source": "WIPAC:/tmp/this/is/just/a/test",
"dest": "DESY:/tmp/this/is/just/a/test",
"request": "c9a23a20-92d6-49eb-a63e-0f73ac632146",
"catalog": catalog_record
},
{
"source": "WIPAC:/tmp/this/is/just/a/test",
"dest": "NERSC:/tmp/this/is/just/a/test",
"request": "c9a23a20-92d6-49eb-a63e-0f73ac632146",
"catalog": catalog_record
}
]
@pytest.mark.asyncio
async def xtest_picker_do_work_catalog_file_fc_yes_result_only_one(config, mocker):
"""Test that _do_work_catalog_file returns File objects for one destination."""
logger_mock = mocker.MagicMock()
p = Picker(config, logger_mock)
lta_rc_mock = MagicMock()
lta_rc_mock.request = AsyncMock()
lta_rc_mock.request.side_effect = HTTPError(500, "hey! this shouldn't get called in this test!")
tr = {
"uuid": "c9a23a20-92d6-49eb-a63e-0f73ac632146",
"source": "WIPAC:/data/exp/IceCube/2013/filtered/PFFilt/1109",
"dest": [
"DESY:/tmp/this/is/just/a/test",
"NERSC:/tmp/this/is/just/a/test"
]
}
fc_rc_mock = MagicMock()
fc_rc_mock.request = AsyncMock()
catalog_record = {
"_id": "5b6df684e1382307f078be02",
"logical_name": "/data/exp/IceCube/2013/filtered/PFFilt/1109/PFFilt_PhysicsFiltering_Run00123231_Subrun00000000_00000002.tar.bz2",
"uuid": "a336aa2b-83d8-4056-8fc1-1a0b72bce7c4",
"checksum": {
"sha512": "ae7c1639aeaacbd69b8540a117e71a6a92b5e4eff0d7802150609daa98d99fd650f8285e26af23f97f441f3047afbce88ad54bb3feb4fe243a429934d0ee4211"
},
"locations": [
{
"site": "WIPAC",
"path": "/data/exp/IceCube/2013/filtered/PFFilt/1109/PFFilt_PhysicsFiltering_Run00123231_Subrun00000000_00000002.tar.bz2"
},
{
"site": "NERSC",
"path": "/tmp/this/is/just/a/test/1b71f86f-18a1-4d90-b88e-7505feda3ce6.zip",
"archive": True
}
],
"file_size": 104136149,
"meta_modify_date": "2018-10-30 17:28:22.757029",
"final_analysis_sample": {
"collection_tag": "bae45fdd-8e26-47a2-92cc-75b96c105c64"
}
}
fc_rc_mock.request.return_value = catalog_record
dests = [('DESY', '/tmp/this/is/just/a/test'), ('NERSC', '/tmp/this/is/just/a/test')]
catalog_file = {"logical_name": "/data/exp/IceCube/2013/filtered/PFFilt/1109/PFFilt_PhysicsFiltering_Run00123231_Subrun00000000_00000002.tar.bz2", "uuid": "a336aa2b-83d8-4056-8fc1-1a0b72bce7c4"}
bulk_create = await p._do_work_catalog_file(lta_rc_mock, tr, fc_rc_mock, dests, catalog_file)
fc_rc_mock.request.assert_called_with("GET", '/api/files/a336aa2b-83d8-4056-8fc1-1a0b72bce7c4')
lta_rc_mock.request.assert_not_called()
# this one is failing because the picker code isn't doing startswith() on the dest path
assert bulk_create == [
{
"source": "WIPAC:/data/exp/IceCube/2013/filtered/PFFilt/1109",
"dest": "DESY:/tmp/this/is/just/a/test",
"request": "c9a23a20-92d6-49eb-a63e-0f73ac632146",
"catalog": catalog_record
}
]
```
#### File: lta/tests/test_crypto.py
```python
import os
from tempfile import NamedTemporaryFile
from lta.crypto import adler32sum, sha512sum, lta_checksums
def test_adler32sum_tempfile(mocker):
"""Test that adler32sum hashes a temporary file correctly."""
with NamedTemporaryFile(mode="wb", delete=False) as temp:
temp.write(bytearray("The quick brown fox jumps over the lazy dog\n", "utf8"))
temp.close()
hashsum = adler32sum(temp.name)
assert hashsum == "6bc00fe4"
os.remove(temp.name)
def test_sha512sum_tempfile(mocker):
"""Test that sha512sum hashes a temporary file correctly."""
with NamedTemporaryFile(mode="wb", delete=False) as temp:
temp.write(bytearray("The quick brown fox jumps over the lazy dog\n", "utf8"))
temp.close()
hashsum = sha512sum(temp.name)
assert hashsum == "a12ac6bdd854ac30c5cc5b576e1ee2c060c0d8c2bec8797423d7119aa2b962f7f30ce2e39879cbff0109c8f0a3fd9389a369daae45df7d7b286d7d98272dc5b1"
os.remove(temp.name)
def test_lta_checksums_tempfile(mocker):
"""Test that lta_checksums hashes a temporary file correctly."""
with NamedTemporaryFile(mode="wb", delete=False) as temp:
temp.write(bytearray("The quick brown fox jumps over the lazy dog\n", "utf8"))
temp.close()
hashsum = lta_checksums(temp.name)
assert hashsum["adler32"] == "6bc00fe4"
assert hashsum["sha512"] == "<KEY>"
os.remove(temp.name)
```
#### File: lta/tests/test_site_move_verifier.py
```python
from unittest.mock import call, MagicMock
import pytest # type: ignore
from tornado.web import HTTPError # type: ignore
from lta.site_move_verifier import as_nonempty_columns, discard_empty, MYQUOTA_ARGS, parse_myquota
from lta.site_move_verifier import main, SiteMoveVerifier
from .test_util import AsyncMock, ObjectLiteral
@pytest.fixture
def config():
"""Supply a stock SiteMoveVerifier component configuration."""
return {
"COMPONENT_NAME": "testing-site_move_verifier",
"DEST_ROOT_PATH": "/path/to/rse",
"DEST_SITE": "NERSC",
"HEARTBEAT_PATCH_RETRIES": "3",
"HEARTBEAT_PATCH_TIMEOUT_SECONDS": "30",
"HEARTBEAT_SLEEP_DURATION_SECONDS": "60",
"INPUT_STATUS": "transferring",
"LTA_REST_TOKEN": "<PASSWORD>-lta-rest-token",
"LTA_REST_URL": "http://RmMNHdPhHpH2ZxfaFAC9d2jiIbf5pZiHDqy43rFLQiM.com/",
"OUTPUT_STATUS": "taping",
"RUN_ONCE_AND_DIE": "False",
"SOURCE_SITE": "WIPAC",
"USE_FULL_BUNDLE_PATH": "FALSE",
"WORK_RETRIES": "3",
"WORK_SLEEP_DURATION_SECONDS": "60",
"WORK_TIMEOUT_SECONDS": "30",
}
def test_as_nonempty_columns():
"""Test that test_as_nonempty_columns does what it says on the tin."""
assert as_nonempty_columns("FILESYSTEM SPACE_USED SPACE_QUOTA SPACE_PCT INODE_USED INODE_QUOTA INODE_PCT") == ["FILESYSTEM", "SPACE_USED", "SPACE_QUOTA", "SPACE_PCT", "INODE_USED", "INODE_QUOTA", "INODE_PCT"]
assert as_nonempty_columns("cscratch1 7638.60GiB 51200.00GiB 14.9% 0.00G 0.01G 0.1%") == ["cscratch1", "7638.60GiB", "51200.00GiB", "14.9%", "0.00G", "0.01G", "0.1%"]
def test_discard_empty():
"""Test that discard_empty does what it says on the tin."""
assert not discard_empty(None)
assert not discard_empty("")
assert discard_empty("alice")
def test_parse_myquota():
"""Test that parse_myquota provides expected output."""
stdout = """FILESYSTEM SPACE_USED SPACE_QUOTA SPACE_PCT INODE_USED INODE_QUOTA INODE_PCT
home 1.90GiB 40.00GiB 4.7% 44.00 1.00M 0.0%
cscratch1 12.00KiB 20.00TiB 0.0% 3.00 10.00M 0.0%
"""
assert parse_myquota(stdout) == [
{
"FILESYSTEM": "home",
"SPACE_USED": "1.90GiB",
"SPACE_QUOTA": "40.00GiB",
"SPACE_PCT": "4.7%",
"INODE_USED": "44.00",
"INODE_QUOTA": "1.00M",
"INODE_PCT": "0.0%",
},
{
"FILESYSTEM": "cscratch1",
"SPACE_USED": "12.00KiB",
"SPACE_QUOTA": "20.00TiB",
"SPACE_PCT": "0.0%",
"INODE_USED": "3.00",
"INODE_QUOTA": "10.00M",
"INODE_PCT": "0.0%",
},
]
def test_constructor_config(config, mocker):
"""Test that a SiteMoveVerifier can be constructed with a configuration object and a logging object."""
logger_mock = mocker.MagicMock()
p = SiteMoveVerifier(config, logger_mock)
assert p.name == "testing-site_move_verifier"
assert p.dest_root_path == "/path/to/rse"
assert p.dest_site == "NERSC"
assert p.heartbeat_patch_retries == 3
assert p.heartbeat_patch_timeout_seconds == 30
assert p.heartbeat_sleep_duration_seconds == 60
    assert p.lta_rest_token == "fake-lta-rest-token"
assert p.lta_rest_url == "http://RmMNHdPhHpH2ZxfaFAC9d2jiIbf5pZiHDqy43rFLQiM.com/"
assert p.output_status == "taping"
assert p.source_site == "WIPAC"
assert p.work_retries == 3
assert p.work_sleep_duration_seconds == 60
assert p.work_timeout_seconds == 30
assert p.logger == logger_mock
def test_do_status(config, mocker):
"""Verify that the SiteMoveVerifier has additional state to offer."""
logger_mock = mocker.MagicMock()
run_mock = mocker.patch("lta.site_move_verifier.run", new_callable=MagicMock)
run_mock.return_value = ObjectLiteral(
returncode=0,
args=MYQUOTA_ARGS,
stdout=b"FILESYSTEM SPACE_USED SPACE_QUOTA SPACE_PCT INODE_USED INODE_QUOTA INODE_PCT\nhome 1.90GiB 40.00GiB 4.7% 44.00 1.00M 0.0%\ncscratch1 12.00KiB 20.00TiB 0.0% 3.00 10.00M 0.0%\n",
stderr="",
)
p = SiteMoveVerifier(config, logger_mock)
assert p._do_status() == {
"quota": [
{
"FILESYSTEM": "home",
"SPACE_USED": "1.90GiB",
"SPACE_QUOTA": "40.00GiB",
"SPACE_PCT": "4.7%",
"INODE_USED": "44.00",
"INODE_QUOTA": "1.00M",
"INODE_PCT": "0.0%",
},
{
"FILESYSTEM": "cscratch1",
"SPACE_USED": "12.00KiB",
"SPACE_QUOTA": "20.00TiB",
"SPACE_PCT": "0.0%",
"INODE_USED": "3.00",
"INODE_QUOTA": "10.00M",
"INODE_PCT": "0.0%",
},
]
}
def test_do_status_myquota_fails(config, mocker):
"""Verify that the SiteMoveVerifier has no additional state to offer."""
logger_mock = mocker.MagicMock()
run_mock = mocker.patch("lta.site_move_verifier.run", new_callable=MagicMock)
run_mock.return_value = ObjectLiteral(
returncode=1,
args=MYQUOTA_ARGS,
stdout="",
stderr="nersc file systems burned down; again",
)
p = SiteMoveVerifier(config, logger_mock)
assert p._do_status() == {"quota": []}
@pytest.mark.asyncio
async def test_site_move_verifier_logs_configuration(mocker):
"""Test to make sure the SiteMoveVerifier logs its configuration."""
logger_mock = mocker.MagicMock()
site_move_verifier_config = {
"COMPONENT_NAME": "logme-testing-site_move_verifier",
"DEST_ROOT_PATH": "/path/to/some/archive/destination",
"DEST_SITE": "NERSC",
"HEARTBEAT_PATCH_RETRIES": "1",
"HEARTBEAT_PATCH_TIMEOUT_SECONDS": "20",
"HEARTBEAT_SLEEP_DURATION_SECONDS": "30",
"INPUT_STATUS": "transferring",
"LTA_REST_TOKEN": "<PASSWORD>",
"LTA_REST_URL": "logme-http://zjwdm5ggeEgS1tZDZy9l1DOZU53uiSO4Urmyb8xL0.com/",
"OUTPUT_STATUS": "taping",
"RUN_ONCE_AND_DIE": "False",
"SOURCE_SITE": "WIPAC",
"USE_FULL_BUNDLE_PATH": "FALSE",
"WORK_RETRIES": "5",
"WORK_SLEEP_DURATION_SECONDS": "70",
"WORK_TIMEOUT_SECONDS": "90",
}
SiteMoveVerifier(site_move_verifier_config, logger_mock)
EXPECTED_LOGGER_CALLS = [
call("site_move_verifier 'logme-testing-site_move_verifier' is configured:"),
call('COMPONENT_NAME = logme-testing-site_move_verifier'),
call('DEST_ROOT_PATH = /path/to/some/archive/destination'),
call('DEST_SITE = NERSC'),
call('HEARTBEAT_PATCH_RETRIES = 1'),
call('HEARTBEAT_PATCH_TIMEOUT_SECONDS = 20'),
call('HEARTBEAT_SLEEP_DURATION_SECONDS = 30'),
call('INPUT_STATUS = transferring'),
call('LTA_REST_TOKEN = logme-fake-lta-rest-token'),
call('LTA_REST_URL = logme-http://zjwdm5ggeEgS1tZDZy9l1DOZU53uiSO4Urmyb8xL0.com/'),
call('OUTPUT_STATUS = taping'),
call('RUN_ONCE_AND_DIE = False'),
call('SOURCE_SITE = WIPAC'),
call('USE_FULL_BUNDLE_PATH = FALSE'),
call('WORK_RETRIES = 5'),
call('WORK_SLEEP_DURATION_SECONDS = 70'),
call('WORK_TIMEOUT_SECONDS = 90')
]
logger_mock.info.assert_has_calls(EXPECTED_LOGGER_CALLS)
@pytest.mark.asyncio
async def test_script_main(config, mocker, monkeypatch):
"""
Verify SiteMoveVerifier component behavior when run as a script.
Test to make sure running the SiteMoveVerifier as a script does the setup work
that we expect and then launches the site_move_verifier service.
"""
for key in config.keys():
monkeypatch.setenv(key, config[key])
mock_event_loop = mocker.patch("asyncio.get_event_loop")
mock_root_logger = mocker.patch("logging.getLogger")
mock_status_loop = mocker.patch("lta.site_move_verifier.status_loop")
mock_work_loop = mocker.patch("lta.site_move_verifier.work_loop")
main()
mock_event_loop.assert_called()
mock_root_logger.assert_called()
mock_status_loop.assert_called()
mock_work_loop.assert_called()
@pytest.mark.asyncio
async def test_site_move_verifier_run(config, mocker):
"""Test the SiteMoveVerifier does the work the site_move_verifier should do."""
logger_mock = mocker.MagicMock()
p = SiteMoveVerifier(config, logger_mock)
p._do_work = AsyncMock()
await p.run()
p._do_work.assert_called()
@pytest.mark.asyncio
async def test_site_move_verifier_run_exception(config, mocker):
"""Test an error doesn't kill the SiteMoveVerifier."""
logger_mock = mocker.MagicMock()
p = SiteMoveVerifier(config, logger_mock)
p.last_work_end_timestamp = None
p._do_work = AsyncMock()
p._do_work.side_effect = [Exception("bad thing happen!")]
await p.run()
p._do_work.assert_called()
assert p.last_work_end_timestamp
@pytest.mark.asyncio
async def test_site_move_verifier_do_work_pop_exception(config, mocker):
"""Test that _do_work raises when the RestClient can't pop."""
logger_mock = mocker.MagicMock()
lta_rc_mock = mocker.patch("rest_tools.client.RestClient.request", new_callable=AsyncMock)
lta_rc_mock.side_effect = HTTPError(500, "LTA DB on fire. Again.")
p = SiteMoveVerifier(config, logger_mock)
with pytest.raises(HTTPError):
await p._do_work()
lta_rc_mock.assert_called_with("POST", '/Bundles/actions/pop?source=WIPAC&dest=NERSC&status=transferring', {'claimant': f'{p.name}-{p.instance_uuid}'})
@pytest.mark.asyncio
async def test_site_move_verifier_do_work_no_results(config, mocker):
"""Test that _do_work goes on vacation when the LTA DB has no work."""
logger_mock = mocker.MagicMock()
dwc_mock = mocker.patch("lta.site_move_verifier.SiteMoveVerifier._do_work_claim", new_callable=AsyncMock)
dwc_mock.return_value = False
p = SiteMoveVerifier(config, logger_mock)
await p._do_work()
dwc_mock.assert_called()
@pytest.mark.asyncio
async def test_site_move_verifier_do_work_yes_results(config, mocker):
"""Test that _do_work keeps working until the LTA DB has no work."""
logger_mock = mocker.MagicMock()
dwc_mock = mocker.patch("lta.site_move_verifier.SiteMoveVerifier._do_work_claim", new_callable=AsyncMock)
dwc_mock.side_effect = [True, True, False]
p = SiteMoveVerifier(config, logger_mock)
await p._do_work()
dwc_mock.assert_called()
@pytest.mark.asyncio
async def test_site_move_verifier_do_work_claim_no_result(config, mocker):
"""Test that _do_work_claim does not work when the LTA DB has no work."""
logger_mock = mocker.MagicMock()
lta_rc_mock = mocker.patch("rest_tools.client.RestClient.request", new_callable=AsyncMock)
lta_rc_mock.return_value = {
"bundle": None
}
vb_mock = mocker.patch("lta.site_move_verifier.SiteMoveVerifier._verify_bundle", new_callable=AsyncMock)
p = SiteMoveVerifier(config, logger_mock)
await p._do_work_claim()
lta_rc_mock.assert_called_with("POST", '/Bundles/actions/pop?source=WIPAC&dest=NERSC&status=transferring', {'claimant': f'{p.name}-{p.instance_uuid}'})
vb_mock.assert_not_called()
@pytest.mark.asyncio
async def test_site_move_verifier_do_work_claim_yes_result(config, mocker):
"""Test that _do_work_claim processes the Bundle that it gets from the LTA DB."""
logger_mock = mocker.MagicMock()
lta_rc_mock = mocker.patch("rest_tools.client.RestClient.request", new_callable=AsyncMock)
lta_rc_mock.return_value = {
"bundle": {
"one": 1,
},
}
vb_mock = mocker.patch("lta.site_move_verifier.SiteMoveVerifier._verify_bundle", new_callable=AsyncMock)
p = SiteMoveVerifier(config, logger_mock)
assert await p._do_work_claim()
lta_rc_mock.assert_called_with("POST", '/Bundles/actions/pop?source=WIPAC&dest=NERSC&status=transferring', {'claimant': f'{p.name}-{p.instance_uuid}'})
vb_mock.assert_called_with(mocker.ANY, {"one": 1})
@pytest.mark.asyncio
async def test_site_move_verifier_verify_bundle_bad_checksum(config, mocker):
"""Test that _delete_bundle deletes a completed bundle transfer."""
logger_mock = mocker.MagicMock()
lta_rc_mock = mocker.patch("rest_tools.client.RestClient", new_callable=AsyncMock)
isfile_mock = mocker.patch("os.path.isfile")
isfile_mock.return_value = True
time_mock = mocker.patch("time.time")
time_mock.return_value = 1588042614
getmtime_mock = mocker.patch("os.path.getmtime")
getmtime_mock.return_value = 1588042614 - 120
hash_mock = mocker.patch("lta.site_move_verifier.sha512sum")
hash_mock.return_value = "54321"
bundle_obj = {
"uuid": "8286d3ba-fb1b-4923-876d-935bdf7fc99e",
"dest": "nersc",
"path": "/data/exp/IceCube/2014/unbiased/PFRaw/1109",
"transfer_reference": "dataset-nersc|8286d3ba-fb1b-4923-876d-935bdf7fc99e.zip",
"bundle_path": "/mnt/lfss/lta/scratch/8286d3ba-fb1b-4923-876d-935bdf7fc99e.zip",
"checksum": {
"sha512": "12345",
},
}
p = SiteMoveVerifier(config, logger_mock)
await p._verify_bundle(lta_rc_mock, bundle_obj)
hash_mock.assert_called_with("/path/to/rse/8286d3ba-fb1b-4923-876d-935bdf7fc99e.zip")
lta_rc_mock.request.assert_called_with("PATCH", '/Bundles/8286d3ba-fb1b-4923-876d-935bdf7fc99e', {
"status": "quarantined",
"reason": mocker.ANY,
"work_priority_timestamp": mocker.ANY,
})
@pytest.mark.asyncio
async def test_site_move_verifier_verify_bundle_good_checksum(config, mocker):
"""Test that _delete_bundle deletes a completed bundle transfer."""
logger_mock = mocker.MagicMock()
lta_rc_mock = mocker.patch("rest_tools.client.RestClient", new_callable=AsyncMock)
isfile_mock = mocker.patch("os.path.isfile")
isfile_mock.return_value = True
time_mock = mocker.patch("time.time")
time_mock.return_value = 1588042614
getmtime_mock = mocker.patch("os.path.getmtime")
getmtime_mock.return_value = 1588042614 - 120
hash_mock = mocker.patch("lta.site_move_verifier.sha512sum")
hash_mock.return_value = "12345"
bundle_obj = {
"uuid": "8286d3ba-fb1b-4923-876d-935bdf7fc99e",
"dest": "nersc",
"path": "/data/exp/IceCube/2014/unbiased/PFRaw/1109",
"transfer_reference": "dataset-nersc|8286d3ba-fb1b-4923-876d-935bdf7fc99e.zip",
"bundle_path": "/mnt/lfss/lta/scratch/8286d3ba-fb1b-4923-876d-935bdf7fc99e.zip",
"checksum": {
"sha512": "12345",
},
}
p = SiteMoveVerifier(config, logger_mock)
await p._verify_bundle(lta_rc_mock, bundle_obj)
hash_mock.assert_called_with("/path/to/rse/8286d3ba-fb1b-4923-876d-935bdf7fc99e.zip")
lta_rc_mock.request.assert_called_with("PATCH", '/Bundles/8286d3ba-fb1b-4923-876d-935bdf7fc99e', {
"status": "taping",
"reason": "",
"update_timestamp": mocker.ANY,
"claimed": False,
})
``` |
{
"source": "jnbli/CanvasSync",
"score": 3
} |
#### File: CanvasSync/entities/file.py
```python
from __future__ import print_function
# Inbuilt modules
import os
import sys
import datetime
import time
# Third party
from six import text_type
from CanvasSync.entities.canvas_entity import CanvasEntity
from CanvasSync.utilities.ANSI import ANSI
from CanvasSync.utilities import helpers
class File(CanvasEntity):
def __init__(self, file_info, parent, add_to_list_of_entities=True):
"""
Constructor method, initializes base CanvasEntity class
        file_info : dict | A dictionary of information on the Canvas file object
parent : object | The parent object, a Module, SubHeader, Folder or Assignment object
"""
self.file_info = file_info
self.locked = self.file_info["locked_for_user"]
file_id = self.file_info[u"id"]
file_name = helpers.get_corrected_name(self.file_info[u"display_name"])
file_path = parent.get_path() + file_name
# Initialize base class
CanvasEntity.__init__(self,
id_number=file_id,
name=file_name,
sync_path=file_path,
parent=parent,
folder=False,
identifier=u"file",
add_to_list_of_entities=add_to_list_of_entities)
def __repr__(self):
""" String representation, overwriting base class method """
return u" " * 15 + u"| " + u"\t" * self.indent + u"%s: %s" % (ANSI.format(u"File",
formatting=u"file"),
self.name)
def download(self):
""" Download the file """
canvas_updated_at = datetime.datetime.strptime(self.file_info['updated_at'], "%Y-%m-%dT%H:%M:%SZ")
# if our local file exists and is newer than that on canvas, no need to download
if os.path.exists(self.sync_path):
local_updated_at = datetime.datetime.strptime(time.ctime(os.path.getmtime(self.sync_path)), "%a %b %d %H:%M:%S %Y")
if local_updated_at >= canvas_updated_at:
return False
self.print_status(u"DOWNLOADING", color=u"blue")
# Download file payload from server
file_data = self.api.download_file_payload(self.file_info[u"url"])
# Write data to file
try:
with open(self.sync_path, u"wb") as out_file:
out_file.write(file_data)
# after downloading file, change the modified date to match that on canvas
modTime = time.mktime(canvas_updated_at.timetuple())
os.utime(self.sync_path, (modTime, modTime))
except KeyboardInterrupt as e:
# If interrupted mid-writing, delete the corrupted file
if os.path.exists(self.sync_path):
os.remove(self.sync_path)
            # Re-raise, will be caught in CanvasSync.py
raise e
return True
def print_status(self, status, color, overwrite_previous_line=False):
""" Print status to console """
if overwrite_previous_line:
# Move up one line
sys.stdout.write(ANSI.format(u"", formatting=u"lineup"))
sys.stdout.flush()
print(ANSI.format(u"[%s]" % status, formatting=color) + str(self)[len(status) + 2:])
sys.stdout.flush()
def walk(self, counter):
""" Stop walking, endpoint """
print(text_type(self))
counter[0] += 1
return
def sync(self):
"""
Synchronize the file by downloading it from the Canvas server and saving it to the sync path
If the file has already been downloaded, skip downloading.
File objects have no children objects and represents an end point of a folder traverse.
"""
if not self.locked:
was_downloaded = self.download()
self.print_status(u"SYNCED", color=u"green", overwrite_previous_line=was_downloaded)
else:
self.print_status(u"LOCKED", color=u"red", overwrite_previous_line=False)
def show(self):
""" Show the folder hierarchy by printing every level """
print(text_type(self))
```
#### File: CanvasSync/entities/linked_file.py
```python
from __future__ import print_function
# Inbuilt modules
import os
import sys
# Third party modules
import requests
from six import text_type
# CanvasSync module imports
from CanvasSync.entities.canvas_entity import CanvasEntity
from CanvasSync.utilities.ANSI import ANSI
class LinkedFile(CanvasEntity):
def __init__(self, download_url, parent):
"""
Constructor method, initializes base CanvasEntity class
download_url : string | A URL pointing to a file somewhere on the web
parent : object | The parent object, an Assignment object
"""
self.download_url = download_url
self.valid_url = True
# Get the potential file name from the URL
# OBS: We do not correct the name in this class, as we need to use the length of the name to determine
# if the link is valid.
file_name = os.path.split(download_url)[-1]
# File path
file_path = parent.get_path() + file_name
        # A file name without an extension or a weirdly long file name is not allowed
        # (this is not strictly necessary, as the regex should only match OK URLs)
if not os.path.splitext(file_name)[-1] or len(file_name) > 60:
self.valid_url = False
# Initialize base class
CanvasEntity.__init__(self,
id_number=-1,
name=file_name,
sync_path=file_path,
parent=parent,
folder=False,
identifier=u"linked_file")
def __repr__(self):
""" String representation, overwriting base class method """
return u" " * 15 + u"| " + u"\t" * self.indent + u"%s: %s" % (ANSI.format(u"Linked File",
formatting=u"linkedfile"),
self.name)
def url_is_valid(self):
return self.valid_url
def print_status(self, status, color, overwrite_previous_line=False):
""" Print status to console """
if overwrite_previous_line:
# Move up one line
sys.stdout.write(ANSI.format(u"", formatting=u"lineup"))
sys.stdout.flush()
print(ANSI.format(u"[%s]" % status, formatting=color) + str(self)[len(status) + 2:])
sys.stdout.flush()
def download(self):
"""
        Download the file. Returns True or False depending on whether the file was downloaded or not. Returns -1 if the
        download was attempted but failed.
"""
if os.path.exists(self.sync_path):
return False
self.print_status(u"DOWNLOADING", color=u"blue")
# Attempt to download the file
try:
response = requests.get(self.download_url)
except Exception:
# Could not download, catch any exception
self.print_status(u"FAILED", u"red", overwrite_previous_line=True)
return -1
# Check for OK 200 HTTP response
if not response.status_code == 200:
self.print_status(u"FAILED", u"red", overwrite_previous_line=True)
return -1
# If here, download was successful, write to disk and print status
with open(self.sync_path, u"wb") as out_file:
out_file.write(response.content)
return True
def walk(self, counter):
""" Stop walking, endpoint """
print(text_type(self))
counter[0] += 1
return
def sync(self):
"""
        Attempt to download a file at the url 'download_url' to the path 'path'/filename, printing
        the status using an indent of print_indent to align with the parent object
"""
was_downloaded = self.download()
        if was_downloaded != -1:
self.print_status(u"SYNCED", color=u"green", overwrite_previous_line=was_downloaded)
def show(self):
""" Show the folder hierarchy by printing every level """
print(text_type(self))
```
#### File: CanvasSync/entities/page.py
```python
from __future__ import print_function
# Inbuilt modules
import os
import sys
import io
import re
# Third party
from six import text_type
from CanvasSync.entities.canvas_entity import CanvasEntity
from CanvasSync.utilities.ANSI import ANSI
from CanvasSync.utilities import helpers
from CanvasSync.entities.file import File
from CanvasSync.entities.linked_file import LinkedFile
class Page(CanvasEntity):
def __init__(self, page_info, parent):
"""
Constructor method, initializes base CanvasEntity class
page_info : dict | A dictionary of information on the Canvas page object
parent : object | The parent object, a Module or SubHeader object
"""
        # Sometimes the Page object is initialized with a json dict of information on the file-like object representing
        # the HTML page instead of an object on the page itself. This file-like object does not store the actual HTML
        # body, which will be downloaded in the self.download() method. The slightly messy code below makes the class
        # functional with either kind of information supplied.
self.page_item_info = page_info
self.page_info = self.page_item_info if u"id" not in self.page_item_info else None
page_id = self.page_item_info[u"id"] if not self.page_info else self.page_info[u"page_id"]
page_name = helpers.get_corrected_name(self.page_item_info[u"title"])
page_path = parent.get_path() + page_name
# Initialize base class
CanvasEntity.__init__(self,
id_number=page_id,
name=page_name,
sync_path=page_path,
parent=parent,
folder=False,
identifier=u"page")
def __repr__(self):
""" String representation, overwriting base class method """
return u" " * 15 + u"| " + u"\t" * self.indent + u"%s: %s" % (ANSI.format(u"Page",
formatting=u"page"),
self.name)
def download_linked_files(self, html_body):
sub_files = False
# Look for files in the HTML body
# Get file URLs pointing to Canvas items
canvas_file_urls = re.findall(r'data-api-endpoint=\"(.*?)\"', html_body or "")
# Download information on all found files and add File objects to the children
for url in canvas_file_urls:
try:
file_info = self.api.download_item_information(url)
if u'display_name' not in file_info:
continue
except Exception:
continue
item = File(file_info, parent=self)
self.add_child(item)
sub_files = True
if self.settings.download_linked:
            # We also look for links to files hosted on other servers
            # Get all URLs ending in a file name (determined as ending with a '.'
            # followed by between 1 and 10 characters). This has 2 purposes:
            # 1) We do not try to re-download Canvas server files, since they are not matched by this regex
            # 2) We stay clear of links to ordinary web pages (they could be large to download, so we skip them here)
urls = re.findall(r'href=\"([^ ]*[.]{1}.{1,10})\"', html_body or "")
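            # Illustrative (hypothetical) examples of what the pattern above accepts:
            #   href="https://example.org/notes/lecture01.pdf" -> matched (ends in '.' plus a short extension)
            #   href="https://example.org/courses/123"         -> skipped (no '.' followed by at most 10 characters before the closing quote)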
for url in urls:
linked_file = LinkedFile(url, self)
if linked_file.url_is_valid():
self.add_child(linked_file)
sub_files = True
else:
del linked_file
return sub_files
def push_down(self):
"""
Lower the level of this page once into a sub-folder of similar name
"""
self._make_folder()
base, tail = os.path.split(self.sync_path)
self.sync_path = self.sync_path + u"/" + tail
def download(self):
""" Download the page """
if os.path.exists(self.sync_path + u".html"):
return False
# Print download status
self.print_status(u"DOWNLOADING", color=u"blue")
# Download additional info and HTML body of the Page object if not already supplied
self.page_info = self.api.download_item_information(self.page_item_info[u"url"]) if not self.page_info else self.page_info
# Create a HTML page locally and add a link leading to the live version
body = self.page_info.get(u"body", "")
html_url = self.page_info.get(u"html_url", "")
if self.download_linked_files(body):
self.push_down()
if not os.path.exists(self.sync_path):
with io.open(self.sync_path + u".html", u"w", encoding=u"utf-8") as out_file:
out_file.write(u"<h1><strong>%s</strong></h1>" % self.name)
out_file.write(u"<big><a href=\"%s\">Click here to open the live page in Canvas</a></big>" % html_url)
out_file.write(u"<hr>")
out_file.write(body)
return True
def print_status(self, status, color, overwrite_previous_line=False):
""" Print status to console """
if overwrite_previous_line:
# Move up one line
sys.stdout.write(ANSI.format(u"", formatting=u"lineup"))
sys.stdout.flush()
print(ANSI.format(u"[%s]" % status, formatting=color) + str(self)[len(status) + 2:])
sys.stdout.flush()
def walk(self, counter):
""" Stop walking, endpoint """
print(text_type(self))
counter[0] += 1
return
def sync(self):
"""
Synchronize the page by downloading it from the Canvas server and saving it to the sync path
If the page has already been downloaded, skip downloading.
Page objects have no children objects and represents an end point of a folder traverse.
"""
was_downloaded = self.download()
self.print_status(u"SYNCED", color=u"green", overwrite_previous_line=was_downloaded)
for file in self:
file.update_path()
file.sync()
def show(self):
""" Show the folder hierarchy by printing every level """
print(text_type(self))
```
#### File: CanvasSync/CanvasSync/usage.py
```python
from __future__ import print_function
# Inbuilt modules
import sys
def help():
print(u"""
-------------------------
CanvasSync
<NAME>
February 2017--
-------------------------
CanvasSync helps students automatically synchronize modules, assignments & files located on their
institution's Canvas web server to a mirrored folder on their local computer.
Usage
-----
$ canvas.py [-S] <sync> [-h] <help> [-s] <reset settings> [-i] <show current settings>
[-p {password}] <specify password>
-h [--help], optional : Show this help screen.
-S [--sync], optional : Synchronize with Canvas
-s [--setup], optional : Enter settings setup screen.
The first time CanvasSync is launched settings must be set. Invoking
CanvasSync with the -s or --setup flags will allow the user to reset
these settings.
-i [--info], optional : Show currently active settings.
-p {password}, optional : Specify settings file decryption password (potentially dangerous)
Setup
-----
CanvasSync requires at least the following settings to be set:
- A path pointing to a local folder. This folder will store the synced files and folders.
- A Canvas web server URL.
- An authentication token (see https://github.com/perslev/CanvasSync for details)
- A list of courses that should be synchronized.
CanvasSync will guide you through these settings during the first time launch. Alternatively,
the settings may be reset using the -s or --setup flag. See below.
ADDITIONAL RESOURCES
--------------------
- Authentication token info and help:
https://github.com/perslev/CanvasSync
- Canvas by Instructure home page
https://canvas.instructure.com/
- Canvas LMS API documentation
https://api.instructure.com
""")
sys.exit(0)
``` |
{
"source": "jnbrauer/qtnodes",
"score": 3
} |
#### File: qtnodes/qtnodes/helpers.py
```python
import json
from PySide2 import QtWidgets
from PySide2 import QtGui
from PySide2 import QtCore
def readFileContent(filePath):
"""Return the content of the file."""
with open(filePath) as f:
return f.read()
def toJson(serialized):
"""Return JSON string from given native Python datatypes."""
    return json.dumps(serialized, indent=4)
def fromJson(jsonString):
"""Return native Python datatypes from JSON string."""
    return json.loads(jsonString)
def getTextSize(text, painter=None):
"""Return a QSize based on given string.
If no painter is supplied, the font metrics are based on a default
    QPainter, which may be off depending on the font and text size used.
"""
if not painter:
metrics = QtGui.QFontMetrics(QtGui.QFont())
else:
metrics = painter.fontMetrics()
size = metrics.size(QtCore.Qt.TextSingleLine, text)
return size
``` |
{
"source": "JNC96/drone-gym",
"score": 2
} |
#### File: drone-gym/custom_scripts/runner.py
```python
import time
from tqdm import tqdm
import numpy as np
from tensorforce import util
from tensorforce.agents import Agent
from tensorforce.environments import Environment
class Runner(object):
def __init__(self, agent, environment, evaluation_environment=None, save_best_agent=False):
# save_best overwrites saver...
self.is_environment_external = isinstance(environment, Environment)
self.environment = Environment.create(environment=environment)
self.is_eval_environment_external = isinstance(evaluation_environment, Environment)
if evaluation_environment is None:
self.evaluation_environment = None
else:
self.evaluation_environment = Environment.create(environment=evaluation_environment)
self.save_best_agent = save_best_agent
self.is_agent_external = isinstance(agent, Agent)
kwargs = dict()
# warning: save_best_agent
if not self.is_agent_external and self.save_best_agent:
# Disable periodic saving
kwargs = dict(saver=dict(seconds=None, steps=None))
self.agent = Agent.create(agent=agent, environment=self.environment, **kwargs)
if not self.agent.model.is_initialized:
self.agent.initialize()
self.global_episodes = self.agent.episodes
self.global_timesteps = self.agent.timesteps
self.global_updates = self.agent.updates
self.episode_rewards = list()
self.episode_timesteps = list()
self.episode_seconds = list()
self.episode_agent_seconds = list()
def close(self):
if hasattr(self, 'tqdm'):
self.tqdm.close()
if not self.is_agent_external:
self.agent.close()
if not self.is_environment_external:
self.environment.close()
if self.evaluation_environment is not None and not self.is_eval_environment_external:
self.evaluation_environment.close()
# TODO: make average reward another possible criteria for runner-termination
def run(
self,
# General
num_episodes=None, num_timesteps=None, num_updates=None, max_episode_timesteps=None,
num_repeat_actions=1,
# Callback
callback=None, callback_episode_frequency=None, callback_timestep_frequency=None,
# Tqdm
use_tqdm=True, mean_horizon=10,
# Evaluation
evaluation=False, evaluation_callback=None, evaluation_frequency=None,
max_evaluation_timesteps=None, num_evaluation_iterations=1
):
# General
if num_episodes is None:
self.num_episodes = float('inf')
else:
self.num_episodes = num_episodes
if num_timesteps is None:
self.num_timesteps = float('inf')
else:
self.num_timesteps = num_timesteps
if num_updates is None:
self.num_updates = float('inf')
else:
self.num_updates = num_updates
if max_episode_timesteps is None:
self.max_episode_timesteps = float('inf')
else:
self.max_episode_timesteps = max_episode_timesteps
self.num_repeat_actions = num_repeat_actions
# Callback
assert callback_episode_frequency is None or callback_timestep_frequency is None
if callback_episode_frequency is None and callback_timestep_frequency is None:
callback_episode_frequency = 1
if callback_episode_frequency is None:
self.callback_episode_frequency = float('inf')
else:
self.callback_episode_frequency = callback_episode_frequency
if callback_timestep_frequency is None:
self.callback_timestep_frequency = float('inf')
else:
self.callback_timestep_frequency = callback_timestep_frequency
if callback is None:
self.callback = (lambda r: True)
elif util.is_iterable(x=callback):
def sequential_callback(runner):
result = True
for fn in callback:
x = fn(runner)
                    if isinstance(x, bool):
result = result and x
return result
self.callback = sequential_callback
else:
def boolean_callback(runner):
result = callback(runner)
if isinstance(result, bool):
return result
else:
return True
self.callback = boolean_callback
# Tqdm
if use_tqdm:
if hasattr(self, 'tqdm'):
self.tqdm.close()
assert self.num_episodes != float('inf') or self.num_timesteps != float('inf')
inner_callback = self.callback
if self.num_episodes != float('inf'):
# Episode-based tqdm (default option if both num_episodes and num_timesteps set)
assert self.num_episodes != float('inf')
bar_format = (
'{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed}, reward={postfix[0]:.2f}, ts/ep='
'{postfix[1]}, sec/ep={postfix[2]:.2f}, ms/ts={postfix[3]:.1f}, agent='
'{postfix[4]:.1f}%]'
)
postfix = [0.0, 0, 0.0, 0.0, 0.0]
self.tqdm = tqdm(
desc='Episodes', total=self.num_episodes, bar_format=bar_format,
initial=self.global_episodes, postfix=postfix
)
self.tqdm_last_update = self.global_episodes
def tqdm_callback(runner):
mean_reward = float(np.mean(runner.episode_rewards[-mean_horizon:]))
mean_ts_per_ep = int(np.mean(runner.episode_timesteps[-mean_horizon:]))
mean_sec_per_ep = float(np.mean(runner.episode_seconds[-mean_horizon:]))
mean_agent_sec = float(np.mean(runner.episode_agent_seconds[-mean_horizon:]))
mean_ms_per_ts = mean_sec_per_ep * 1000.0 / mean_ts_per_ep
mean_rel_agent = mean_agent_sec * 100.0 / mean_sec_per_ep
runner.tqdm.postfix[0] = mean_reward
runner.tqdm.postfix[1] = mean_ts_per_ep
runner.tqdm.postfix[2] = mean_sec_per_ep
runner.tqdm.postfix[3] = mean_ms_per_ts
runner.tqdm.postfix[4] = mean_rel_agent
runner.tqdm.update(n=(runner.global_episodes - runner.tqdm_last_update))
runner.tqdm_last_update = runner.global_episodes
return inner_callback(runner)
else:
# Timestep-based tqdm
assert self.num_timesteps != float('inf')
self.tqdm = tqdm(
desc='Timesteps', total=self.num_timesteps, initial=self.global_timesteps,
postfix=dict(mean_reward='n/a')
)
self.tqdm_last_update = self.global_timesteps
def tqdm_callback(runner):
# sum_timesteps_reward = sum(runner.timestep_rewards[num_mean_reward:])
# num_timesteps = min(num_mean_reward, runner.episode_timestep)
# mean_reward = sum_timesteps_reward / num_episodes
runner.tqdm.set_postfix(mean_reward='n/a')
runner.tqdm.update(n=(runner.global_timesteps - runner.tqdm_last_update))
runner.tqdm_last_update = runner.global_timesteps
return inner_callback(runner)
self.callback = tqdm_callback
# Evaluation
self.evaluation = evaluation
if evaluation_callback is None:
self.evaluation_callback = (lambda r: None)
else:
assert not self.evaluation
self.evaluation_callback = evaluation_callback
self.evaluation_frequency = evaluation_frequency
if max_evaluation_timesteps is None:
self.max_evaluation_timesteps = float('inf')
else:
assert not self.evaluation
self.max_evaluation_timesteps = max_evaluation_timesteps
self.num_evaluation_iterations = num_evaluation_iterations
if self.save_best_agent:
assert not self.evaluation
inner_evaluation_callback = self.evaluation_callback
def mean_reward_callback(runner):
result = inner_evaluation_callback(runner)
if result is None:
return float(np.mean(runner.evaluation_rewards))
else:
return result
self.evaluation_callback = mean_reward_callback
self.best_evaluation_score = None
# Reset agent
self.agent.reset()
# Timestep/episode/update counter
self.timesteps = 0
self.episodes = 0
self.updates = 0
self.interactive = bool(int(input("\nWould you like this run to use user inputs? 0 - No, 1 - Yes -- ")))
# Episode loop
while True:
# Run episode
if not self.run_episode(
environment=self.environment, max_timesteps=self.max_episode_timesteps,
evaluation=self.evaluation
):
return
# Increment episode counter (after calling callback)
self.episodes += 1
# Update experiment statistics
self.episode_rewards.append(self.episode_reward)
self.episode_timesteps.append(self.episode_timestep)
self.episode_seconds.append(self.episode_second)
self.episode_agent_seconds.append(self.episode_agent_second)
# Run evaluation
if self.evaluation_frequency is None:
is_evaluation = self.episode_updated
else:
is_evaluation = (self.episodes % self.evaluation_frequency == 0)
if is_evaluation:
if self.evaluation_environment is None:
environment = self.environment
else:
environment = self.evaluation_environment
self.evaluation_rewards = list()
self.evaluation_timesteps = list()
self.evaluation_seconds = list()
self.evaluation_agent_seconds = list()
# Evaluation loop
for _ in range(self.num_evaluation_iterations):
self.run_episode(
environment=environment, max_timesteps=self.max_evaluation_timesteps,
evaluation=True
)
self.evaluation_rewards.append(self.episode_reward)
self.evaluation_timesteps.append(self.episode_timestep)
self.evaluation_seconds.append(self.episode_second)
self.evaluation_agent_seconds.append(self.episode_agent_second)
# Evaluation callback
if self.save_best_agent:
evaluation_score = self.evaluation_callback(self)
assert isinstance(evaluation_score, float)
if self.best_evaluation_score is None:
self.best_evaluation_score = evaluation_score
elif evaluation_score > self.best_evaluation_score:
self.best_evaluation_score = evaluation_score
self.agent.save(filename='best-model', append_timestep=False)
else:
self.evaluation_callback(self)
# Update global timestep/episode/update
self.global_timesteps = self.agent.timesteps
self.global_episodes = self.agent.episodes
self.global_updates = self.agent.updates
# Callback
if self.episodes % self.callback_episode_frequency == 0 and not self.callback(self):
return
# Terminate experiment if too long
if self.global_timesteps >= self.num_timesteps:
return
elif self.evaluation and self.timesteps >= self.num_timesteps:
return
elif self.global_episodes >= self.num_episodes:
return
elif self.evaluation and self.episodes >= self.num_episodes:
return
elif self.global_updates >= self.num_updates:
return
elif self.evaluation and self.updates >= self.num_updates:
return
elif self.agent.should_stop():
return
def action_rank(self, states, evaluation):
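        # Interactive helper: sample several candidate actions from the agent for the
        # current state, print them, and let the user pick the one that gets executed.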
action_buffer = []
print("*********************")
print("*********************")
print("\n%------------------------")
print("% STATE @ STEP# "+str(states[0]*states[1]))
print("%------------------------\n")
print("Slope: "+str(states[2])+" --- @("+str(states[0])+","+str(states[1])+")")
for _ in range(0,4):
# here,independent is TRUE because in the normal pipeline you would have to observe
# after taking an action, but we are simply sampling actions.
tmp_action = self.agent.act(states=states, independent = True, evaluation = False)
print("\n%------------------------")
print("% ACTION "+str(_+1))
print("%------------------------\n")
print("Camera Angle: "+str(tmp_action[0]))
print("Speed: "+str(tmp_action[1]))
print("Height: "+str(tmp_action[2]))
action_buffer.append(tmp_action)
action_choice = int(input("\nPlease select the optimal action (1-4): ")) - 1
        while action_choice > 3 or action_choice < 0:
action_choice = int(input("\nPlease select the optimal action (1-4): ")) - 1
return action_buffer[action_choice]
def run_episode(self, environment, max_timesteps, evaluation):
# Episode statistics
self.episode_reward = 0
self.episode_timestep = 0
self.episode_updated = False
self.episode_agent_second = 0.0
episode_start = time.time()
# Start environment episode
states = environment.reset()
# Timestep loop
while True:
# Retrieve actions from agent
agent_start = time.time()
# user action only runs for the first episodes: only 50 steps
if self.agent.episodes == 0 and self.interactive:
print(self.interactive)
print(self.agent.episodes)
user_action = self.action_rank(states=states, evaluation=evaluation)
else:
user_action = 0
# run with selected action
if self.agent.episodes > 0:
self.interactive = False
actions = self.agent.act(states=states, evaluation=evaluation, int_bool = self.interactive, int_act = user_action)
self.timesteps += 1
self.episode_agent_second += time.time() - agent_start
self.episode_timestep += 1
# Execute actions in environment (optional repeated execution)
reward = 0.0
for _ in range(self.num_repeat_actions):
states, terminal, step_reward = environment.execute(actions=actions)
if isinstance(terminal, bool):
terminal = int(terminal)
reward += step_reward
if terminal > 0:
break
self.episode_reward += reward
# Terminate episode if too long
if self.episode_timestep >= max_timesteps:
terminal = 2
# Observe unless evaluation
if not evaluation:
agent_start = time.time()
updated = self.agent.observe(terminal=terminal, reward=reward)
self.updates += int(updated)
self.episode_agent_second += time.time() - agent_start
self.episode_updated = self.episode_updated or updated
# Callback
if self.episode_timestep % self.callback_timestep_frequency == 0 and \
not self.callback(self):
return False
# Episode termination check
if terminal > 0:
break
# No callbacks for evaluation
if evaluation:
continue
# Update global timestep/episode/update
self.global_timesteps = self.agent.timesteps
self.global_episodes = self.agent.episodes
self.global_updates = self.agent.updates
# Terminate experiment if too long
if self.global_timesteps >= self.num_timesteps:
return
elif self.global_episodes >= self.num_episodes:
return
elif self.global_updates >= self.num_updates:
return
elif self.agent.should_stop():
return False
# Update episode statistics
self.episode_second = time.time() - episode_start
return True
``` |
{
"source": "JNC96/tensorforce",
"score": 2
} |
#### File: JNC96/tensorforce/run.py
```python
import argparse
import importlib
import json
import os
import matplotlib
import numpy as np
import tensorflow as tf
from tensorforce.agents import Agent
from tensorforce.environments import Environment
from tensorforce.execution import Runner
matplotlib.use('Agg')
import matplotlib.pyplot as plt
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.logging.set_verbosity(v=tf.logging.ERROR)
def main():
parser = argparse.ArgumentParser(description='Tensorforce runner')
parser.add_argument(
'agent', help='Agent (configuration JSON file, name, or library module)'
)
parser.add_argument(
'environment',
help='Environment (name, configuration JSON file, or library module)'
)
# Agent arguments
parser.add_argument(
'-n', '--network', type=str, default=None,
help='Network (configuration JSON file, name, or library module)'
)
# Environment arguments
parser.add_argument(
'-l', '--level', type=str, default=None,
help='Level or game id, like `CartPole-v1`, if supported'
)
parser.add_argument(
'--visualize', action='store_true',
help='Visualize agent--environment interaction, if supported'
)
parser.add_argument(
'-i', '--import-modules', type=str, default=None,
help='Import comma-separated modules required for environment'
)
# Runner arguments
parser.add_argument('-t', '--timesteps', type=int, default=None, help='Number of timesteps')
parser.add_argument('-e', '--episodes', type=int, default=None, help='Number of episodes')
parser.add_argument(
'-m', '--max-episode-timesteps', type=int, default=None,
help='Maximum number of timesteps per episode'
),
parser.add_argument(
'--mean-horizon', type=int, default=10,
help='Number of timesteps/episodes for mean reward computation'
)
parser.add_argument('-v', '--evaluation', action='store_true', help='Evaluation mode')
parser.add_argument(
'-s', '--save-best-agent', action='store_true', help='Save best-performing agent'
)
# Logging arguments
parser.add_argument('-r', '--repeat', type=int, default=1, help='Number of repetitions')
parser.add_argument(
'-p', '--path', type=str, default=None,
help='Logging path, directory plus filename without extension'
)
parser.add_argument('--seaborn', action='store_true', help='Use seaborn')
args = parser.parse_args()
if args.import_modules is not None:
for module in args.import_modules.split(','):
importlib.import_module(name=module)
if args.path is None:
callback = None
else:
assert os.path.splitext(args.path)[1] == ''
assert args.episodes is not None and args.visualize is not None
rewards = [list() for _ in range(args.episodes)]
timesteps = [list() for _ in range(args.episodes)]
seconds = [list() for _ in range(args.episodes)]
agent_seconds = [list() for _ in range(args.episodes)]
def callback(r):
rewards[r.episode - 1].append(r.episode_reward)
timesteps[r.episode - 1].append(r.episode_timestep)
seconds[r.episode - 1].append(r.episode_second)
agent_seconds[r.episode - 1].append(r.episode_agent_second)
return True
if args.visualize:
if args.level is None:
environment = Environment.create(environment=args.environment, visualize=True)
else:
environment = Environment.create(
environment=args.environment, level=args.level, visualize=True
)
else:
if args.level is None:
environment = Environment.create(environment=args.environment)
else:
environment = Environment.create(environment=args.environment, level=args.level)
for _ in range(args.repeat):
agent_kwargs = dict()
if args.network is not None:
agent_kwargs['network'] = args.network
if args.max_episode_timesteps is not None:
assert environment.max_episode_timesteps() is None or \
environment.max_episode_timesteps() == args.max_episode_timesteps
agent_kwargs['max_episode_timesteps'] = args.max_episode_timesteps
agent = Agent.create(agent=args.agent, environment=environment, **agent_kwargs)
runner = Runner(agent=agent, environment=environment)
runner.run(
num_timesteps=args.timesteps, num_episodes=args.episodes,
max_episode_timesteps=args.max_episode_timesteps, callback=callback,
mean_horizon=args.mean_horizon, evaluation=args.evaluation
# save_best_model=args.save_best_model
)
runner.close()
if args.path is not None:
if not os.path.isdir(os.path.split(args.path)[0]):
os.makedirs(os.path.split(args.path)[0], exist_ok=True)
with open(args.path + '.json', 'w') as filehandle:
filehandle.write(
json.dumps(dict(
rewards=rewards, timesteps=timesteps, seconds=seconds,
agent_seconds=agent_seconds
))
)
if args.seaborn:
import seaborn as sns
sns.set()
xs = np.arange(len(rewards))
min_rewards = np.amin(rewards, axis=1)
max_rewards = np.amax(rewards, axis=1)
median_rewards = np.median(rewards, axis=1)
plt.plot(xs, median_rewards, color='green', linewidth=2.0)
plt.fill_between(xs, min_rewards, max_rewards, color='green', alpha=0.4)
plt.xlabel('episodes')
plt.ylabel('reward')
plt.savefig(fname=(args.path + '.png'))
if __name__ == '__main__':
main()
```
#### File: core/networks/preprocessor.py
```python
from collections import Counter, OrderedDict
import tensorflow as tf
from tensorforce import TensorforceError, util
from tensorforce.core.layers import PreprocessingLayer, StatefulLayer, TemporalLayer
from tensorforce.core.networks import LayerbasedNetwork
class Preprocessor(LayerbasedNetwork):
"""
Special preprocessor network following a sequential layer-stack architecture, which can be
specified as either a single or a list of layer specifications.
Args:
name (string): Network name
(<span style="color:#0000C0"><b>internal use</b></span>).
input_spec (specification): Input tensor specification
(<span style="color:#0000C0"><b>internal use</b></span>).
layers (iter[specification] | iter[iter[specification]]): Layers configuration, see
[layers](../modules/layers.html)
(<span style="color:#C00000"><b>required</b></span>).
device (string): Device name
(<span style="color:#00C000"><b>default</b></span>: inherit value of parent module).
summary_labels ('all' | iter[string]): Labels of summaries to record
(<span style="color:#00C000"><b>default</b></span>: inherit value of parent module).
l2_regularization (float >= 0.0): Scalar controlling L2 regularization
(<span style="color:#00C000"><b>default</b></span>: inherit value of parent module).
"""
def __init__(
self, name, input_spec, layers, device=None, summary_labels=None, l2_regularization=None
):
super().__init__(
name=name, inputs_spec=input_spec, device=device, summary_labels=summary_labels,
l2_regularization=l2_regularization
)
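        # Build the sequential layer stack, auto-naming any unnamed layer as "<type><index>".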
if isinstance(layers, (dict, str)):
layers = [layers]
layer_counter = Counter()
for layer_spec in layers:
if 'name' in layer_spec:
layer_name = layer_spec['name']
else:
if isinstance(layer_spec, dict) and isinstance(layer_spec.get('type'), str):
layer_type = layer_spec['type']
else:
layer_type = 'layer'
layer_name = layer_type + str(layer_counter[layer_type])
layer_counter[layer_type] += 1
self.add_module(name=layer_name, module=layer_spec)
@classmethod
def internals_spec(cls, network=None, **kwargs):
raise NotImplementedError
def internals_init(self):
raise NotImplementedError
def tf_dependency_horizon(self, is_optimization=False):
raise NotImplementedError
def add_module(self, *args, **kwargs):
layer = super().add_module(*args, **kwargs)
if isinstance(layer, (TemporalLayer, StatefulLayer)):
raise TensorforceError.type(
name='preprocessor network', argument='sub-module', value=layer
)
return layer
def tf_reset(self):
operations = list()
for layer in self.modules.values():
if isinstance(layer, PreprocessingLayer):
operations.append(layer.reset())
return tf.group(*operations)
def tf_apply(self, x):
for layer in self.modules.values():
x = layer.apply(x=x)
return x
def create_tf_function(self, name, tf_function):
if tf_function.__name__ != 'tf_apply':
return super().create_tf_function(name=name, tf_function=tf_function)
def validated_tf_function(x):
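            # Wrap tf_apply so that its input is checked against inputs_spec and its
            # output is checked against the network's output spec around the actual call.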
if util.is_atomic_values_spec(values_spec=self.inputs_spec):
if not util.is_consistent_with_value_spec(value_spec=self.inputs_spec, x=x):
raise TensorforceError("Invalid input arguments for tf_apply.")
else:
if not all(
util.is_consistent_with_value_spec(value_spec=spec, x=x[name])
for name, spec in self.inputs_spec.items()
):
raise TensorforceError("Invalid input arguments for tf_apply.")
x = tf_function(x=x)
if not util.is_consistent_with_value_spec(value_spec=self.get_output_spec(), x=x):
raise TensorforceError("Invalid output arguments for tf_apply.")
return x
return super().create_tf_function(name=name, tf_function=validated_tf_function)
``` |
{
"source": "jncamero/Python",
"score": 2
} |
#### File: jncamero/Python/flank_seq.py
```python
import pandas as pd
from pysam import FastaFile
import numpy as np
import os
def flank_seq(freg,snpfile,varfile,Ref):
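    """
    Append, to "Markers.fasta", the flanking sequence around each target SNP with known
    nearby variants embedded in [ref/alt] notation.
    Assumed parameter meanings (inferred from how they are used below, not documented upstream):
    freg    : int | number of bases of flanking sequence on each side of the SNP
    snpfile : str | CSV of target SNPs (chromosome, position, ref allele, alt allele)
    varfile : str | CSV of known variants in the same chromosome/position/ref/alt layout
    Ref     : str | path to the indexed reference genome FASTA opened with pysam
    """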
flanks=pd.read_csv(varfile,header=None)
outfile = open("Markers.fasta", "a")
with open(snpfile) as f:
        ref=FastaFile(Ref)
for line in f:
fname = line.rstrip().split(',') #using rstrip to remove the \n
chr=fname[0]
pos=int(fname[1])
#Filter polymorphic regions upstream and downstream from SNP
subs=flanks[(flanks.iloc[:,0]==chr)&(flanks.iloc[:,1]>pos-freg)&(flanks.iloc[:,1]<pos+freg)]
subs2=subs.sort_values(by=1, ascending=False)
l1=">"+chr+":"+str(pos-freg)+"-"+str(pos-1)
left=ref.fetch(chr,pos-freg,pos)
l2=">"+chr+":"+str(pos)+"-"+str(pos)
snp = "["+fname[2]+"/"+fname[3]+"]"
l3=">"+chr+":"+str(pos+1)+"-"+str(pos+freg)
#Pull flanking sequence from fasta
right=ref.fetch(chr,pos+1,pos+1+freg)
if subs2.shape[0]>0:
for i in range(0,subs2.shape[0]):
x = subs2.iloc[i,:]
if x.iloc[1]<pos:
#print("left")
#Position of SNP in string
a=(freg)-(pos-x[1])
fs=pos-freg
left=left[0:a]+"["+x[2]+"/"+x[3]+"]"+left[(a+1):(pos+freg)]
elif x.iloc[1]>pos:
#print('right')
#Position of flanking SNP in string
a=(freg)-(freg-(x[1]-pos))-1
right=right[0:a]+"["+x[2]+"/"+x[3]+"]"+right[(a+1):(pos+freg)]
lines=l1,"\n",left,"\n",l2,"\n",snp,"\n",l3,"\n",right,"\n"
outfile.writelines(lines)
outfile.close()
return(0)
``` |
{
"source": "J-N-ch/GMRES_py",
"score": 3
} |
#### File: J-N-ch/GMRES_py/example.py
```python
import numpy as np
import sys
sys.path.append('./GMRES_API')
import GMRES
sys.path.append('./RestartAlgorithm_API')
import RestartAlgorithm
from matplotlib import pyplot as plt
def run_GMRES_restart( methods_used_to_solve_leastSqare, A_mat, b_mat, x_mat ):
# The restatrt algorithm of GMRES
#=====================================================================================================
# GMRES with restart, 2 iterations in each restart ( GMRES(2) )
GMRES_test_itr2 = GMRES.GMRES_API( A_mat, b_mat, 2)
GMRES_test_itr2.methods_used_to_solve_leastSqare_register( methods_used_to_solve_leastSqare )
restarted_GMRES = RestartAlgorithm.RestartAlgorithm()
restarted_GMRES.kernel_algorithm_register( GMRES_test_itr2 )
restarted_GMRES.restart_initial_input( x_mat )
restarted_GMRES.maximum_restarting_iteration_register( 22 )
restarted_GMRES.restarting_iteration_ending_threshold_register( 1.0e-14 )
x_final, r_trend = restarted_GMRES.run_restart()
#=====================================================================================================
return x_final, r_trend
def main():
A_mat = np.array( [
[1.00, 1.00, 1.00],
[1.50, 2.00, 1.00],
[0.30, 0.50, 3.00],
] )
b_mat = np.array( [
3.0,
2.0,
1.0,
] )
x_mat = np.array( [
1.0,
1.0,
1.0,
] )
print("x =", x_mat)
# The algorithm of GMRES without using restart
#============================================================================================================================
size_of_matrix_A = len( A_mat )
number_of_orthogonal_basis_to_be_constructed = size_of_matrix_A
original_GMRES_test = GMRES.GMRES_API( A_mat, b_mat, number_of_orthogonal_basis_to_be_constructed )
original_GMRES_test.initial_guess_input( x_mat )
original_GMRES_test.methods_used_to_solve_leastSqare_register("leastSquare_solver_numpy")
original_GMRES_final_x = original_GMRES_test.run()
print("original_GMRES_final_x = ", original_GMRES_final_x, "residual_norm = ", original_GMRES_test.final_residual_norm,"\n")
#============================================================================================================================
"""
# The restatrt algorithm of GMRES
#======================================================================================
# GMRES with restart, 2 iterations in each restart ( GMRES(2) )
GMRES_test_itr2 = GMRES.GMRES_API( A_mat, b_mat, 2)
#GMRES_test_itr2.methods_used_to_solve_leastSqare_register("Givens_rotation")
#GMRES_test_itr2.methods_used_to_solve_leastSqare_register("QR_decomposition_numpy")
GMRES_test_itr2.methods_used_to_solve_leastSqare_register("leastSquare_solver_numpy")
restarted_GMRES = RestartAlgorithm.RestartAlgorithm()
restarted_GMRES.kernel_algorithm_register( GMRES_test_itr2 )
restarted_GMRES.restart_initial_input( x_mat )
restarted_GMRES.maximum_restarting_iteration_register( 22 )
restarted_GMRES.restarting_iteration_ending_threshold_register( 1.0e-14 )
x_final, r_trend = restarted_GMRES.run_restart()
#======================================================================================
"""
x_final_1, r_trend_1 = run_GMRES_restart("Givens_rotation", A_mat, b_mat, x_mat )
x_final_2, r_trend_2 = run_GMRES_restart("QR_decomposition_numpy", A_mat, b_mat, x_mat )
x_final_3, r_trend_3 = run_GMRES_restart("leastSquare_solver_numpy", A_mat, b_mat, x_mat )
print(" original_GMRES_final_x = ", original_GMRES_final_x, "residual_norm = ", original_GMRES_test.final_residual_norm)
#print("restarting_GMRES_final_x = ", x_final, "residual_norm = ", GMRES_test_itr2.final_residual_norm)
xx = np.matmul( np.linalg.inv(A_mat), b_mat )
print("Ans directly solved : x = ", xx)
# Draw the residual trend by the sequence of restarts
#============================================
plt.title("restarted_GMRES_residual_trend")
plt.xlabel("restart")
plt.ylabel("residual")
#plt.plot(r_trend)
plt.plot(r_trend_1)
plt.plot(r_trend_2)
plt.plot(r_trend_3)
plt.show()
#============================================
if __name__ == '__main__':
main()
```
#### File: GMRES_py/GMRES_API/GMRES.py
```python
import scipy.linalg as splinalg
import numpy as np
import math
#test_Givens_out_of_for_loop = False
test_Givens_out_of_for_loop = True
class GMRES_API(object):
def __init__( self,
A_coefficient_matrix: np.array([], dtype = float ),
b_boundary_condition_vector: np.array([], dtype = float ),
maximum_number_of_basis_used: int,
threshold = 1.0e-16 ):
self.A = A_coefficient_matrix
self.b = b_boundary_condition_vector
self.maximum_number_of_basis_used = maximum_number_of_basis_used
self.threshold = threshold
# Default methods_used_to_solve_leastSqare
#self.methods_used_to_solve_leastSqare = "Givens_rotation"
#self.methods_used_to_solve_leastSqare = "QR_decomposition_numpy"
self.methods_used_to_solve_leastSqare = "leastSquare_solver_numpy"
def methods_used_to_solve_leastSqare_register(self, methods_used_to_solve_leastSqare):
self.methods_used_to_solve_leastSqare = methods_used_to_solve_leastSqare
def initial_guess_input( self, x_input_vector_initial_guess: np.array([], dtype = float ) ):
self.x = x_input_vector_initial_guess
try:
assert len( self.x ) == len( self.b )
except Exception:
print(" The input guess vector's size must equal to the system's size !\n")
print(" The matrix system's size == ", len( self.b ))
print(" Your input vector's size == ", len( self.x ))
self.x = np.zeros( len( self.b ) )
print(" Use default input guess vector = ", self.x, " instead of the incorrect vector you given !\n")
def run( self ):
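        # GMRES outline: build an orthonormal Krylov basis with Arnoldi iterations,
        # solve the small least-squares problem  min_y || beta*e1 - H*y ||  for y,
        # then update the approximate solution x = x0 + Q*y.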
n = len( self.A )
m = self.maximum_number_of_basis_used
r = self.b - np.dot(self.A , self.x)
r_norm = np.linalg.norm( r )
b_norm = np.linalg.norm( self.b )
self.error = np.linalg.norm( r ) / b_norm
self.e = [self.error]
# initialize the 1D vectors
sn = np.zeros( m )
cs = np.zeros( m )
e1 = np.zeros( m + 1 )
e1[0] = 1.0
# beta is the beta vector instead of the beta scalar
beta = r_norm * e1
beta_test = r_norm * e1
H = np.zeros(( m+1, m+1 ))
H_test = np.zeros(( m+1, m+1 ))
Q = np.zeros(( n, m+1 ))
Q[:,0] = r / r_norm
#-----------------------------------------------------------------------------------------------
for k in range(m):
( H[0:k+2, k], Q[:, k+1] ) = __class__.arnoldi( self.A, Q, k)
#H_test[:,k] = H[:,k]
H_test = H
#print("H_test =\n",H_test)
if test_Givens_out_of_for_loop is not True:
( H[0:k+2, k], cs[k], sn[k] ) = __class__.apply_givens_rotation( H[0:k+2, k], cs, sn, k)
# update the residual vector
beta[ k+1 ] = -sn[k] * beta[k]
beta[ k ] = cs[k] * beta[k]
# calculate and save the errors
self.error = abs(beta[k+1]) / b_norm
self.e = np.append(self.e, self.error)
if( self.error <= self.threshold):
break
#-----------------------------------------------------------------------------------------------
if test_Givens_out_of_for_loop is True:
if self.methods_used_to_solve_leastSqare == "Givens_rotation":
# 1. My first GMRES written using Givens rotation to solve lstsq
#---------------------------------------------------------------------------------------------------------------------
H_Givens_test = np.copy(H_test)
for k in range(m):
( H_Givens_test[0:k+2, k], cs[k], sn[k] ) = __class__.apply_givens_rotation( H_Givens_test[0:k+2, k], cs, sn, k)
# update the residual vector
beta[ k+1 ] = -sn[k] * beta[k]
beta[ k ] = cs[k] * beta[k]
#print("H_Givens_test =\n", H_Givens_test)
#print("beta =\n", beta)
#y = __class__.__back_substitution( H_Givens_test[0:m+1, 0:m+1], beta[0:m+1] )
#y = np.matmul( np.linalg.inv( H_Givens_test[0:m+1, 0:m+1]), beta[0:m+1] )
#y = splinalg.solve_triangular(H_Givens_test[0:m, 0:m],beta[0:m] )
y = np.linalg.lstsq(H_Givens_test[0:m, 0:m], beta[0:m])[0]
#---------------------------------------------------------------------------------------------------------------------
elif self.methods_used_to_solve_leastSqare == "QR_decomposition_numpy":
# 2. GMRES using QR decomposition to solve lstsq
#---------------------------------------------------------------
H_QR_test = np.copy(H_test)
QR_q, QR_r = np.linalg.qr(H_QR_test, mode='reduced')
#print(QR_q)
#print("QR_r =\n", QR_r)
#print(beta_test)
new_beta = np.matmul( QR_q.T, beta_test )
#print(new_beta[0:m])
#print("new_beta =",new_beta)
#y = splinalg.solve_triangular(QR_r[0:m, 0:m],new_beta[0:m] )
#y = np.linalg.lstsq(QR_r[:,0:m],new_beta )[0]
y = np.linalg.lstsq(QR_r[0:m, 0:m],new_beta[0:m], rcond=-1 )[0]
#---------------------------------------------------------------
elif self.methods_used_to_solve_leastSqare == "leastSquare_solver_numpy":
                # 3. GMRES directly using numpy.linalg.lstsq to solve lstsq (the most successful one so far!)
#---------------------------------------------------------------
#print(H_test[0:m+1, 0:m])
#print(beta_test)
#y = np.linalg.solve(H_test[0:m, 0:m], beta_test[0:m])
y = np.linalg.lstsq(H_test[0:m+1, 0:m], beta_test)[0]
#---------------------------------------------------------------
else:
print("please set methods_used_to_solve_leastSqare.")
else:
# 1. My first GMRES written using Givens rotation to solve lstsq(but put the Givens with arnoldi)
#-----------------------------------------------------------------------------------
# calculate the result
#y = np.matmul( np.linalg.inv( H[0:k+1, 0:k+1]), beta[0:k+1] )
            #TODO Due to H[0:k+1, 0:k+1] being an upper tri-matrix, we can exploit this fact.
y = __class__.__back_substitution( H[0:m+1, 0:m+1], beta[0:m+1] )
#-----------------------------------------------------------------------------------
#print("y =", y)
self.x = self.x + np.matmul( Q[:,0:k+1], y )
self.final_residual_norm = np.linalg.norm( self.b - np.matmul( self.A, self.x ) )
return self.x
'''''''''''''''''''''''''''''''''''
' Arnoldi Function '
'''''''''''''''''''''''''''''''''''
@staticmethod
def arnoldi( A, Q, k ):
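        # One Arnoldi step: orthogonalize A @ Q[:, k] against the existing Krylov basis
        # vectors via modified Gram-Schmidt, returning column k of the Hessenberg matrix
        # and the next orthonormal basis vector Q[:, k+1].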
h = np.zeros( k+2 )
q = np.dot( A, Q[:,k] )
for i in range ( k+1 ):
h[i] = np.dot( q, Q[:,i])
q = q - h[i] * Q[:, i]
h[ k+1 ] = np.linalg.norm(q)
q = q / h[ k+1 ]
return h, q
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
' Applying Givens Rotation to H col '
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''
@staticmethod
def apply_givens_rotation( h, cs, sn, k ):
        for i in range( k ):
temp = cs[i] * h[i] + sn[i] * h[i+1]
h[i+1] = -sn[i] * h[i] + cs[i] * h[i+1]
h[i] = temp
        # compute the new sin cos values that rotate h[k+1] into h[k]
        cs_k, sn_k, h[k] = __class__.givens_rotation( h[k], h[k+1] )
        # eliminate H[ k+1, k ]
        h[k + 1] = 0.0
return h, cs_k, sn_k
##----Calculate the Given rotation matrix----##
# From "http://www.netlib.org/lapack/lawnspdf/lawn150.pdf"
# The algorithm used by "<NAME>"
@staticmethod
def givens_rotation( v1, v2 ):
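        # Compute cs, sn and r such that [[cs, sn], [-sn, cs]] applied to (v1, v2)
        # gives (r, 0), using the sign-consistent formulation from the LAPACK note above.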
if( v2 == 0.0 ):
cs = np.sign(v1)
sn = 0.0
r = abs(v1)
elif( v1 == 0.0 ):
cs = 0.0
sn = np.sign(v2)
r = abs(v2)
elif( abs(v1) > abs(v2) ):
t = v2 / v1
u = np.sign(v1) * math.hypot( 1.0, t )
cs = 1.0 / u
sn = t * cs
r = v1 * u
else:
t = v1 / v2
u = np.sign(v2) * math.hypot( 1.0, t )
sn = 1.0 / u
cs = t * sn
r = v2 * u
return cs, sn, r
# From https://stackoverflow.com/questions/47551069/back-substitution-in-python
@staticmethod
def __back_substitution( A: np.ndarray, b: np.ndarray) -> np.ndarray:
n = b.size
if A[n-1, n-1] == 0.0:
raise ValueError
x = np.zeros_like(b)
x[n-1] = b[n-1] / A[n-1, n-1]
for i in range( n-2, -1, -1 ):
bb = 0
for j in range ( i+1, n ):
bb += A[i, j] * x[j]
x[i] = (b[i] - bb) / A[i, i]
return x
def final_residual_info_show( self ):
print( "x =", self.x, "residual_norm = ", self.final_residual_norm )
def main():
A_mat = np.array( [[1.00, 1.00, 1.00],
[1.00, 2.00, 1.00],
[0.00, 0.00, 3.00]] )
b_mat = np.array( [3.0, 2.0, 1.0] )
GMRES_itr2 = GMRES_API( A_mat, b_mat, 2, 0.01)
x_mat = np.array( [1.0, 1.0, 1.0] )
print("x =", x_mat)
# GMRES with restart, 2 iterations in each restart ( GMRES(2) )
max_restart_counts = 100
for restart_counter in range(max_restart_counts):
GMRES_itr2.initial_guess_input( x_mat )
x_mat = GMRES_itr2.run()
print(restart_counter+1," : x =", x_mat)
xx = np.matmul( np.linalg.inv(A_mat), b_mat )
print("ANS : xx =", xx)
if __name__ == '__main__':
main()
```
#### File: GMRES_py/RestartAlgorithm_API/RestartAlgorithm.py
```python
import numpy as np
class RestartAlgorithm(object):
def __init__( self, *args, **kwargs ):
# The default number of restarting
self.max_rst_iter = 1
# The default iteration's ending threshold
self.restarting_iteration_ending_threshold = 1.0e-16
def kernel_algorithm_register( self, kernel_algorithm ):
self.k_algo = kernel_algorithm
def restart_initial_input( self, initial_input_vector ):
self.init_in = initial_input_vector
def maximum_restarting_iteration_register( self, maximum_restarting_iteration ):
self.max_rst_iter = maximum_restarting_iteration
def restarting_iteration_ending_threshold_register( self, restarting_iteration_ending_threshold ):
self.restarting_iteration_ending_threshold = restarting_iteration_ending_threshold
def run_restart( self ):
self.restart_output = np.array([], dtype = float )
self.final_residual_trend = np.array([], dtype = float )
try:
for restart_counter in range(self.max_rst_iter):
self.k_algo.initial_guess_input( self.init_in )
# run the kernel algorithm in each restart
self.restart_output = self.k_algo.run()
self.init_in = self.restart_output
print( restart_counter+1, ": ", end = '' )
self.k_algo.final_residual_info_show()
self.final_residual_trend = np.append( self.final_residual_trend, self.k_algo.final_residual_norm )
if( self.k_algo.final_residual_norm < self.restarting_iteration_ending_threshold ):
print("\nThe restarting iteration's ending threshold ",self.restarting_iteration_ending_threshold," has been reached !\n")
break
except:
            print("\n !! ERROR !! Some parameters still have not been registered !!!\n")
if 'self.k_algo' not in locals():
print(" Please use \"kernel_algorithm_register( <your_kernel_algorithm> )\" to register a kernel algorithm !!\n")
if 'self.init_in' not in locals():
                print(" Please use \"restart_initial_input( <your_initial_input_vector> )\" to register an initial input vector !!\n")
finally:
return self.restart_output, self.final_residual_trend
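# Illustrative usage sketch (GMRES_API is assumed to be imported from this project;
# the matrices and numeric values are hypothetical):
#
#   restarter = RestartAlgorithm()
#   restarter.kernel_algorithm_register(GMRES_API(A_mat, b_mat, 2, 0.01))
#   restarter.restart_initial_input(np.array([1.0, 1.0, 1.0]))
#   restarter.maximum_restarting_iteration_register(100)
#   restarter.restarting_iteration_ending_threshold_register(1.0e-12)
#   x, residual_trend = restarter.run_restart()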
``` |
{
"source": "J-N-ch/matplotlib_slider_example",
"score": 3
} |
#### File: J-N-ch/matplotlib_slider_example/slider_demo_JN.py
```python
import time
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider
def example_1st():
############################################################################
# 1st Example start
############################################################################
fig = plt.figure()
    # Define the 2D line segments using t and s
#===============================
t = np.arange(0, 10, 1)
print("t =", t)
s = 0 * t
print("s =", s)
#-------------------------
l, = plt.plot( t, s )
print("l = ", l)
l2, = plt.plot( s, t )
print("l2 = ", l2)
#===============================
    # Define the slider's position (and size)
#===========================================
location_of_slider_1 = plt.axes([
        0.2,  # slider box left (x position)
        0,    # slider box bottom (y position)
        0.7,  # slider box width
        0.05, # slider box height
])
#===========================================
    # Create a Slider object called slider_1
#==========================================================
slider_1 = Slider(
        location_of_slider_1, # use the slider location defined above
        'slider_name',        # the displayed name
        0,                    # minimum value of slider_1
        9,                    # maximum value of slider_1
        valinit=5,            # initial value of slider_1
        valstep=1,            # step size for each move of the slider
)
#==========================================================
    # Define a function describing what should happen whenever the slider's value is updated
#=========================================================================
def update_slider_1(val):
        l.set_ydata( slider_1.val ) # slider_1.val is the value the slider is currently dragged to
        plt.draw() # redraw the figure using the adjusted value
#=========================================================================
    # Register which function to call when slider_1's value changes
slider_1.on_changed( update_slider_1 )
    # Display the figure on screen with all the rules defined above applied (read out from memory)
plt.show()
############################################################################
# 1st Example end
############################################################################
def example_2nd():
#################################################################
# 2nd Example start
#################################################################
fig = plt.figure()
_3d_fig = fig.gca(projection='3d') # gca = Get Current Axis !!!
_3d_fig.set_xlim( xmin= 0, xmax=1)
_3d_fig.set_ylim( ymin= 0, ymax=1)
_3d_fig.set_zlim( zmin= 0, zmax=1)
# plot the slider
initial_xx = 0.25
initial_zz = 0.25
    # left, bottom, width, height
axamp_xx = plt.axes([.25, .01, .55, .06])
axamp_zz = plt.axes([.06, .25, .07, .55])
# Slider defined
samp_xx = Slider(axamp_xx, 'X', 0, 0.5, valinit=initial_xx, \
orientation='horizontal', valstep=0.01)
samp_zz = Slider(axamp_zz, 'Z', 0, 0.5, valinit=initial_zz, \
orientation='vertical', valstep=0.01)
# plot black ab_line
a_xyz =( 0, 0, 0 )
b_xyz =( initial_xx, 0.5, initial_zz )
_3d_fig.plot(
[a_xyz[0], b_xyz[0]], # x-direction
[a_xyz[1], b_xyz[1]], # y-direction
[a_xyz[2], b_xyz[2]], # z-direction
color="black")
def update(val):
#--------------------------------------------
# xx is the current value of the slider
xx = samp_xx.val # paper's x
zz = samp_zz.val # paper's y
_3d_fig.clear()
_3d_fig.set_xlim( xmin= 0, xmax=1)
_3d_fig.set_ylim( ymin= 0, ymax=1)
_3d_fig.set_zlim( zmin= 0, zmax=1)
# plot black ab_line
a_xyz =( 0, 0, 0 )
b_xyz =( xx, 0.5, zz )
_3d_fig.plot(
[a_xyz[0], b_xyz[0]], # x-direction
[a_xyz[1], b_xyz[1]], # y-direction
[a_xyz[2], b_xyz[2]], # z-direction
color="black")
plt.draw() # redraw
#--------------------------------------------
samp_xx.on_changed(update)
samp_zz.on_changed(update)
plt.show()
#################################################################
# 2nd Example end
#################################################################
def input_collecting():
input_number = input( \
"================================================\n" \
"\n" \
"Please choose one example to begin with or quit.\n" \
"\n" \
"================================================\n" \
"1st Example (1)\n" \
"2nd Example (2)\n" \
"Quit (q)\n" \
"================================================\n" \
"\n" \
"Please enter your choice at here : ")
return input_number
# Start to run the main program from here!
#============================================
input_number = input_collecting()
#--------------------------------------
while not input_number == "q":
#------------------------
if input_number=="1":
example_1st()
elif input_number=="2":
example_2nd()
#------------------------
for i in range(100):
print("")
input_number = input_collecting()
#--------------------------------------
print("Goodbye !!")
#============================================
``` |
{
"source": "jnclt/simple_tensorflow_serving",
"score": 3
} |
#### File: clients/python_client/estimator_client.py
```python
import requests
import base64
def main():
string_data1 = '\n\x1f\n\x0e\n\x01a\x12\t\n\x07\n\x05hello\n\r\n\x01b\x12\x08\x12\x06\n\x04\x00\x00\x00?'
string_data2 = '\n \n\x0f\n\x01a\x12\n\n\x08\n\x06\xe4\xbd\xa0\xe5\xa5\xbd\n\r\n\x01b\x12\x08\x12\x06\n\x04\x00\x00\x80\xbf'
# Example: Ch8KDgoBYRIJCgcKBWhlbGxvCg0KAWISCBIGCgQAAAA_
string_data1_b64 = base64.urlsafe_b64encode(string_data1)
string_data2_b64 = base64.urlsafe_b64encode(string_data2)
string_datas = [{"b64": string_data1_b64}, {"b64": string_data2_b64}]
endpoint = "http://127.0.0.1:8500"
input_data = {"data": {"inputs": string_datas}}
result = requests.post(endpoint, json=input_data)
print(result.text)
if __name__ == "__main__":
main()
```
#### File: clients/python_client/mllib_lr_client.py
```python
import requests
def main():
endpoint = "http://127.0.0.1:8500"
input_data = {
"model_name": "default",
"model_version": 1,
"data": {
"format": "libsvm",
"max_ids": 692,
"ids": [128, 129, 130],
"values": [51, 159, 20]
}
}
result = requests.post(endpoint, json=input_data)
print(result.text)
if __name__ == "__main__":
main()
```
#### File: tools/tensorflow_estimator_tool/generate_estimator_string.py
```python
import base64
import tensorflow as tf
def _float_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def main():
input_file_name = "data.txt"
seperator_symbol = " "
# Example: {'age': _float_feature(value=25), 'workclass': _bytes_feature(value='Private'.encode())}
feature_dict = {}
serialized_strings = []
with open(input_file_name, "r") as f:
lines = f.readlines()
keys = [item.strip() for item in lines[0].split(seperator_symbol)]
types = [item.strip() for item in lines[1].split(seperator_symbol)]
for i in range(2, len(lines)):
items = [item.strip() for item in lines[i].split(seperator_symbol)]
for j in range(len(items)):
item = items[j]
if types[j] == "float":
item = float(item)
feature_dict[keys[j]] = _float_feature(value=item)
elif types[j] == "string":
feature_dict[keys[j]] = _bytes_feature(value=item)
example = tf.train.Example(features=tf.train.Features(
feature=feature_dict))
serialized = example.SerializeToString()
serialized_strings.append(serialized)
serialized_proto = tf.contrib.util.make_tensor_proto(
serialized_strings, dtype=tf.string)
serialized_proto_handle = serialized_proto.string_val
# Example: "\n\x1f\n\x0e\n\x01a\x12\t\n\x07\n\x05hello\n\r\n\x01b\x12\x08\x12\x06\n\x04\x00\x00\x00?"
proto_string = serialized_proto_handle.pop()
base64_proto_string = base64.urlsafe_b64encode(proto_string)
print("Base64 string: {}".format(base64_proto_string))
if __name__ == "__main__":
main()
``` |
{
"source": "JnCM/TP_redes",
"score": 4
} |
#### File: TP_redes/src/estacao.py
```python
class Estacao:
    # Class constructor
    def __init__(self, idEstacao):
        # Attribute that stores the station's identifier
        self.idEstacao = idEstacao
        # Attribute that stores the station's corresponding transmission slot
        self.slot = 1
        # Attribute that indicates whether the station has transmitted its frame
        self.transmitiu = 0
        # Attribute that counts the station's successive collisions
        self.nColisoes = 0
    # Method that sets the station's slot value
    def setSlot(self, novoSlot):
        self.slot = novoSlot
    # Method that sets the flag indicating the station has transmitted
    def setTransmitiu(self):
        self.transmitiu = 1
    # Method that returns the station's identifier
    def getIdEstacao(self):
        return self.idEstacao
    # Method that returns the station's slot
    def getSlot(self):
        return self.slot
    # Method that returns the station's transmission flag
    def getTransmitiu(self):
        return self.transmitiu
    # Method that returns the station's number of successive collisions
    def getNColisoes(self):
        return self.nColisoes
    # Method that increments the station's number of successive collisions
    def incrementaColisao(self):
        self.nColisoes += 1
```
#### File: TP_redes/src/slottedAloha.py
```python
from random import randint
from estacao import Estacao
from utils import verificaColisao
# Method that simulates the execution of the Slotted Aloha algorithm
def slottedAloha(estacoes, n):
    slotAtual = transmissoes = flagPrimeira = 0
    estacoesParaTransmitir = []
    while transmissoes < n:
        if slotAtual == 0:
            slotAtual = 1
        else:
            for e in estacoes: # Checks whether there are stations to transmit in the current slot
                if e.getSlot() == slotAtual:
                    estacoesParaTransmitir.append(e)
            flagColisao = verificaColisao(estacoesParaTransmitir) # Checks whether a collision occurred
            if flagColisao == 0:
                for e in estacoes: # Transmits the station's frame
                    if e.getIdEstacao() == estacoesParaTransmitir[0].getIdEstacao():
                        e.setTransmitiu()
                        break
                transmissoes += 1
                if flagPrimeira == 0: # Records the time spent by the first station
                    slotPrimeira = estacoesParaTransmitir[0].getSlot()
                    flagPrimeira = 1
            elif flagColisao == 1:
                for e in estacoes: # Handles the collision by drawing the next slot for the stations that collided
                    for eT in estacoesParaTransmitir:
                        if e.getIdEstacao() == eT.getIdEstacao():
                            e.setSlot(randint(slotAtual+1, slotAtual+n))
            slotAtual += 1
            estacoesParaTransmitir.clear()
    # Returns the total number of time slots used and the time spent by the first station
    return slotPrimeira, slotAtual
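# Illustrative usage sketch (Estacao and verificaColisao come from this project;
# the number of stations is hypothetical):
#
#   n = 4
#   estacoes = [Estacao(i) for i in range(1, n + 1)]
#   slot_primeira, total_slots = slottedAloha(estacoes, n)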
``` |
{
"source": "jncox/pythonclass",
"score": 4
} |
#### File: pythonclass/classes/fridge.py
```python
from contextlib import closing
class RefrigeratorRaider:
"""Raid a refrigerator"""
def open(self):
print("Open fridge door")
def take(self, food):
print("Finding {}...".format(food))
if food == 'deep fried pizza':
raise RuntimeError("Health Warning!")
print("taking {}".format(food))
def close(self):
print("close fridge door")
def raid(food):
with closing(RefrigeratorRaider()) as r:
r.open()
r.take(food)
``` |
{
"source": "jncraton/bricki",
"score": 3
} |
#### File: bricki/bricki/cli.py
```python
from enum import Enum
import subprocess
import helpers
help = """
Commands:
{quantity},{color},{part} - inserts a part transaction
{quantity},{color} - inserts a part transaction using the most recent part
{quantity},{set} - insterts a set transaction
0,{color},{part} - gets current part count
.note {str} - sets the note to use for future transactions
.recent - lists recent transactions
.undo - remvoes last transaction
.rb - show current part on Rebrickable
.help - show this message
.exit - exits the program
{anything else} - search
"""
class CommandType(Enum):
SET_TRANSACTION = 1
PART_TRANSACTION = 2
SEARCH = 3
NOTE = 4
RECENT = 5
UNDO = 6
SHOW_REBRICKABLE = 7
class Command:
"""
Parses a single command
>>> Command('.note my_note').type.name
'NOTE'
>>> Command('.note my_note').note
'my_note'
>>> Command('.recent').type.name
'RECENT'
>>> Command('Brick 1x4').type.name
'SEARCH'
>>> Command('61,red,Brick 1x4').type.name
'PART_TRANSACTION'
>>> Command('61,red,Brick 1x4').part
'Brick 1 x 4'
>>> Command('6,red,Brick 1x4').color
'Red'
>>> Command('6,red,Brick 1x4').quantity
6
"""
def __init__(self, text, default_part=None, default_color=None, default_quantity=1):
self.type = None
self.text = text
self.note = None
self.set = None
self.set_num = None
self.color = default_color
self.color_id = None
self.part = default_part
self.part_num = None
self.quantity = default_quantity
try:
(self.quantity, self.color, self.part) = text.split(",")
if not self.color:
self.color = default_color
if not self.part:
self.part = default_part
try:
self.color_id, self.color = helpers.search_color(self.color)[0]
except IndexError:
print("Color not found: %s" % self.color)
self.type = None
return
try:
self.part_num, self.part = helpers.search_part(self.part)[0]
except IndexError:
print("Part not found: %s" % self.part)
self.type = None
return
self.type = CommandType.PART_TRANSACTION
except ValueError:
try:
(self.quantity, self.color) = text.split(",")
if not self.color:
self.color = default_color
try:
self.color_id, self.color = helpers.search_color(self.color)[0]
self.part_num, self.part = helpers.search_part(self.part)[0]
self.type = CommandType.PART_TRANSACTION
except IndexError:
try:
self.set_num, self.set = helpers.search_set(self.color)[0]
self.type = CommandType.SET_TRANSACTION
except IndexError:
print("Color not found: %s" % self.color)
self.type = None
return
except ValueError:
if text == ".exit":
exit(0)
elif text == ".recent":
self.type = CommandType.RECENT
elif text == ".undo":
self.type = CommandType.UNDO
elif text == ".rb":
self.type = CommandType.SHOW_REBRICKABLE
elif text[0:6] == ".note ":
self.note = text[6:]
self.type = CommandType.NOTE
elif text == ".help" or text == "help":
self.type = None
else:
self.type = CommandType.SEARCH
try:
self.quantity = int(self.quantity)
except ValueError:
self.quantity = default_quantity
if __name__ == "__main__":
last_part = None
last_color = None
note = None
last_note = helpers.query(
"select notes from part_transactions order by rowid desc limit 1"
)
if last_note and last_note[0] and last_note[0][0]:
note = last_note[0][0]
while True:
if last_part or last_color:
print("Current part/color: %s %s" % (last_color, last_part.encode('ascii','ignore').decode()))
else:
print(help)
if note:
print("Current Note: %s" % note)
command = Command(input("> "), default_part=last_part, default_color=last_color)
if not command.type:
print(help)
if command.type == CommandType.NOTE:
note = command.note
if command.type == CommandType.UNDO:
helpers.query(
"delete from part_transactions where rowid = (select max(rowid) from part_transactions)"
)
command.type = CommandType.RECENT
if command.type == CommandType.SHOW_REBRICKABLE:
subprocess.run(
[
"firefox",
"https://rebrickable.com/parts/"
+ helpers.search_part(last_part)[0][0],
]
)
if command.type == CommandType.RECENT:
recent = helpers.query(
"select quantity, colors.name, parts.name, notes from part_transactions join colors on colors.id = part_transactions.color_id join parts on parts.part_num = part_transactions.part_num order by date desc limit 20"
)
for t in recent:
print("%d,%s,%s %s" % (t[0], t[1], t[2].encode('ascii','ignore').decode(), t[3]))
if command.type == CommandType.SEARCH:
print("Search results:")
results = helpers.search_part(command.text)[:40]
try:
last_part = results[0][1]
except IndexError:
pass
for part in results:
print(f"{part[0]:<10} {part[1].encode('ascii','ignore').decode()}")
if command.type == CommandType.SET_TRANSACTION:
print(
"Adding %d %s (%s)" % (command.quantity, command.set, command.set_num)
)
helpers.add_set(command.set_num, command.quantity, note)
if command.type == CommandType.PART_TRANSACTION:
last_part = command.part
last_color = command.color
if command.quantity != 0:
print(
"Adding %d %s (%d) %s (%s)"
% (
command.quantity,
command.color,
command.color_id,
command.part.encode('ascii','ignore').decode(),
command.part_num,
)
)
helpers.add_part(
command.part_num, command.color_id, command.quantity, note
)
else:
print("Nothing to add. Querying current part information...")
print(
"Current quantity: %d"
% (helpers.get_part_total(command.part_num, command.color_id))
)
``` |
{
"source": "jncraton/brunsli",
"score": 2
} |
#### File: jncraton/brunsli/compiler_config_setting.bzl
```python
def create_msvc_config():
# The "do_not_use_tools_cpp_compiler_present" attribute exists to
# distinguish between older versions of Bazel that do not support
# "@bazel_tools//tools/cpp:compiler" flag_value, and newer ones that do.
# In the future, the only way to select on the compiler will be through
# flag_values{"@bazel_tools//tools/cpp:compiler"} and the else branch can
# be removed.
if hasattr(cc_common, "do_not_use_tools_cpp_compiler_present"):
native.config_setting(
name = "msvc",
flag_values = {
"@bazel_tools//tools/cpp:compiler": "msvc-cl",
},
visibility = ["//visibility:public"],
)
else:
native.config_setting(
name = "msvc",
values = {"compiler": "msvc-cl"},
visibility = ["//visibility:public"],
)
``` |
{
"source": "jncraton/nperf",
"score": 4
} |
#### File: nperf/nperf/web.py
```python
from urllib import request
import socket
import time
def is_internet_up():
""" Checks if we have access to the Internet
This uses the same HTTP request that Android uses to confirm connectivity
This requires valid DNS and a working connection to the external host.
>>> is_internet_up()
True
"""
res = request.urlopen('http://connectivitycheck.gstatic.com/generate_204')
return res.getcode() == 204
def get_down_speed(timeout=.5):
"""
Returns download speed in mbps
Download speed is determined by downloading a test file from Softlayer using
a single HTTP connection.
`timeout` is used to set measurement duration
>>> get_down_speed() > 1
True
"""
bytes = 0
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect(('speedtest.wdc01.softlayer.com', 80))
s.sendall(b'GET /downloads/test100.zip HTTP/1.1\r\nHost:speedtest.wdc01.softlayer.com\r\n\r\n')
start = time.time()
while time.time() < start + timeout:
data = s.recv(1024 * 16)
bytes += len(data)
return 8 * bytes / timeout / 1024 / 1024
``` |
{
"source": "jncraton/procedural-bricks",
"score": 3
} |
#### File: procedural-bricks/proceduralbricks/constants.py
```python
from enum import Enum
# Connections
BRICK_EVEN = 1
BRICK_ODD = 2
class Facing(Enum):
FRONT = (0, 1, 2, 1)
BACK = (0, 1, 2, -1)
LEFT = (2, 1, 0, 1)
RIGHT = (2, 1, 0, -1)
def __init__(self, x, y, z, flip):
self.x = x
self.y = y
self.z = z
self.flip = flip
# Parts
BRICK_1X1 = '3005'
BRICK_1X2 = '3004'
BRICK_1X2_MASONRY = '98283'
WINDOW_1X2X2 = '60592C01'
WINDOW_1X2X3 = '60593C01'
GLASS_1X4X6 = '57895'
DOOR_1X4X6_FRAME = '30179'
DOOR_1X4X6_3_PANE = '30178C01'
DOOR_1X4X6_4_PANE = '60623'
# Colors
BLACK = 0
TAN = 19
SAND_BLUE = 379
TRANS_CLEAR = 47
```
#### File: procedural-bricks/proceduralbricks/main.py
```python
from random import random
from buildings import ModularBuilding
def main():
building = ModularBuilding()
with open('test.ldr', 'w') as out:
out.write(building.to_ldr())
if __name__ == '__main__':
main()
``` |
{
"source": "jncsw/Smart-Script",
"score": 2
} |
#### File: clcheck/clchecker/store.py
```python
import pymongo
import collections
import re
import logging
import config as config
logger = logging.getLogger(__name__)
class Command:
def __init__(self, command_name, tx_syntax, clsname_to_readable_syntax,
concrete_specs, explantion, eman):
self.command_name = command_name
self.tx_syntax = tx_syntax
self.clsname_to_readable_syntax = clsname_to_readable_syntax
self.concrete_specs = concrete_specs
self.explanation = explantion
self.eman = eman
@classmethod
def from_store(cls, doc):
return cls(doc['command_name'], doc['tx_syntax'],
doc['clsname_to_readable_syntax'], doc['concrete_specs'],
doc['explanation'], doc['eman'])
def to_store(self):
return {
'command_name': self.command_name,
'tx_syntax': self.tx_syntax,
'clsname_to_readable_syntax': self.clsname_to_readable_syntax,
'concrete_specs': self.concrete_specs,
"explanation": self.explanation,
'eman': self.eman
}
class Store():
'''read/write from/to mongodb'''
def __init__(self, db='clchecker', host=config.MONGO_URI):
self.connection = pymongo.MongoClient(host)
self.db = self.connection[db]
self.commands = self.db['commands']
# self.commands.create_index([('command_name', 'text')])
def close(self):
self.connection.disconnect()
self.commands = None
self.command_to_id = None
def drop_collection(self, confirm=False):
if not confirm:
logger.warning('fail to delete collection since confirm is False')
return
logger.info('dropping commands collection')
self.commands.drop()
def delete_all_documents(self, confirm=False):
if not confirm:
logger.warning(
'fail to delete ALL documents since confirm is False')
return
self.commands.delete_many({})
logger.info('delete all documents in commands collection')
def delete_document(self, command_name, confirm):
if not confirm:
logger.warning(
f'fail to delete command {command_name} since confirm is False')
return
logger.info(f'delete command {command_name} in the database')
self.commands.delete_one({"command_name": command_name})
def __contains__(self, command_name):
c = self.commands.find({'command_name': command_name}).count()
return c > 0
def __iter__(self):
for d in self.commands.find():
yield Command.from_store(d)
def findcommand(self, command_name):
'''find a command document by its name'''
doc = self.commands.find_one({'command_name': command_name})
if doc:
command = Command.from_store(doc)
return command
def addcommand(self, command, overwrite=False):
if self.findcommand(command.command_name) is not None:
if not overwrite:
raise ValueError(
f'command "{command.command_name}" is already in the database'
)
else:
logger.warning(f'overwrite command {command.command_name}')
self.commands.replace_one(
{'command_name': command.command_name}, command.to_store())
else:
self.commands.insert_one(command.to_store())
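# Illustrative usage sketch (the command fields below are placeholders, not real specs):
#
#   store = Store(db='clchecker')
#   cmd = Command('echo', '<tx_syntax>', {}, [], '<explanation>', '<eman>')
#   store.addcommand(cmd, overwrite=True)
#   fetched = store.findcommand('echo')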
```
#### File: smartscript_web/py_checker/bug_db.py
```python
import pymongo
def connect(ip='localhost'):
db = pymongo.MongoClient(ip, 27017, username='<username>',
password='<password>',
authSource='bug_db',
authMechanism='SCRAM-SHA-1')
return db
if __name__ == '__main__':
ip = '<IP>'
db = connect(ip)
print(db.list_databases())
```
#### File: smartscript_web/py_checker/kubeModelServer.py
```python
from symspellpy import SymSpell, Verbosity
import pandas as pd
import pickle, sys, yaml
import re
from collections import defaultdict
from termcolor import colored, cprint
import re
def check_typo(sym_spell, line_words):
    """Check for spelling typos.
Args:
sym_spell (symspellpy.SymSpell): a SymSpell instance to check typo
line_words (list): a list of (line_number, word) for typo-checking
"""
typo_message = []
for n, word in line_words:
suggestions = sym_spell.lookup(word,
Verbosity.CLOSEST,
max_edit_distance=2,
transfer_casing=True)
if suggestions:
suggestion = suggestions[0]
suggested_word = suggestion.term
if word.lower() != suggested_word:
typo_message.append(
                    f'Typo Warning at line {n} for word "{word}"; suggestion is "{suggested_word}"'
)
return typo_message
def check_missing_entry(association_rules, entries):
"""check missing entry errors using the rules extracted with association rules mining.
Args:
        association_rules (dict): pairs that always occur together; association_rules[antecedent] = set of consequents
entries (set): entries to check
"""
missing_messages = []
keys = set(association_rules.keys())
for ante in entries:
if ante in keys:
for conse in association_rules[ante]:
if conse not in entries:
missing_messages.append(
                        f'Missing Entry Warning: expect "{conse}" when "{ante}" is present'
)
return missing_messages
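# Minimal illustrative example (hypothetical rule set and entries):
#
#   rules = {'spec': {'metadata'}}
#   check_missing_entry(rules, {'spec'})
#   -> a single "Missing Entry Warning" that "metadata" is expected whenever "spec" appears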
def check_incorrect_type(type_rules, entry_type):
"""check whether entry_type follow type_rules
Args:
type_rules (dict): dict of str to set
entry_type (list): list of tuple(entry, type)
"""
incorrect_type_messages = []
keys = set(type_rules.keys())
for entry, typ in entry_type:
if entry in keys:
if typ not in type_rules[entry]:
pattern = r"<|>|class|\""
if len(type_rules[entry]) > 1:
incorrect_type_messages.append(
f'Incorrect Type Warning: expect one of {re.sub(pattern, "", str(type_rules[entry]))}, but got {re.sub(pattern, "", str(typ))} for "{entry}"'
)
else:
incorrect_type_messages.append(
f'Incorrect Type Warning: expect {re.sub(pattern, "", str(list(type_rules[entry])[0]))}, but got {re.sub(pattern, "", str(typ))} for "{entry}"'
)
return incorrect_type_messages
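# Minimal illustrative example (hypothetical type rules):
#
#   type_rules = {'spec/replicas': {int}}
#   check_incorrect_type(type_rules, [('spec/replicas', str)])
#   -> one "Incorrect Type Warning" saying an int was expected but a str was found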
def flatten_dict(flat_dict, pre_key, d):
    """Depth-first traversal that flattens a nested dict into path-like keys.
    Args:
        flat_dict (dict): accumulator mapping "prefix/key" paths to values
        pre_key (str): prefix for the flattened keys
        d (dict): nested dict to flatten
"""
for k in d.keys():
new_pre_key = f"{pre_key}/{str(k)}"
flat_dict[new_pre_key] = d[k]
if isinstance(d[k], dict):
flatten_dict(flat_dict, new_pre_key, d[k])
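# Minimal illustrative example (hypothetical input):
#
#   flat = {}
#   flatten_dict(flat, 'spec', {'replicas': 2, 'template': {'kind': 'Pod'}})
#   -> flat == {'spec/replicas': 2, 'spec/template': {'kind': 'Pod'},
#               'spec/template/kind': 'Pod'}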
def generate_flat_dict(yaml_content):
try:
docs = yaml.load_all(yaml_content, Loader=yaml.FullLoader)
except:
raise SyntaxError("I have raised an Exception")
docs_flat_dict = []
apiVersion_kind_missing_messages = []
for doc in docs:
if not doc:
apiVersion_kind_missing_messages.append([])
continue
keys = doc.keys()
error_message = []
if 'apiVersion' not in keys or 'kind' not in keys:
if 'apiVersion' not in keys:
error_message.append(
                    f'Missing Entry Warning: expect "apiVersion" to be present in this document'
)
if 'kind' not in keys:
error_message.append(
                    f'Missing Entry Warning: expect "kind" to be present in this document'
)
apiVersion_kind_missing_messages.append(error_message)
docs_flat_dict.append(dict())
else:
apiVersion_kind_missing_messages.append([])
flat_dict = dict()
flat_dict['apiVersion'] = doc['apiVersion']
flat_dict['kind'] = doc['kind']
flat_dict[f"apiVersion({doc['apiVersion']})"] = doc['apiVersion']
flat_dict[f"kind({doc['kind']})"] = doc['kind']
pre_key = f"apiVersion({doc['apiVersion']})/kind({doc['kind']})"
doc.pop('apiVersion')
doc.pop('kind')
flatten_dict(flat_dict, pre_key, doc)
docs_flat_dict.append(flat_dict)
return docs_flat_dict, apiVersion_kind_missing_messages
def generate_entry_type(docs_flat_dict):
    """Build a list of (entry, type) tuples for each flattened document.
    Args:
        docs_flat_dict (list): list of flattened-entry dicts, one per YAML document
    Returns:
        list: one list of (entry, type) tuples per document
"""
docs_entry_type = []
for flat_dict in docs_flat_dict:
entry_type = []
for entry in flat_dict:
entry_type.append((entry, type(flat_dict[entry])))
docs_entry_type.append(entry_type)
return docs_entry_type
def parse_words(text):
"""Create a non-unique wordlist from sample text
language independent (e.g. works with Chinese characters)
"""
# // \w Alphanumeric characters (including non-latin
# characters, umlaut characters and digits) plus "_". [^\W_] is
# the equivalent of \w excluding "_".
# Compatible with non-latin characters, does not split words at
# apostrophes.
# Uses capturing groups to combine a negated set with a
# character set
matches = re.findall(r"(([^\W_]|['’])+)", text)
# The above regex returns ("ghi'jkl", "l") for "ghi'jkl", so we
# extract the first element
matches = [match[0] for match in matches]
return matches
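# Minimal illustrative example (hypothetical input line):
#
#   parse_words("kind: Deployment # two replicas")
#   -> ['kind', 'Deployment', 'two', 'replicas']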
def get_results(yaml_content,
rules_path,
type_path,
sym_spell_path=None,
out_file=None):
line_word = []
lines = yaml_content.split('\n')
# ignore empty lines and comment lines at the beginning
for i, line in enumerate(lines):
line = line.strip()
if line.startswith('#') or line == '':
continue
else:
break
    # the first document does not need to start with '---'
if lines[i].strip() == '---':
i += 1
docs_line_number = [i + 1]
start_line_index = i + 1
for j, line in enumerate(lines[start_line_index:]):
line_index = start_line_index + j
line = line.strip()
if line.startswith('#'):
continue
if line == '---':
docs_line_number.append(line_index + 1)
words = parse_words(line)
for word in words:
line_word.append((line_index + 1, word))
    # if there is still code after the last '---', the rest of the code is
    # still considered one valid document.
for i in range(docs_line_number[-1], len(lines)):
line = lines[i].strip()
if (not line.startswith('#')) and (line != ''):
docs_line_number.append(len(lines))
break
# check typo
# sym_spell = SymSpell()
# sym_spell.load_pickle(sym_spell_path)
# typo_message = check_typo(sym_spell, line_word)
# if typo_message:
# for m in typo_message:
# print(m)
# if out:
# out.write(m+'\n')
# print()
# load rules
df = pd.read_csv(rules_path)
association_rules = defaultdict(set)
for index, row in df.iterrows():
association_rules[row['antecedents']].add(row['consequents'])
with open(type_path, 'rb') as f:
type_rules = pickle.load(f)
docs_flat_dict, apiVersion_kind_missing_messages = generate_flat_dict(
yaml_content)
docs_entry_type = generate_entry_type(docs_flat_dict)
# check missing entry and incorrect type error
all_messages = ''
for i in range(len(docs_line_number) - 1):
flat_dict = docs_flat_dict[i]
entry_type = docs_entry_type[i]
missing_messages = check_missing_entry(association_rules,
set(flat_dict.keys()))
incorrect_type_messages = check_incorrect_type(type_rules, entry_type)
if missing_messages or incorrect_type_messages or apiVersion_kind_missing_messages[
i]:
warning = 'Warnings'
# warning = colored('Warnings', 'red')
m = warning + f' for Document Starting from line {docs_line_number[i]} to {docs_line_number[i+1]}:'
all_messages = all_messages + m + '\n'
# print(m)
for m in incorrect_type_messages + missing_messages + apiVersion_kind_missing_messages[
i]:
# print(m)
all_messages = all_messages + m + '\n'
# if out:
# out.write(m+'\n')
if out_file is not None:
with open(out_file, 'w', encoding='utf-8') as out:
out.write(all_messages)
return all_messages
def main(yaml_content):
rules_path = "/home/smartscript/smartscript_web/py_checker/confidence1_support002_rules.csv"
type_path = "/home/smartscript/smartscript_web/py_checker/entry_type.pkl.txt"
all_messages = get_results(yaml_content, rules_path, type_path)
all_messages = re.sub(r'(warnings|warning)',
r'<font color="yellow">\1</font>',
all_messages,
flags=re.IGNORECASE)
return all_messages + '<font color="#00bfff">Finish!</font>\n'
# if __name__ == "__main__":
# yaml_file_path = sys.argv[1]
# sym_spell_path = "/home/wangluochao/smartscript/configuration/sym_spell_yaml.pkl"
# rules_path = "/home/wangluochao/smartscript/configuration/confidence1_support002_rules.csv"
# type_path = "/home/wangluochao/smartscript/configuration/entry_type.pkl"
# main(yaml_file_path, sym_spell_path, rules_path, type_path)
```
#### File: smartscript_web/py_checker/newTypeModel.py
```python
import time
import os
import itertools
from collections import defaultdict
import pickle
import glob
import numpy as np
from redbaron import RedBaron
import torch
import torch.nn as nn
from torch.utils.data import random_split
import torch.optim as optim
import torch.nn.functional as F
from tqdm import tqdm
# import tokenization
# import bug_db
# import normalize
from . import tokenization
from . import bug_db
from . import normalize
# from normalize import normalize_format_string
# from tokenization import ast_tokenize_str
# In[2]:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
criterion = torch.nn.NLLLoss(reduction='mean')
encoder_n_layers = 1
decoder_n_layers = 1
batch_size = 20480
p_dropout = 0.1
model_size = 64
n_epoch = 100
vocab_size = 10000
lr = 0.001
model_folder = "/home/smartscript/smartscript_web/py_checker/model_type_fulldata/"
MAX_SEQ_LEN = 512
# In[3]:
trainWriter = open("./TrainLog.txt","w",encoding='utf-8')
validWriter = open("./ValidLog.txt","w",encoding='utf-8')
testWriter = open("./TestLog.txt","w",encoding='utf-8')
# In[4]:
with open ('/home/smartscript/smartscript_web/py_checker/model_type_fulldata/Types500.pkl', 'rb') as fp:
Types500 = pickle.load(fp)
# In[5]:
class EncoderRNN(nn.Module):
def __init__(self, hidden_size, embedding, n_layers=1, dropout=0):
super(EncoderRNN, self).__init__()
self.n_layers = n_layers
self.hidden_size = hidden_size
self.embedding = embedding
# Initialize GRU; the input_size and hidden_size params are both set to 'hidden_size'
# because our input size is a word embedding with number of features == hidden_size
self.gru = nn.GRU(
hidden_size,
hidden_size,
n_layers,
dropout=(0 if n_layers == 1 else dropout),
bidirectional=True)
self.n_directions = 2
def forward(self, input_seq, input_lengths, hidden=None):
# Convert word indexes to embeddings
embedded = self.embedding(input_seq)
# Pack padded batch of sequences for RNN module
packed = torch.nn.utils.rnn.pack_padded_sequence(
embedded, input_lengths, batch_first=False)
# Forward pass through GRU
outputs, hidden = self.gru(packed, hidden)
# Unpack padding
outputs, _ = torch.nn.utils.rnn.pad_packed_sequence(outputs, batch_first=False)
# Sum bidirectional GRU outputs
outputs = outputs[:, :, :self.hidden_size] + outputs[:, :, self.hidden_size:]
hidden = hidden.view(self.n_layers, self.n_directions, -1, self.hidden_size)
hidden = hidden[-1, 0, :, :] + hidden[-1, 1, :, :]
# Return output and final hidden state
return outputs, hidden.unsqueeze(0)
# In[6]:
class Attn(torch.nn.Module):
def __init__(self, method, hidden_size):
super(Attn, self).__init__()
self.method = method
if self.method not in ['dot', 'general', 'concat']:
raise ValueError(self.method,
"is not an appropriate attention method.")
self.hidden_size = hidden_size
if self.method == 'general':
self.attn = torch.nn.Linear(self.hidden_size, hidden_size)
elif self.method == 'concat':
self.attn = torch.nn.Linear(self.hidden_size * 2, hidden_size)
self.v = torch.nn.Parameter(torch.FloatTensor(hidden_size))
def dot_score(self, hidden, encoder_output):
return torch.sum(hidden * encoder_output, dim=2)
def general_score(self, hidden, encoder_output):
energy = self.attn(encoder_output)
return torch.sum(hidden * energy, dim=2)
def concat_score(self, hidden, encoder_output):
energy = self.attn(
torch.cat((hidden.expand(encoder_output.size(0), -1, -1),
encoder_output), 2)).tanh()
return torch.sum(self.v * energy, dim=2)
def forward(self, hidden, encoder_outputs, attn_mask=None):
# Calculate the attention weights (energies) based on the given method
if self.method == 'general':
attn_energies = self.general_score(hidden, encoder_outputs)
elif self.method == 'concat':
attn_energies = self.concat_score(hidden, encoder_outputs)
elif self.method == 'dot':
attn_energies = self.dot_score(hidden, encoder_outputs)
# Transpose max_length and batch_size dimensions
attn_energies = attn_energies.t()
if attn_mask is not None:
attn_energies.masked_fill_(attn_mask, -1e20)
# Return the softmax normalized probability scores (with added dimension)
return F.softmax(attn_energies, dim=1).unsqueeze(1)
# In[7]:
class AttnClassifier(nn.Module):
def __init__(self,
attn_model,
embedding,
hidden_size,
output_size,
n_layers=1,
dropout=0.1):
super(AttnClassifier, self).__init__()
# Keep for reference
self.attn_model = attn_model
self.hidden_size = hidden_size
self.output_size = output_size
self.n_layers = n_layers
self.dropout = dropout
# Define layers
self.embedding = embedding
self.concat = nn.Linear(hidden_size * 2, hidden_size)
self.out = nn.Linear(hidden_size, output_size)
self.attn = Attn(attn_model, hidden_size)
def forward(self, encoder_hidden, encoder_outputs, attn_mask):
# Calculate attention weights from the current GRU output
# attn_weights = self.attn(encoder_hidden, encoder_outputs, attn_mask)
# Multiply attention weights to encoder outputs to get new "weighted sum" context vector
# context = attn_weights.bmm(encoder_outputs.transpose(0, 1))
# Concatenate weighted context vector and GRU output using Luong eq. 5
output = encoder_hidden.squeeze(0)
# context = context.squeeze(1)
# concat_input = torch.cat((output, context), 1)
# concat_output = torch.tanh(self.concat(concat_input))
# Predict next word using Luong eq. 6
output = self.out(output)
output = F.log_softmax(output, dim=1)
# Return output and final hidden state
return output
# In[8]:
class BugDetector(nn.Module):
def __init__(self,
vocab_size,
max_seq_len,
model_size=32,
p_dropout=0.1):
super(BugDetector, self).__init__()
self.embedding = nn.Embedding(vocab_size, model_size, padding_idx=0)
self.max_seq_len = max_seq_len
self.encoder = EncoderRNN(model_size, self.embedding, encoder_n_layers, p_dropout)
self.cls = AttnClassifier('dot', self.embedding, model_size, 500, decoder_n_layers, p_dropout)
# self.apply(self.init_weights)
def init_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0, std=0.02)
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
def forward(self, seqs, seqs_lens):
# Ignore the last EOS token
encoder_outputs, encoder_hidden = self.encoder(seqs, seqs_lens)
attn_mask = padding_mask(seqs_lens, self.max_seq_len)
output = self.cls(encoder_hidden, encoder_outputs, attn_mask)
return output
# In[9]:
# allName = np.load("../allName.npy")
# allType = np.load("../allType.npy")
# In[10]:
# i=1
# for Name,Type in tqdm(zip(allName,allType)):
# print(Name,Type)
# if i>10:
# break
# i=i+1
# In[11]:
# allName[:10]
# In[20]:
def load_data():
sp = tokenization.load_model('../SPM500/spm.model')
with open ('../moreThan500Name.pkl', 'rb') as fp:
allName = pickle.load(fp)
with open ('../moreThan500Type.pkl', 'rb') as fp:
allType = pickle.load(fp)
# allName = np.load("../allName.npy")
# allType = np.load("../allType.npy")
max_tensor_length = 0
samples = []
labels = []
print("Loading data...")
for Name,Type in tqdm(zip(allName,allType),total=len(allName)):
token_ids = tokenization.encode(sp, Name)
if len(token_ids) == 0 or len(token_ids) > MAX_SEQ_LEN:
continue
samples.append(token_ids)
labels.append(Types500.index(Type))
max_tensor_length = max(max_tensor_length, len(token_ids))
return list(zip(samples, labels)), max_tensor_length
# In[13]:
def padding_mask(seqs_lens, max_len):
mask = torch.zeros((seqs_lens.size(0), seqs_lens.max().item()), dtype=torch.uint8)
for i, seq_len in enumerate(seqs_lens):
mask[i][seq_len:] = 1
return mask.to(device)
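# Minimal illustrative example (hypothetical lengths): for seqs_lens = tensor([3, 1])
# the returned mask has shape (2, 3) and equals [[0, 0, 0], [0, 1, 1]], i.e. positions
# beyond each sequence's true length are flagged for masking.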
def get_token_ids(stmt: str, word2index, index2word, word_counts, word_idx):
tokens = tokenization.ast_tokenize_str(stmt)
for token in tokens:
if token not in word2index:
word2index[token] = word_idx
index2word[word_idx] = token
word_idx += 1
word_counts[token] += 1
return tokens, word_idx
def get_tokens(stmt: str, word2index, index2word, word_counts, word_idx):
tokens = tokenization.ast_tokenize_str(stmt)
for token in tokens:
if token not in word2index:
word2index[token] = word_idx
index2word[word_idx] = token
word_idx += 1
word_counts[token] += 1
return tokens, word_idx
def calc_vocab_min_freq(word_counts, vocab_size):
# sorted_word_counts = sorted(word_counts.items(), lambda kv: kv[1])
values = list(word_counts.values())
sorted_values = sorted(values, reverse=True)
return sorted_values[vocab_size]
def save_vocab(word2index, index2word, word_counts, min_freq):
keep_word2index = {}
keep_index2word = {}
for k in word2index.keys():
if word_counts[k] >= min_freq:
keep_word2index[k] = word2index[k]
keep_index2word[word2index[k]] = k
vocab = {'word2index': word2index, 'index2word': index2word}
vocab_path = os.path.join(model_folder, "vocab.dat")
pickle.dump(vocab, open(vocab_path, 'wb'))
def load_vocab():
vocab_path = os.path.join(model_folder, "vocab.dat")
vocab = pickle.load(open(vocab_path, 'rb'))
return vocab['word2index'], vocab['index2word']
def zero_padding(batch, fillvalue=0):
batch.sort(key=lambda sample: len(sample[0]), reverse=True)
batch_samples, batch_labels = zip(*batch)
lengths = torch.tensor([len(indexes) for indexes in batch_samples])
# return list(zip(*itertools.zip_longest(*batch_samples, fillvalue=fillvalue))), lengths, batch_labels
# samples shape becomes: [max_len, batch_size]
return list(itertools.zip_longest(*batch_samples, fillvalue=fillvalue)), lengths, batch_labels
def collate_fn(batch):
padded_samples, lengths, batch_labels = zero_padding(batch, 0)
return torch.LongTensor(padded_samples), torch.LongTensor(lengths), torch.LongTensor(batch_labels)
def compute_loss(pred, tgt):
loss = criterion(pred, tgt)
pred = pred.max(dim=1)[1] # result of 'max' is tuple, dimension 1 is the indices, dimension 0 is the values
n_correct = pred.eq(tgt).sum().item()
return loss, n_correct
def max_norm(model: nn.Module, max_val=3):
for name, param in model.named_parameters():
if 'bias' not in name and len(param.shape) > 1:
param.renorm(2, 0, max_val)
# In[14]:
def train_epoch(model, training_data, optimizer):
model.train()
total_correct = 0
total = 0
for batch in tqdm(
training_data, mininterval=2, desc=' ---Training--- ',
leave=False):
seqs, seqs_lens, labels = map(lambda x: x.to(device), batch)
# optim.optimizer.zero_grad()
optimizer.zero_grad()
pred = model(seqs, seqs_lens)
loss, n_correct = compute_loss(pred, labels)
# loss.register_hook(lambda grad: print(grad))
loss.backward()
optimizer.step()
# max_norm(model, 3)
total += labels.size(0)
total_correct += n_correct
accr = total_correct / total
return accr
# In[15]:
def eval_epoch(model, validation_data):
model.eval() # disable dropout, batchnorm, etc.
total_correct = 0
total = 0
with torch.no_grad():
for batch in tqdm(validation_data, mininterval=2,
desc=' ---Validation--- ',
leave=False):
seqs, seqs_lens, labels = map(lambda x: x.to(device),
batch)
pred = model(seqs, seqs_lens)
_, n_correct = compute_loss(pred, labels)
total += labels.size(0)
total_correct += n_correct
accr = total_correct / total
return accr
# In[16]:
def test_epoch(model, test_data):
model.eval() # disable dropout, batchnorm, etc.
total_correct = 0
total = 0
with torch.no_grad():
for batch in tqdm(test_data, mininterval=2,
desc=' ---Test--- ',
leave=False):
seqs, seqs_lens, labels = map(lambda x: x.to(device),
batch)
pred = model(seqs, seqs_lens)
_, n_correct = compute_loss(pred, labels)
total += labels.size(0)
total_correct += n_correct
accr = total_correct / total
return accr
# In[17]:
def train(model, training_data, validation_data, test_data, optim, vocab_size, max_tensor_length):
val_accrs = []
test_accrs = []
for i in range(n_epoch):
start = time.time()
train_accr = train_epoch(model, training_data, optim)
trainWriter.write(str(train_accr)+"\n")
# trainWriter.write("\n")
trainWriter.flush()
print('\n - (Training) accuracy: {accu:3.3f} %, '
'elapse: {elapse:3.3f} min'.format(
accu=100 * train_accr,
elapse=(time.time() - start) / 60))
start = time.time()
val_accr = eval_epoch(model, validation_data)
validWriter.write(str(val_accr)+"\n")
# validWriter.write("\n")
validWriter.flush()
print('\n - (Validation) accuracy: {accu:3.3f} %, '
'elapse: {elapse:3.3f} min'.format(
accu=100 * val_accr,
elapse=(time.time() - start) / 60))
val_accrs.append(val_accr)
# print("Accuracies so far: ", val_accrs)
start = time.time()
test_accr = test_epoch(model, test_data)
testWriter.write(str(test_accr)+"\n")
# validWriter.write("\n")
testWriter.flush()
print('\n - (Test) accuracy: {accu:3.3f} %, '
'elapse: {elapse:3.3f} min'.format(
accu=100 * test_accr,
elapse=(time.time() - start) / 60))
test_accrs.append(test_accr)
# print("Accuracies so far: ", val_accrs)
model_state_dict = model.state_dict()
config = {'max_src_seq_len': max_tensor_length,
'vocab_size': vocab_size,
'dropout': p_dropout}
checkpoint = {'model': model_state_dict, 'epoch': i,
'config': config}
model_name = os.path.join(model_folder, "TypeModel.ckpt")
if val_accr >= max(val_accrs):
print("Save model at epoch ", i)
torch.save(checkpoint, model_name)
# In[18]:
def main():
samples, max_tensor_length = load_data()
training_samples, validation_samples, test_samples = random_split(
samples, [int(len(samples) * 0.6), int(len(samples) * 0.2),len(samples) - int(len(samples) * 0.6)-int(len(samples) * 0.2)])
train_loader = torch.utils.data.DataLoader(
training_samples,
num_workers=0,
batch_size=batch_size,
collate_fn=collate_fn,
shuffle=True)
valid_loader = torch.utils.data.DataLoader(
validation_samples,
num_workers=0,
batch_size=batch_size,
collate_fn=collate_fn,
)
test_loader = torch.utils.data.DataLoader(
test_samples,
num_workers=0,
batch_size=batch_size,
collate_fn=collate_fn,
)
# vocab size should be len(word2index)+1 since 0 is not used
detector = BugDetector(vocab_size, max_tensor_length, model_size, p_dropout)
optimizer = optim.Adam(detector.parameters(), lr=lr)
detector.to(device)
train(detector, train_loader, valid_loader,test_loader, optimizer, vocab_size, max_tensor_length)
# In[21]:
import ast
def predict(wanted):
model_path = os.path.join(model_folder, "TypeModel.ckpt")
checkpoint = torch.load(model_path,map_location=torch.device('cpu')) ##################
sp = tokenization.load_model('/home/smartscript/smartscript_web/py_checker/model_type_fulldata/spm.model')
# word2index, index2word = load_vocab()
# wanted = input("Please input var name:")
test_samples = []
fake_lables = []
tokens = tokenization.encode(sp, wanted)
# token_ids = []
# for token in tokens:
# token_ids.append(word2index.get(token, word2index['__UNK_TOKEN__']))
test_samples.append(tokens)
fake_lables.append(0)
test_samples = list(zip(test_samples, fake_lables))
data_loader = torch.utils.data.DataLoader(
test_samples,
num_workers=0,
batch_size=1,#len(test_samples),
collate_fn=collate_fn,
shuffle=False)
for batch in tqdm(
data_loader, mininterval=2, desc=' ---Predicting--- ',
leave=False):
seqs, seqs_lens, indices = map(lambda x: x.to(device), batch)
detector = BugDetector(checkpoint['config']['vocab_size'], checkpoint['config']['max_src_seq_len'], model_size,
checkpoint['config']['dropout'])
detector.load_state_dict(checkpoint['model'])
detector.to(device)
detector.eval()
pred = detector(seqs, seqs_lens)
pred2 = F.softmax(pred,dim=1)
# print(pred2.max(dim=0))
poss = str(pred2.max().data)[7:-1]
pred = pred.max(dim=1)[1]
return str(Types500[pred])+" - "+poss
def getResult(code):
# code = open("/home/smartscript/smartscript_web/static/py_checker/misc/type/1.py","r").readlines()
# code = "\n".join(code)
# print(code)
root = ""
try:
root = ast.parse(code)
except Exception as e:
return "AST ERROR: "+str(e)
names = sorted({(node.id,node.lineno) for node in ast.walk(root) if isinstance(node, ast.Name)})
# names = sorted({node.id for node in ast.walk(root) if isinstance(node, ast.Name)})
names2 = sorted({(node.attr,node.lineno) for node in ast.walk(root) if isinstance(node, ast.Attribute)})
names3 = sorted({(node.name,node.lineno) for node in ast.walk(root) if isinstance(node, ast.FunctionDef)})
namesAll = list(set(names+names3))
# red = RedBaron(code)
# method_reds = red.find_all('def')
# methods = []
# funcNames = []
# for method_red in method_reds:
# funcName = method_red.name
# # print(funcName)
# methods.append(method_red.dumps())
# funcNames.append(funcName)
# print(funcNames)
# funcNames.append("getint")
results = []
for func,lineno in namesAll:
results.append((predict(func),lineno))
ret = {}
for func,res in zip(namesAll,results):
if str(res[1]) not in ret:
ret[str(res[1])] = []
ret[str(res[1])].append([func[0],str(res[0])])
return ret
def test(name):
namesAll = []
namesAll.append([name,1])
namesAll.append(["test",1])
namesAll.append(["var",2])
results = []
for func,lineno in namesAll:
results.append((predict(func),lineno))
ret = {}
for func,res in zip(namesAll,results):
if str(res[1]) not in ret:
ret[str(res[1])] = []
ret[str(res[1])].append([func[0],str(res[0])])
return ret
# main()
if __name__ == "__main__":
print(test("PY2"))
```
#### File: smartscript_web/py_checker/normalize.py
```python
from string import Formatter
from redbaron import RedBaron, NameNode, GetitemNode, CallNode, DotNode
from baron import BaronError
import regex as re
def get_format_string_keywords(stmt: str):
"""
https://stackoverflow.com/questions/25996937/how-can-i-extract-keywords-from-a-python-format-string
:param stmt:
:return:
"""
fieldnames = [fname for _, fname, _, _ in Formatter().parse(stmt) if fname]
return fieldnames
def normalize_format_string(stmt: str):
"""
Only support python3 style format string so far.
How to check python2?
_(..., log(...), ...)
:param stmt:
:return:
"""
if re.search(r'(\{[\S ]*?\})', stmt) is None:
return stmt
try:
red = RedBaron(stmt)
except (BaronError, AssertionError) as err:
return None
name_nodes = red.find_all('name')
renaming_mapping = {}
cnt = 0
for name_node in name_nodes:
if isinstance(name_node.next, GetitemNode) or isinstance(name_node.next, DotNode):
if name_node.value in renaming_mapping:
name_node.value = renaming_mapping[name_node.value]
else:
renaming_mapping[name_node.value] = 'INSTANCE{}'.format(cnt)
name_node.value = renaming_mapping[name_node.value]
cnt += 1
elif isinstance(name_node.next, CallNode):
if name_node.value in renaming_mapping:
name_node.value = renaming_mapping[name_node.value]
else:
renaming_mapping[name_node.value] = 'FUNC{}'.format(cnt)
name_node.value = renaming_mapping[name_node.value]
cnt += 1
else:
if name_node.value in renaming_mapping:
name_node.value = renaming_mapping[name_node.value]
elif name_node.value.isdigit(): # skip constant number
continue
else:
renaming_mapping[name_node.value] = 'VAR{}'.format(cnt)
name_node.value = renaming_mapping[name_node.value]
cnt += 1
string_nodes = red.find_all('string')
for string_node in string_nodes:
matches = re.findall(r'(\{[\S ]*?\})', string_node.value)
if matches is None or len(matches) == 0:
continue
# new_val = string_node.value[0] + ''.join(matches) + string_node.value[-1]
new_val = '"' + ' '.join(matches) + '"'
# for old_id, new_id in renaming_mapping.items():
# # for now just use replace
# # Maybe we should find a way to get all identifiers in a format string later
# new_val = new_val.replace(old_id, new_id)
keywords = get_format_string_keywords(string_node.value)
for keyword in keywords:
if keyword in renaming_mapping:
new_val = new_val.replace(keyword, renaming_mapping[keyword])
else:
renaming_mapping[keyword] = 'VAR{}'.format(cnt)
new_val = new_val.replace(keyword, renaming_mapping[keyword])
cnt += 1
string_node.value = new_val
return red.dumps()
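# Illustrative sketch of the intended behaviour (the input is hypothetical and the
# exact placeholder numbering depends on parse order):
#
#   normalize_format_string("msg = 'hello {name}'.format(name=user)")
#
# renames identifiers such as `msg`, `user` and the `name` field to generic
# VAR*/FUNC* placeholders and keeps only the '{...}' fields of the literal string.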
def normalize_dict_key(method: str) -> str:
"""
Remove statements unrelated to dict key access or field access
:param method:
:return:
"""
red = RedBaron(method)
# remain_stmts = []
method_red = red.find('def')
for i, stmt in enumerate(method_red.value):
if len(stmt.find_all('dictnode')) != 0 or len(stmt.find_all('getitem')) != 0 or len(stmt.find_all('dotnode')) != 0:
# remain_stmts.append(stmt.dumps())
continue
else:
del method_red.value[i]
# red.value = '\n'.join(remain_stmts)
return red.dumps()
```
#### File: misc/2/buggy.py
```python
class Figure:
""" Fake figure class """
def __init__(self, name=''):
self.name = name
def other(self):
""" another public func to eliminate R0903 check"""
def add(self, layout):
""" fake add_subplot function """
return self.name + layout
def misuse(plot, with_components):
""" Bug example """
if plot is True:
fig = Figure()
if with_components:
axis = fig.add(121)
axis2 = fig.add(122)
else:
axis = fig.add(111)
# ... some other statements
if plot is True:
print(axis2)
if with_components:
print(axis)
``` |
{
"source": "jnd77/CarND-Behavioral-Cloning-P3",
"score": 3
} |
#### File: jnd77/CarND-Behavioral-Cloning-P3/input_generator.py
```python
import csv
import cv2
import random
import numpy as np
import os
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
def load_samples(data_folder):
"""
This function returns the tuple train_samples, validation_samples
"""
# Open the csv with the data
with open(os.path.join(data_folder, 'driving_log.csv'), 'r') as f:
reader = csv.reader(f)
# Skip title row
next(reader)
samples = list(reader)
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
return train_samples, validation_samples
def generator(data_folder, samples, batch_size=32):
"""
This function returns a generator based on the samples
    (so they are not all loaded into memory at once)
"""
num_samples = len(samples)
adjust_angle = 0.2
# Loop forever so the generator never terminates
while True:
random.shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset: offset + batch_size]
images = []
angles = []
for batch_sample in batch_samples:
angle = float(batch_sample[3])
is_center = False
# Pick between center, left or right
left_center_right = random.random()
if left_center_right < 0.3:
# Left
name = os.path.join(data_folder, 'IMG', batch_sample[1].split('/')[-1])
angle += adjust_angle
elif left_center_right > 0.7:
# Right
name = os.path.join(data_folder, 'IMG', batch_sample[2].split('/')[-1])
angle -= adjust_angle
else:
name = os.path.join(data_folder, 'IMG', batch_sample[0].split('/')[-1])
is_center = True
if not os.path.exists(name):
print(name)
image = cv2.cvtColor(cv2.imread(name), cv2.COLOR_BGR2RGB)
# Flip some of the time for center images
if is_center and random.random() < 0.5:
images.append(cv2.flip(image, 1))
angles.append(-angle)
else:
images.append(image)
angles.append(angle)
# trim image to only see section with road
features = np.array(images)
labels = np.array(angles)
yield shuffle(features, labels)
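# Illustrative usage sketch (the data folder path and batch size are assumptions):
#
#   train_samples, validation_samples = load_samples('data')
#   train_gen = generator('data', train_samples, batch_size=32)
#   X_batch, y_batch = next(train_gen)  # images and steering angles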
``` |
{
"source": "JNDanielson/mplstereonet",
"score": 3
} |
#### File: mplstereonet/examples/contour_angelier_data.py
```python
import matplotlib.pyplot as plt
import mplstereonet
import parse_angelier_data
def plot(ax, strike, dip, rake, **kwargs):
ax.rake(strike, dip, rake, 'ko', markersize=2)
ax.density_contour(strike, dip, rake, measurement='rakes', linewidths=1,
cmap='jet', **kwargs)
# Load data from Angelier, 1979
strike, dip, rake = parse_angelier_data.load()
# Setup a subplot grid
fig, axes = mplstereonet.subplots(nrows=3, ncols=4)
# Hide azimuth tick labels
for ax in axes.flat:
ax.set_azimuth_ticks([])
contours = [range(2, 18, 2), range(1, 21, 2), range(1, 22, 2)]
# "Standard" Kamb contouring with different confidence levels.
for sigma, ax, contour in zip([3, 2, 1], axes[:, 0], contours):
# We're reducing the gridsize to more closely match a traditional
# hand-contouring grid, similar to Kamb's original work and Vollmer's
# Figure 5. `gridsize=10` produces a 10x10 grid of density estimates.
plot(ax, strike, dip, rake, method='kamb', sigma=sigma,
levels=contour, gridsize=10)
# Kamb contouring with inverse-linear smoothing (after Vollmer, 1995)
for sigma, ax, contour in zip([3, 2, 1], axes[:, 1], contours):
plot(ax, strike, dip, rake, method='linear_kamb', sigma=sigma,
levels=contour)
template = r'$E={}\sigma$ Contours: ${}\sigma,{}\sigma,\ldots$'
ax.set_xlabel(template.format(sigma, *contour[:2]))
# Kamb contouring with exponential smoothing (after Vollmer, 1995)
for sigma, ax, contour in zip([3, 2, 1], axes[:, 2], contours):
plot(ax, strike, dip, rake, method='exponential_kamb', sigma=sigma,
levels=contour)
# Title the different methods
methods = ['Kamb', 'Linear\nSmoothing', 'Exponential\nSmoothing']
for ax, title in zip(axes[0, :], methods):
ax.set_title(title)
# Hide top-right axis... (Need to implement Diggle & Fisher's method)
axes[0, -1].set_visible(False)
# Schmidt contouring (a.k.a. 1%)
plot(axes[1, -1], strike, dip, rake, method='schmidt', gridsize=25,
levels=range(3, 20, 3))
axes[1, -1].set_title('Schmidt')
axes[1, -1].set_xlabel(r'Contours: $3\%,6\%,\ldots$')
# Raw data.
axes[-1, -1].set_azimuth_ticks([])
axes[-1, -1].rake(strike, dip, rake, 'ko', markersize=2)
axes[-1, -1].set_xlabel('N={}'.format(len(strike)))
plt.show()
```
#### File: mplstereonet/mplstereonet/kinematic_analysis.py
```python
import numpy as np
from shapely import geometry, ops
from . import stereonet_math
from .convenience_functions import subplots
from .analysis import azimuth_diff, apparent_dip
def daylight_envelope(strike, dip, segments=500):
"""
Calculates the longitude and latitude of `segments` points along the
stereonet projection of the daylight envelope of each slope face
with a given `strike` and `dip` in degrees.
Parameters
----------
strike : number or sequence of numbers
The strike of the plane(s) (slope face) in degrees, with dip direction
indicated by the azimuth (e.g. 315 vs. 135) specified following the
"right hand rule".
dip : number or sequence of numbers
The dip of the plane(s) in degrees.
segments : number or sequence of numbers
The number of points in the returned `lon` and `lat` arrays. Defaults
to 500 segments.
Returns
-------
lon, lat : arrays
`num_segments` x `num_strikes` arrays of longitude and latitude in
radians.
"""
# Get apparent dips from -90 to +90 (azimuth difference) from slope dip
# direction, i.e. +0 to +180 from slope strike. This essentially generates
# points defining the great-circle plane that represents the slope face
dl_bearings = np.linspace(0, 180, segments).reshape(segments, 1)
dl_plunges = apparent_dip(dip, 90-dl_bearings)
# More points needed for daylight envelope for steep slopes
if dip > 89:
# Crop original end sections at apparent dip = 0
dl_bearings = dl_bearings[1:-1]
# Create main section. End points cropped to avoid overlapping
b2 = dl_bearings[1:-1]
p2 = apparent_dip(dip, 90-b2)
# Get the apparent dip of the cropped end points (new connection points)
connect_dip = apparent_dip(dip, 90 - dl_bearings[0])
# Create the two new end sections, by generating points from
# apparent dip = 0 to the apparent dip of the connection points
p1 = np.linspace(0, connect_dip, segments)
b1 = 90 + azimuth_diff(dip, p1)
p3 = p1[::-1]
b3 = 90 - azimuth_diff(dip, p3)
# Connect the 3 sections
dl_bearings = np.vstack((b1, b2[::-1], b3))
dl_plunges = np.vstack((p1, p2[::-1], p3))
# Convert to lat,lon of poles
lon, lat = stereonet_math.pole(dl_bearings-90, dl_plunges)
lon, lat = stereonet_math._rotate(np.degrees(lon), np.degrees(lat), strike)
return lon, lat
def _curved_latlims(angle, segments=100):
"""
Calculates the longitude and latitude of `segments` points along the
stereonet projection of the "curved" lateral limit bounds in both
direction, for strike=0.
"""
# Plot lines of constant latitude
angle = np.radians(angle)
lat1 = -angle * np.ones(segments)
lon1 = np.linspace(-np.pi/2, np.pi/2, segments)
lat2 = angle * np.ones(segments)
lon2 = lon1.copy()
return lon1, lat1, lon2, lat2
def _shape(shape_type, strike=0, dip=0, angle=0):
"""
Prepare elements required to construct the kinematic analysis plots (e.g.
planes, cones) into Shapely geometries.
"""
if shape_type=='plane':
lon, lat = stereonet_math.plane(strike, dip)
return geometry.LineString(np.hstack((lon, lat)))
elif shape_type=='curved_latlims':
lon1, lat1, lon2, lat2 = _curved_latlims(angle)
return [geometry.LineString(np.vstack((lon1, lat1)).T),
geometry.LineString(np.vstack((lon2, lat2)).T)]
elif shape_type=='cone':
lon, lat = stereonet_math.cone(90, 0, angle, segments=200)
return geometry.Polygon(np.vstack((lon[0], lat[0])).T)
elif shape_type=='daylight_envelope':
lon, lat = daylight_envelope(strike, dip)
return geometry.Polygon(np.hstack((lon[:-1], lat[:-1])))
elif shape_type=='flexural_envelope':
p_lon, p_lat = stereonet_math.plane(0, 1e-9) # perimeter
sl_lon, sl_lat = stereonet_math.plane(strike, dip-angle) # slip limit
lon = np.vstack((p_lon, np.flip(sl_lon[1:-1])))
lat = np.vstack((p_lat, np.flip(sl_lat[1:-1])))
return geometry.Polygon(np.hstack((lon, lat)))
elif shape_type=='wedge_envelope':
sf_lon, sf_lat = stereonet_math.plane(0, dip) # slope face
sl_lon, sl_lat = stereonet_math.plane(0, angle) # slip limit
lon = np.vstack((sf_lon, np.flip(sl_lon[1:-1])))
lat = np.vstack((sf_lat, np.flip(sl_lat[1:-1])))
return geometry.Polygon(np.hstack((lon, lat)))
def _set_kws(kws, polygon=False, color='None', edgecolor='None', alpha=None,
label=None):
"""
Set default kws for the kinematic analysis plot elements
"""
kws = {} if kws is None else kws
if 'lw' not in kws:
kws.setdefault('linewidth', 1)
if polygon:
if 'color' not in kws:
if 'fc' not in kws:
kws.setdefault('facecolor', color)
if 'ec' not in kws:
kws.setdefault('edgecolor', edgecolor)
kws.setdefault('alpha', alpha)
else:
if 'c' not in kws:
kws.setdefault('color', color)
kws.setdefault('label', label)
return kws
def _rotate_shape(shape, strike):
"""
Rotate the Shapely geometries according to a certain strike and return the
latitude, longitude arrays.
"""
if shape.geom_type == 'LineString':
lon, lat = shape.xy
elif shape.geom_type == 'Polygon':
lon, lat = shape.exterior.xy
lon = np.degrees(lon)
lat = np.degrees(lat)
lon, lat = stereonet_math._rotate(lon, lat, strike)
return np.array([lon, lat])
class PlanarSliding(object):
"""
Kinematic analysis for planar sliding failures
Parameters
----------
strike : number
The strike of the slope face in degrees, with dip direction indicated
by the azimuth (e.g. 315 vs. 135) specified following the "right hand
rule".
dip : number (> 0 and <90)
The dip of the slope face in degrees.
fric_angle : number, default=35
The friction angle along the sliding discontinuities, in degrees.
Note that the slope dip should be steeper than the friction angle, or
else no planar sliding zones can be generated.
latlim : number (> 0 and <90), default=20
The lateral limits for typical planar sliding failures, counted from
the dip direction of the slope face in degrees. Daylighting
discontinuities dipping steeper than friction angle but outside the
        lateral limits are considered to be less probable (i.e. secondary
failure zones).
"""
def __init__(self, strike, dip, fric_angle=35, latlim=20):
self.strike = strike
self.dip = dip
self.fric_angle = fric_angle
self.latlim = latlim
if latlim <= 0 or latlim >= 90:
raise ValueError('latlim must be > 0 and < 90')
if dip <= 0 or dip > 90:
raise ValueError('dip must be > 0 and <= 90')
if dip <= fric_angle:
raise ValueError('No planar sliding zones generated as the input'
' slope dip is shallower than the friction angle')
def check_failure(self, strikes, dips, curved_lateral_limits=True):
"""
Check whether planar sliding failures are kinematically feasible on a
sequence of discontinuities
Parameters
----------
strikes : numbers
The strikes of the discontinuities in degrees, with dip direction
indicated by the azimuth (e.g. 315 vs. 135) specified following the
"right hand rule".
dips : numbers
The dip angles of the discontinuities in degrees.
curved_lateral_limits : boolean
Consider lateral limits as curved lines (align with small circles)
if set to 'True'. Straight lines through the stereonet center are
used if set to 'False'. Defaults to 'True'
Returns
----------
        main: sequence of booleans
True if the discontinuity is in the main planar sliding zone
        secondary: sequence of booleans
True if the discontinuity is in the secondary planar sliding zone
"""
strikes = (strikes-self.strike)%360
dipdirs = (strikes+90)%360
if curved_lateral_limits:
lons, lats = stereonet_math.pole(strikes, dips)
lats = np.degrees(lats)
within_lat = ((lats >= -self.latlim-1e-8) & # with tolerance
(lats <= self.latlim+1e-8))
else:
within_lat = ((dipdirs >= 90-self.latlim) &
(dipdirs <= 90+self.latlim))
llons, llats = stereonet_math.line(dips, dipdirs)
llons = np.degrees(llons)
daylight = llons >= 90-self.dip-1e-8 # with tolerance
fric_slip = dips >= self.fric_angle
main = within_lat & fric_slip & daylight
secondary = ~within_lat & fric_slip & daylight
return main, secondary
def plot_kinematic(self, secondary_zone=True, construction_lines=True,
slopeface=True, curved_lateral_limits=True,
main_kws=None, secondary_kws=None, lateral_kws=None,
friction_kws=None, daylight_kws=None, slope_kws=None,
ax=None):
"""
Generate the planar sliding kinematic analysis plot for pole vectors.
(Note: The discontinuity data to be used in conjunction with this plot
should be displayed as POLES)
This function plots the following elements on a StereonetAxes:
(1) main planar sliding zone
(2) secondary planar sliding zones
(3) construction lines, i.e. friction cone, lateral limits and
daylight envelope
(4) slope face
        (2)-(4) are optional. The style of the elements above can be specified
with their kwargs, or on the artists returned by this function later.
Parameters
----------
secondary_zone : boolean
Plot the secondary zones if set to True. Defaults to 'True'.
construction_lines : boolean
Plot the construction lines if set to True. Defaults to 'True'.
slopeface : boolean
Plot the slope face as a great-circle plane on stereonet. Defaults
to 'True'.
curved_lateral_limits : boolean
Plot curved lateral limits (align with small circles) if set to
True, or else will be plotted as straight lines through the
stereonet center. Defaults to 'True'
main_kws : dictionary
kwargs for the main planar sliding zone
(matplotlib.patches.Polygon)
secondary_kws : dictionary
kwargs for the secondary planar sliding zones
(matplotlib.patches.Polygon)
lateral_kws : dictionary
kwargs for the lateral limits (matplotlib.lines.Line2D)
friction_kws : dictionary
kwargs for the friction cone (matplotlib.patches.Polygon)
daylight_kws : dictionary
kwargs for the daylight envelope (matplotlib.patches.Polygon)
slope_kws : dictionary
kwargs for the slope face (matplotlib.lines.Line2D)
ax : StereonetAxes
The StereonetAxes to plot on. A new StereonetAxes will be generated
if set to 'None'. Defaults to 'None'.
Returns
-------
result : dictionary
A dictionary mapping each element of the kinematic analysis plot to
a list of the artists created. The dictionary has the following
keys:
- `main` : the main planar sliding zone
- `secondary` : the two secondary planar sliding zones
- `slope` : the slope face
- `daylight` : the daylight envelope
- `friction` : the friction cone
- `lateral` : the two lateral limits
"""
# Convert the construction lines into shapely linestrings / polygons
daylight_envelope = _shape('daylight_envelope', strike=0, dip=self.dip)
friction_cone = _shape('cone', angle=self.fric_angle)
if curved_lateral_limits:
lat_lim1, lat_lim2 = _shape('curved_latlims', angle=self.latlim)
else:
lat_lim1 = _shape('plane', strike=90-self.latlim, dip=90)
lat_lim2 = _shape('plane', strike=90+self.latlim, dip=90)
# Get the failure zones (as shapely polygons) from geometry interaction
sliding_zone = daylight_envelope.difference(friction_cone)
split_polys = ops.split(sliding_zone,lat_lim1)
sec_zone_present = len(split_polys)==2
if sec_zone_present:
if split_polys[0].intersects(lat_lim2):
sliding_zone, sec_zone1 = split_polys
else:
sec_zone1, sliding_zone = split_polys
split_polys = ops.split(sliding_zone,lat_lim2)
if split_polys[0].touches(sec_zone1):
sliding_zone, sec_zone2 = split_polys
else:
sec_zone2, sliding_zone = split_polys
# Start plotting
if ax==None:
figure, axes = subplots(figsize=(8, 8))
else:
axes = ax
# List of artists to be output
main = []
secondary = []
slope = []
daylight = []
friction = []
lateral = []
# Plot the main planar sliding zone
main_kws = _set_kws(main_kws, polygon=True,
color='r', alpha=0.3,
label='Potential Planar Sliding Zone')
main.extend(axes.fill(
*_rotate_shape(sliding_zone, self.strike), **main_kws))
# Plot the secondary planar sliding zones
if secondary_zone and sec_zone_present:
secondary_kws = _set_kws(secondary_kws, polygon=True,
color='yellow', alpha=0.3,
label='Secondary Planar Sliding Zone')
secondary_kws2 = secondary_kws.copy()
secondary_kws2.pop('label')
secondary.extend(axes.fill(
*_rotate_shape(sec_zone1, self.strike), **secondary_kws))
secondary.extend(axes.fill(
*_rotate_shape(sec_zone2, self.strike),**secondary_kws2))
# Plot the slope face
if slopeface:
slope_kws = _set_kws(slope_kws, color='k', label='Slope Face')
slope.extend(axes.plane(self.strike, self.dip, **slope_kws))
# Plot the construction lines (daylight envelope, friction cone
# and lateral limits)
if construction_lines:
daylight_kws = _set_kws(daylight_kws, polygon=True, edgecolor='r')
friction_kws = _set_kws(friction_kws, polygon=True, edgecolor='r')
lateral_kws = _set_kws(lateral_kws, color='r')
lateral_kws2 = lateral_kws.copy()
lateral_kws2.pop('label')
daylight.extend(axes.fill(
*_rotate_shape(daylight_envelope, self.strike),**daylight_kws))
friction.extend(axes.fill(
*friction_cone.exterior.xy, **friction_kws))
lateral.extend(axes.plot(
*_rotate_shape(lat_lim1, self.strike), **lateral_kws))
lateral.extend(axes.plot(
*_rotate_shape(lat_lim2, self.strike), **lateral_kws2))
return dict(main=main, secondary=secondary, slope=slope,
daylight=daylight, friction=friction, lateral=lateral)
class WedgeSliding(object):
"""
Kinematic analysis for wedge sliding failures
Parameters
----------
strike : number
The strike of the slope face in degrees, with dip direction indicated
by the azimuth (e.g. 315 vs. 135) specified following the "right hand
rule".
dip : number (> 0 and <90)
The dip of the slope face in degrees.
fric_angle : number, default=35
The friction angle along the discontinuity intersections, in degrees.
Note that the slope dip should be steeper than the friction angle, or
else no wedge sliding zones can be generated.
"""
def __init__(self, strike, dip, fric_angle=35):
self.strike = strike
self.dip = dip
self.fric_angle = fric_angle
if self.dip <= self.fric_angle:
raise ValueError('No wedge sliding zones generated as the input'
' slope dip is shallower than the friction angle.')
def check_failure(self, bearings, plunges):
"""
Check whether wedge sliding failures are kinematically feasible for a
sequence of discontinuity intersection lines
Parameters
----------
        bearings : number or sequence of numbers
            The bearing (azimuth) of the intersection line(s) in degrees.
        plunges : number or sequence of numbers
The plunge of the line(s) in degrees. The plunge is measured in
degrees downward from the end of the feature specified by the
bearing.
Returns
----------
        main: sequence of booleans
True if the discontinuity is in the main wedge sliding zone
        secondary: sequence of booleans
True if the discontinuity is in the secondary wedge sliding zone
"""
bearings = (bearings-self.strike)%360
llons, llats = stereonet_math.line(plunges, bearings)
llons = np.degrees(llons)
daylight = llons >= 90-self.dip-1e-8 # with tolerance
slip = plunges >= self.fric_angle
planar = llons <= 90-self.fric_angle+1e-8 # with tolerance
main = slip & daylight
secondary = ~slip & daylight & planar
return main, secondary
def plot_kinematic(self, secondary_zone=True, construction_lines=True,
slopeface=True, main_kws=None, secondary_kws=None,
friction_kws=None, fplane_kws=None, slope_kws=None,
ax=None):
"""
Generate the wedge sliding kinematic analysis plot for dip vectors.
(Note: This plot is used to analyze intersection lines between planes
of discontinuities, displayed as "line" features instead of poles)
This function plots the following elements on a StereonetAxes:
(1) main wedge sliding zone
(2) secondary wedge sliding zones
(3) construction lines, i.e. friction cone and friction plane
(4) slope face
        (2)-(4) are optional. The style of the elements above can be specified
with their kwargs, or on the artists returned by this function later.
Parameters
----------
secondary_zone : boolean
Plot the secondary zones if set to True. Defaults to 'True'.
construction_lines : boolean
Plot the construction lines if set to True. Defaults to 'True'.
slopeface : boolean
Plot the slope face as a great-circle plane on stereonet. Defaults
to 'True'.
main_kws : dictionary
kwargs for the main wedge sliding zone
(matplotlib.patches.Polygon)
secondary_kws : dictionary
kwargs for the secondary wedge sliding zones
(matplotlib.patches.Polygon)
        friction_kws : dictionary
            kwargs for the friction cone (matplotlib.patches.Polygon)
        fplane_kws : dictionary
            kwargs for the friction plane (matplotlib.lines.Line2D)
slope_kws : dictionary
kwargs for the slope face (matplotlib.lines.Line2D)
ax : StereonetAxes
The StereonetAxes to plot on. A new StereonetAxes will be generated
if set to 'None'. Defaults to 'None'.
Returns
-------
result : dictionary
A dictionary mapping each element of the kinematic analysis plot to
a list of the artists created. The dictionary has the following
keys:
- `main` : the main wedge sliding zone
- `secondary` : the secondary wedge sliding zones (it's one polygon)
- `slope` : the slope face
- `friction` : the friction cone
- `fplane` : the friction plane
"""
# Convert the construction lines into shapely linestrings / polygons
# -1e-2 to prevent secondary zone splitting into two polygons
friction_cone = _shape('cone', angle=90-self.fric_angle-1e-2)
envelope = _shape('wedge_envelope', strike=0,
dip=self.dip, angle=self.fric_angle)
# Get the failure zones (as shapely polygons) from geometry interaction
wedge_zone = envelope.intersection(friction_cone)
sec_zone = envelope.difference(friction_cone)
# Plotting
if ax==None:
figure, axes = subplots(figsize=(8, 8))
else:
axes = ax
# List of artists to be output
main = []
secondary = []
slope = []
friction = []
fplane = []
# Plot the main wedge sliding zone
main_kws = _set_kws(main_kws, polygon=True,
color='r', alpha=0.3,
label='Potential Wedge Sliding Zone')
main.extend(axes.fill(
*_rotate_shape(wedge_zone, self.strike), **main_kws))
        # Plot the secondary wedge sliding zones
if secondary_zone:
secondary_kws = _set_kws(secondary_kws, polygon=True,
color='yellow', alpha=0.3,
label='Secondary Wedge Sliding Zone')
secondary.extend(axes.fill(
*_rotate_shape(sec_zone, self.strike), **secondary_kws))
# Plot the slope face
if slopeface:
slope_kws = _set_kws(slope_kws, color='k', label='Slope Face')
slope.extend(axes.plane(self.strike, self.dip, **slope_kws))
# Plot the construction lines (friction cone and friction plane)
if construction_lines:
friction_kws = _set_kws(friction_kws, polygon=True, edgecolor='r')
fplane_kws = _set_kws(fplane_kws, color='r')
friction.extend(axes.fill(
*friction_cone.exterior.xy, **friction_kws))
fplane.extend(axes.plane(
self.strike, self.fric_angle, **fplane_kws))
return dict(main=main, secondary=secondary, slope=slope,
friction=friction, fplane=fplane)
class FlexuralToppling(object):
"""
Kinematic analysis for flexural toppling failures
Parameters
----------
strike : number
The strike of the slope face in degrees, with dip direction indicated
by the azimuth (e.g. 315 vs. 135) specified following the "right hand
rule".
dip : number (> 0 and <90)
The dip of the slope face in degrees.
fric_angle : number, default=35
The friction angle along the toppling discontinuities, in degrees.
Note that the slope dip should be steeper than the friction angle, or
else no toppling zones can be generated.
latlim : number (> 0 and <90), default=20
The lateral limits for typical flexural toppling failures, counted from
the dip direction of the slope face in degrees. Discontinuities dipping
steeper than the slip limit for flexural toppling but outside the
        lateral limits are considered to be less probable (i.e. secondary
failure zones).
"""
def __init__(self, strike, dip, fric_angle=35, latlim=20):
self.strike = strike
self.dip = dip
self.fric_angle = fric_angle
self.latlim = latlim
if latlim <= 0 :
raise ValueError('latlim must be greater than 0 degree.')
if latlim >= 90 :
raise ValueError('latlim must be smaller than 90 degrees.'
' Try 90-1e-9 if you really need to use 90.')
if self.dip <= self.fric_angle:
raise ValueError('No flexural toppling zones generated as the input'
' slope dip is shallower than the friction angle')
def check_failure(self, strikes, dips, curved_lateral_limits=True):
"""
Check whether flexural toppling failures are kinematically feasible on
a sequence of discontinuities
Parameters
----------
strikes : numbers
The strikes of the discontinuities in degrees, with dip direction
indicated by the azimuth (e.g. 315 vs. 135) specified following the
"right hand rule".
dips : numbers
The dip angles of the discontinuities in degrees.
curved_lateral_limits : boolean
Consider lateral limits as curved lines (align with small circles)
if set to 'True'. Straight lines through the stereonet center are
used if set to 'False'. Defaults to 'True'
Returns
----------
        main: sequence of booleans
True if the discontinuity is in the main flexural toppling zone
        secondary: sequence of booleans
True if the discontinuity is in the secondary flexural toppling zone
Note: This is not normally considered
"""
strikes = (strikes-self.strike)%360
dipdirs = (strikes+90)%360
lons, lats = stereonet_math.pole(strikes, dips)
lats = np.degrees(lats)
lons = np.degrees(lons)
if curved_lateral_limits:
within_lat = ((lats >= -self.latlim-1e-8) & # with tolerance
(lats <= self.latlim+1e-8))
else:
within_lat = ((dipdirs >= 270-self.latlim) &
(dipdirs <= 270+self.latlim))
fric_slip = lons >= 90-self.dip+self.fric_angle-1e-8 # with tolerance
main = within_lat & fric_slip
secondary = ~within_lat & fric_slip
return main, secondary
def plot_kinematic(self, secondary_zone=False, construction_lines=True,
slopeface=True, curved_lateral_limits=True,
main_kws=None, secondary_kws=None, lateral_kws=None,
slip_kws=None, slope_kws=None,
ax=None):
"""
Generate the flexural toppling kinematic analysis plot for pole vectors.
(Note: The discontinuity data to be used in conjunction with this plot
should be displayed as POLES)
This function plots the following elements on a StereonetAxes:
(1) main flexural toppling zone
(2) secondary flexural toppling zones (not normally considered)
(3) construction lines, i.e. slip limit and lateral limits
(4) slope face
        (2)-(4) are optional. The style of the elements above can be specified
with their kwargs, or on the artists returned by this function later.
Parameters
----------
secondary_zone : boolean
Plot the secondary zones if set to True. This is not normally
considered. I just leave this option in case some users find it
useful. Defaults to 'False'.
construction_lines : boolean
Plot the construction lines if set to True. Defaults to 'True'.
slopeface : boolean
Plot the slope face as a great-circle plane on stereonet. Defaults
to 'True'.
curved_lateral_limits : boolean
Plot curved lateral limits (align with small circles) if set to
True, or else will be plotted as straight lines through the
stereonet center. Defaults to 'True'
main_kws : dictionary
kwargs for the main flexural toppling zone
(matplotlib.patches.Polygon)
secondary_kws : dictionary
kwargs for the secondary flexural toppling zones
(matplotlib.patches.Polygon)
lateral_kws : dictionary
kwargs for the lateral limits (matplotlib.lines.Line2D)
slip_kws : dictionary
kwargs for the slip limit (matplotlib.lines.Line2D)
slope_kws : dictionary
kwargs for the slope face (matplotlib.lines.Line2D)
ax : StereonetAxes
The StereonetAxes to plot on. A new StereonetAxes will be generated
if set to 'None'. Defaults to 'None'.
Returns
-------
result : dictionary
A dictionary mapping each element of the kinematic analysis plot to
a list of the artists created. The dictionary has the following
keys:
- `main` : the main flexural toppling zone
- `secondary` : the two secondary flexural toppling zones
- `slope` : the slope face
- `slip` : the slip limit
- `lateral` : the two lateral limits
"""
# Convert the construction lines into shapely linestrings / polygons
envelope = _shape('flexural_envelope', strike=0, dip=self.dip,
angle=self.fric_angle)
if curved_lateral_limits:
lat_lim1, lat_lim2 = _shape('curved_latlims', angle=self.latlim)
else:
lat_lim1 = _shape('plane', strike=90+self.latlim, dip=90)
lat_lim2 = _shape('plane', strike=90-self.latlim, dip=90)
# Get the failure zones (as shapely polygons) from geometry interaction
sec_zone1, toppling_zone = ops.split(envelope, lat_lim1)
toppling_zone, sec_zone2 = ops.split(toppling_zone, lat_lim2)
# Plotting
if ax==None:
figure, axes = subplots(figsize=(8, 8))
else:
axes = ax
# List of artists to be output
main = []
secondary = []
slope = []
slip = []
lateral = []
# Plot the main flexural toppling sliding zone
main_kws = _set_kws(main_kws, polygon=True,
color='r', alpha=0.3,
label='Potential Flexural Toppling Zone')
main.extend(axes.fill(
*_rotate_shape(toppling_zone, self.strike), **main_kws))
# Plot the secondary flexural toppling zones
if secondary_zone:
secondary_kws = _set_kws(secondary_kws, polygon=True,
color='yellow', alpha=0.3,
label='Secondary Flexural Toppling Zone')
secondary_kws2 = secondary_kws.copy()
secondary_kws2.pop('label')
secondary.extend(axes.fill(
*_rotate_shape(sec_zone1, self.strike), **secondary_kws))
secondary.extend(axes.fill(
*_rotate_shape(sec_zone2, self.strike), **secondary_kws2))
# Plot the slope face
if slopeface:
slope_kws = _set_kws(slope_kws, color='k', label='Slope Face')
slope.extend(axes.plane(self.strike, self.dip, **slope_kws))
# Plot the construction lines (friction cone and slip limit)
if construction_lines:
slip_kws = _set_kws(slip_kws, color='r')
lateral_kws = _set_kws(lateral_kws, color='r')
lateral_kws2 = lateral_kws.copy()
lateral_kws2.pop('label')
slip.extend(axes.plane(
self.strike, self.dip-self.fric_angle, **slip_kws))
lateral.extend(axes.plot(
*_rotate_shape(lat_lim1, self.strike), **lateral_kws))
lateral.extend(axes.plot(
*_rotate_shape(lat_lim2, self.strike), **lateral_kws2))
return dict(main=main, secondary=secondary, slope=slope,
slip=slip, lateral=lateral)
``` |
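A minimal usage sketch for the kinematic analysis classes defined above; the slope geometry and discontinuity sets below are illustrative values, not taken from the module.
```python
# Sketch only: strike/dip values are hypothetical.
import matplotlib.pyplot as plt
import numpy as np
from mplstereonet.kinematic_analysis import PlanarSliding

strikes = np.array([130, 145, 300])
dips = np.array([55, 70, 40])

pa = PlanarSliding(strike=120, dip=65, fric_angle=35, latlim=20)
main, secondary = pa.check_failure(strikes, dips)   # boolean masks per discontinuity
artists = pa.plot_kinematic()                       # dict of matplotlib artists
plt.show()
```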
{
"source": "jndean/railway",
"score": 3
} |
#### File: railway/lib/pegparsing.py
```python
from collections import namedtuple
Token = namedtuple("Token", ["type", "string", "line", "col"])
class BaseParser:
def __init__(self, token_generator):
self.gen = token_generator
self.tokens = []
self.token_pos = 0
self.memos = {}
def mark(self):
return self.token_pos
def reset(self, pos):
self.token_pos = pos
def peek_token(self):
if self.token_pos == len(self.tokens):
self.tokens.append(next(self.gen))
return self.tokens[self.token_pos]
def get_token(self):
token = self.peek_token()
self.token_pos += 1
return token
def expect(self, arg):
token = self.peek_token()
if token and token.type == arg:
return self.get_token()
return None
def get_last_tokens(self, n=1):
return self.tokens[-n:]
def memoise(func):
def memoise_wrapper(self, *args):
pos = self.mark()
memo = self.memos.get(pos)
if memo is None:
memo = self.memos[pos] = {}
key = (func, args)
if key in memo:
res, endpos = memo[key]
self.reset(endpos)
else:
res = func(self, *args)
endpos = self.mark()
memo[key] = res, endpos
return res
return memoise_wrapper
def memoise_left_recursive(func):
def memoise_left_rec_wrapper(self, *args):
pos = self.mark()
memo = self.memos.get(pos)
if memo is None:
memo = self.memos[pos] = {}
key = (func, args)
if key in memo:
res, endpos = memo[key]
self.reset(endpos)
else:
memo[key] = lastres, lastpos = None, pos
while True:
self.reset(pos)
res = func(self, *args)
endpos = self.mark()
if endpos <= lastpos:
break
memo[key] = lastres, lastpos = res, endpos
res = lastres
self.reset(lastpos)
return res
return memoise_left_rec_wrapper
``` |
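`BaseParser` plus the two memoisation decorators form a packrat PEG parser with support for left-recursive rules. Below is a sketch of a tiny grammar built on top of them; the token types (`'NUMBER'`, `'PLUS'`) and the tokenizer feeding the parser are assumptions, not part of the module.
```python
# Sketch only: assumes a tokenizer yielding Token namedtuples whose .type is
# 'NUMBER' or 'PLUS', terminated by an end-of-stream token.
class ExprParser(BaseParser):

    @memoise_left_recursive
    def expr(self):
        # expr : expr '+' term | term
        pos = self.mark()
        lhs = self.expr()
        if lhs is not None and self.expect('PLUS'):
            rhs = self.term()
            if rhs is not None:
                return ('add', lhs, rhs)
        self.reset(pos)
        return self.term()

    @memoise
    def term(self):
        return self.expect('NUMBER')
```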
{
"source": "jndevanshu/tagger",
"score": 3
} |
#### File: jndevanshu/tagger/my_loader.py
```python
import os
import re
import codecs
import random
import numpy as np  # used by prepare_sentence / prepare_dataset below
from utils import create_dico, create_mapping, zero_digits, create_input
from utils import iob2, iob_iobes, iob_bin
from collections import defaultdict
from ccg_nlpy.core.text_annotation import TextAnnotation
from ccg_nlpy.core.view import View
def load_sentences(path, lower, zeros, ratio=1.0):
"""
Load sentences. A line must contain at least a word and its tag.
Sentences are separated by empty lines.
"""
file_list = os.listdir(path)
sentences = []
label_list = set()
for doc in file_list:
print("Reading " + os.path.join(path, doc))
document = TextAnnotation(json_str=open(os.path.join(path, doc)).read())
ner_labels = document.view_dictionary['NER_CONLL'].cons_list
if ner_labels is None:
ner_labels = []
ner_dict = {}
for ner_constituent in ner_labels:
for span in range(ner_constituent['start'], ner_constituent['end']):
if span-ner_constituent['start'] == 0:
ner_dict[span] = "B-" + ner_constituent['label']
else:
ner_dict[span] = "I-" + ner_constituent['label']
if ner_dict[span] not in label_list:
label_list.add(ner_dict[span])
print(ner_dict[span])
try:
sentences_cons_list = document.view_dictionary['SENTENCE'].cons_list
except KeyError as e:
sentences_cons_list = []
start = 0
for end in document.sentence_end_position:
sent = " ".join(document.tokens[start:end])
sentences_cons_list.append({'tokens': sent, 'start': start, 'end': end})
start = end
for sent_constituent in sentences_cons_list:
sentence = []
sent = re.split("\s+", sent_constituent['tokens'])
start = sent_constituent['start']
end = sent_constituent['end']
for token, span in zip(sent, range(start, end)):
if span in ner_dict:
sentence.append([token, ner_dict[span]])
else:
sentence.append([token, "O"])
if len(sentence) > 1:
sentences.append(sentence)
random.shuffle(sentences)
train_sentences = sentences[:int(ratio*len(sentences))]
dev_sentences = sentences[int(ratio*len(sentences)):]
return train_sentences, dev_sentences
def update_tag_scheme(sentences, tag_scheme):
"""
Check and update sentences tagging scheme to IOB2.
Only IOB1 and IOB2 schemes are accepted.
"""
for i, s in enumerate(sentences):
tags = [w[-1] for w in s]
# Check that tags are given in the IOB format
if not iob2(tags):
s_str = '\n'.join(' '.join(w) for w in s)
raise Exception('Sentences should be given in IOB format! ' +
'Please check sentence %i:\n%s' % (i, s_str))
if tag_scheme == 'iob':
# If format was IOB1, we convert to IOB2
for word, new_tag in zip(s, tags):
word[-1] = new_tag
elif tag_scheme == 'iobes':
new_tags = iob_iobes(tags)
for word, new_tag in zip(s, new_tags):
word[-1] = new_tag
elif tag_scheme == 'bin':
new_tags = iob_bin(tags)
for word, new_tag in zip(s, new_tags):
word[-1] = new_tag
else:
raise Exception('Unknown tagging scheme!')
def word_mapping(sentences, lower):
"""
Create a dictionary and a mapping of words, sorted by frequency.
"""
words = [[x[0].lower() if lower else x[0] for x in s] for s in sentences]
dico = create_dico(words)
dico['<UNK>'] = 10000000
word_to_id, id_to_word = create_mapping(dico)
print "Found %i unique words (%i in total)" % (
len(dico), sum(len(x) for x in words)
)
return dico, word_to_id, id_to_word
def char_mapping(sentences):
"""
Create a dictionary and mapping of characters, sorted by frequency.
"""
chars = ["".join([w[0] for w in s]) for s in sentences]
dico = create_dico(chars)
char_to_id, id_to_char = create_mapping(dico)
print "Found %i unique characters" % len(dico)
return dico, char_to_id, id_to_char
def tag_mapping(sentences):
"""
Create a dictionary and a mapping of tags, sorted by frequency.
"""
tags = [[word[-1] for word in s] for s in sentences]
dico = create_dico(tags)
tag_to_id, id_to_tag = create_mapping(dico)
print "Found %i unique named entity tags" % len(dico)
return dico, tag_to_id, id_to_tag
def brown_mapping(filename):
"""
Create Brown mapping
"""
with open(filename) as f:
data = f.readlines()
data = [(re.split("\s+", line.strip())[1], re.split("\s+", line.strip())[0]) for line in data if len(line.strip()) > 0]
dict_brown = defaultdict(lambda: 0)
brown_to_id = {"<UNK>": 0}
id_to_brown = {0: "<UNK>"}
idx = 0
for (entity, tag) in data:
if tag not in brown_to_id:
brown_to_id[tag] = idx + 1
id_to_brown[idx + 1] = tag
idx += 1
dict_brown[entity.lower()] = brown_to_id[tag]
return dict_brown, brown_to_id, id_to_brown
def gazetteer_mapping(filename):
"""
Create gazetteer mapping
"""
with open(filename) as f:
data = f.readlines()
data = [(line.strip().split(";")[1], line.strip().split(";")[2]) for line in data if len(line.strip()) > 0]
dict_gtr = defaultdict(lambda: 0)
gtr_to_id = {"<UNK>": 0, "G": 1}
id_to_gtr = {0: "<UNK>", 1: "G"}
idx = 0
# for (_, tag) in data:
# if "B-" + tag not in gtr_to_id:
# gtr_to_id["B-" + tag] = idx + 1
# id_to_gtr[idx + 1] = "B-" + tag
# idx += 1
# if "I-" + tag not in gtr_to_id:
# gtr_to_id["I-" + tag] = idx + 1
# id_to_gtr[idx + 1] = "I-" + tag
# idx += 1
for (entity, tag) in data:
token = re.split("\s+", entity)
for idx in range(len(token)):
if idx == 0:
dict_gtr[token[idx].lower()] = 1 # gtr_to_id["B-" + tag]
else:
dict_gtr[token[idx].lower()] = 1 # gtr_to_id["I-" + tag]
return dict_gtr, gtr_to_id, id_to_gtr
def cap_feature(s):
"""
Capitalization feature:
0 = low caps
1 = all caps
2 = first letter caps
3 = one capital (not first letter)
"""
if s.lower() == s:
return 0
elif s.upper() == s:
return 1
elif s[0].upper() == s[0]:
return 2
else:
return 3
def gazetteer_feature(s, gazetteer_list):
if s.lower() in gazetteer_list:
return gazetteer_list[s.lower()]
else:
return 0
def brown_feature(s, brown_dict):
if s.lower() in brown_dict:
return brown_dict[s.lower()]
else:
return 0
def prepare_sentence(str_words, word_to_id, char_to_id, gazetteer_list={}, brown_dict={}, l1_model=None, l1_f_eval=None, lower=False):
"""
Prepare a sentence for evaluation.
"""
def f(x): return x.lower() if lower else x
words = [word_to_id[f(w) if f(w) in word_to_id else '<UNK>']
for w in str_words]
chars = [[char_to_id[c] for c in w if c in char_to_id]
for w in str_words]
caps = [cap_feature(w) for w in str_words]
gazetteer = [gazetteer_feature(w, gazetteer_list) for w in str_words]
brown = [brown_feature(w, brown_dict) for w in str_words]
sent = {
'str_words': str_words,
'words': words,
'chars': chars,
'caps': caps,
'gazetteer': gazetteer,
'brown': brown
}
if l1_model is not None:
input = create_input(sent, l1_model.parameters, False)
try:
if l1_model.parameters['crf']:
                y_preds = np.array(l1_f_eval(*input))[1:-1]
else:
                y_preds = l1_f_eval(*input).argmax(axis=1)
y_preds = [l1_model.id_to_tag[y_pred] for y_pred in y_preds]
except Exception as e:
y_preds = ["O"] * len(str_words)
sent['pred'] = [0 if y_pred == "O" else 1 for y_pred in y_preds]
return sent
def prepare_dataset(sentences, word_to_id, char_to_id, gazetteer_list, brown_dict, tag_to_id, l1_model, l1_f_eval, lower=False):
"""
Prepare the dataset. Return a list of lists of dictionaries containing:
- word indexes
- word char indexes
- tag indexes
"""
def f(x): return x.lower() if lower else x
data = []
for s in sentences:
str_words = [w[0] for w in s]
words = [word_to_id[f(w) if f(w) in word_to_id else '<UNK>']
for w in str_words]
# Skip characters that are not in the training set
chars = [[char_to_id[c] for c in w if c in char_to_id]
for w in str_words]
caps = [cap_feature(w) for w in str_words]
gazetteer = [gazetteer_feature(w, gazetteer_list) for w in str_words]
brown = [brown_feature(w, brown_dict) for w in str_words]
sent = {
'str_words': str_words,
'words': words,
'chars': chars,
'caps': caps,
'gazetteer': gazetteer,
'brown': brown,
}
if l1_model is not None:
input = create_input(sent, l1_model.parameters, False)
try:
if l1_model.parameters['crf']:
                    y_preds = np.array(l1_f_eval(*input))[1:-1]
else:
                    y_preds = l1_f_eval(*input).argmax(axis=1)
y_preds = [l1_model.id_to_tag[y_pred] for y_pred in y_preds]
except Exception as e:
y_preds = ["O"] * len(str_words)
sent['pred'] = [0 if y_pred == "O" else 1 for y_pred in y_preds]
tags = [tag_to_id[w[-1]] for w in s]
sent['tags'] = tags
data.append(sent)
return data
def augment_with_pretrained(dictionary, ext_emb_path, words):
"""
Augment the dictionary with words that have a pretrained embedding.
If `words` is None, we add every word that has a pretrained embedding
to the dictionary, otherwise, we only add the words that are given by
`words` (typically the words in the development and test sets.)
"""
    print('Loading pretrained embeddings from %s...' % ext_emb_path)
assert os.path.isfile(ext_emb_path)
# Load pretrained embeddings from file
pretrained = set([
line.rstrip().split()[0].strip()
for line in codecs.open(ext_emb_path, 'r', 'utf-8', errors='ignore')
if len(ext_emb_path) > 0
])
# We either add every word in the pretrained file,
# or only words given in the `words` list to which
# we can assign a pretrained embedding
if words is None:
for word in pretrained:
if word not in dictionary:
dictionary[word] = 0
else:
for word in words:
if any(x in pretrained for x in [
word,
word.lower(),
re.sub('\d', '0', word.lower())
]) and word not in dictionary:
dictionary[word] = 0
word_to_id, id_to_word = create_mapping(dictionary)
return dictionary, word_to_id, id_to_word
``` |
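A short sketch of how these loaders are typically chained; the directory path and flag values are hypothetical.
```python
# Sketch only: 'data/train_json/' stands in for a directory of TextAnnotation JSON files.
train_sents, dev_sents = load_sentences('data/train_json/', lower=True, zeros=False, ratio=0.9)
update_tag_scheme(train_sents, 'iobes')

dico_words, word_to_id, id_to_word = word_mapping(train_sents, lower=True)
dico_chars, char_to_id, id_to_char = char_mapping(train_sents)
dico_tags, tag_to_id, id_to_tag = tag_mapping(train_sents)
```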
{
"source": "JNDib/brdashproject",
"score": 2
} |
#### File: brdashsite/batchrecords/utils.py
```python
import io
from xhtml2pdf import pisa
from django.template.loader import get_template
from django.http import HttpResponse
from cgi import escape
def render_to_pdf(template_src, context_dict):
template = get_template(template_src)
    # Django templates loaded via get_template() render from a plain dict
    html = template.render(context_dict)
    result = io.BytesIO()
    # pisaDocument expects byte streams, so wrap the encoded HTML in BytesIO
    pdf = pisa.pisaDocument(io.BytesIO(html.encode("ISO-8859-1")), result)
if not pdf.err:
return HttpResponse(result.getvalue(), content_type='application/pdf')
return HttpResponse('We had some errors<pre>%s</pre>' % escape(html))
``` |
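A hypothetical Django view using the helper above; the view name, template path and context keys are made up for illustration.
```python
# Sketch only: template path and context keys are hypothetical.
from django.views import View

from .utils import render_to_pdf

class BatchRecordPDFView(View):
    def get(self, request, *args, **kwargs):
        context = {'batch_number': kwargs.get('pk')}
        return render_to_pdf('batchrecords/batch_record.html', context)
```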
{
"source": "JNDib/HarambeBot",
"score": 3
} |
#### File: JNDib/HarambeBot/harambe.py
```python
import json
import requests
import sys
import bs4
import pprint
import re
from http.server import BaseHTTPRequestHandler, HTTPServer
import os
import time
from random import randint
HOST_NAME = '0.0.0.0'
PORT_NUMBER = int(os.environ.get('PORT', 9000))
baseUrl = 'https://api.groupme.com/v3'
accessToken = '' # your access Token
tokenUrl = '?token=' + accessToken
bot_id = '' # insert your bot id
# Send HTTP POST request to post to group.
def post_group(content, pic_url):
postdo_post = '/bots/post'
resUrl = baseUrl + postdo_post
params = {'bot_id' : bot_id, 'text' : content, 'picture_url' : pic_url}
res = requests.post(resUrl, params)
res.raise_for_status()
def get_weather(city):
# uses google geocoding api to find a latitude and longitude for the city supplied
# uses the latitude and longitude in Dark Sky's weather api to get weather for the specific location
GOOGLEAPIKEY = '' # your key for Google's geocoding API
DARKSKYAPIKEY = '' # your key for Dark Sky's weather data API
city = city.replace(' ', '+') # replaces the space if state is also given e.g. 'gainesville, fl'
googlebaseURL = 'https://maps.googleapis.com/maps/api/geocode/json?address=%s&key=%s' % (city, GOOGLEAPIKEY) # URL for googles geocoding api
res = requests.get(googlebaseURL)
res.raise_for_status()
geocodeData = json.loads(res.text)
geocode = geocodeData['results'][0]['geometry']['location']
latitude = geocode['lat']
longitude = geocode['lng']
darkskybaseURL = 'https://api.darksky.net/forecast/%s/%s,%s' % (DARKSKYAPIKEY, latitude, longitude)
res = requests.get(darkskybaseURL)
res.raise_for_status()
weatherData = json.loads(res.text)
degree_sign= u'\N{DEGREE SIGN}' # degree unicode character
post_group(weatherData['currently']['summary'] + ', ' + str(weatherData['currently']['apparentTemperature']) + degree_sign + 'F. ' + weatherData['hourly']['summary'] + '\n\n' + weatherData['daily']['summary'], None)
def all_league_scores():
# Posts all league scores for your ESPN fantasy football league
leagueId = '' # insert your ESPN leagueId
seasonId = '' # insert season year
scoreboardUrl = 'http://games.espn.com/ffl/scoreboard?leagueId=%s&seasonId=%s' % (leagueId, seasonId)
res = requests.get(scoreboardUrl)
res.raise_for_status()
soup = bs4.BeautifulSoup(res.text, 'html.parser')
tag = soup.find_all(class_=['score', 'name', 'owners'])
    matchups = []
    for i in range(0, 36, 6):
        # Each matchup spans six tags: name/score for both teams
        matchups.append(tag[i].get_text() + ': ' + tag[i + 2].get_text() + '\n'
                        + tag[i + 3].get_text() + ': ' + tag[i + 5].get_text())
    message = '\n\n'.join(matchups)
post_group(message, None)
def get_matchup_score(user_id):
# posts the matchup score from ESPN for the user who asks
groupMembers = {}
""" ^ dictionary with key equal to groupme userID (from API)
and value equal to members name e.g {'000000':'Walter'} """
leagueId = '' # insert your ESPN leagueId
seasonId = '' # insert season year
scoreboardUrl = 'http://games.espn.com/ffl/scoreboard?leagueId=%s&seasonId=%s' % (leagueId, seasonId)
res = requests.get(scoreboardUrl)
res.raise_for_status()
soup = bs4.BeautifulSoup(res.text, 'html.parser')
scores_tag = soup.find_all(class_='score')
names_tag = soup.find_all(class_='name')
owners_tag = soup.find_all(class_='owners')
score_content_line1 = None
score_content_line2 = None
for i in range(0, 12):
if owners_tag[i].get_text().lower().split(' ')[0] == groupMembers[user_id].lower().split(' ')[0]:
score_content_line1 = names_tag[i].get_text() + ': ' + scores_tag[i].get_text()
if i in range(1, 12, 2):
score_content_line2 = names_tag[i-1].get_text() + ': ' + scores_tag[i-1].get_text()
else:
score_content_line2 = names_tag[i+1].get_text() + ': ' + scores_tag[i+1].get_text()
post_group(str(score_content_line1) + '\n' + str(score_content_line2), None)
i += 1
def get_last_message():
if 'rip harambe' in message_lower:
rip_harambe_list = ['https://img.ifcdn.com/images/ccb85b3923314524e7203fe0e4284bad6e1b01e42eda8550b9b8b7988cf6de5b_1.jpg', 'https://i.redd.it/33d6a5it8eix.png', 'http://i1.kym-cdn.com/photos/images/original/001/155/744/c2f.jpg', 'https://static1.squarespace.com/static/570a0f1f4c2f85652de746c9/570a10085559863dc7612dc9/57c8eb429de4bb1598ee2b40/1472873504170/HARAMBE+2.0+(CLEAN).jpg?format=1500w', 'https://getonfleek.com/pub/media/catalog/product/d/i/dicks_out_for_harambe_crewneck.png', 'http://i2.kym-cdn.com/photos/images/original/001/155/662/8c5.jpg', 'https://pics.onsizzle.com/trending-stephen-hawking-renowned-physicist-makes-1st-facebook-post-since-3030807.png', 'https://img.ifcdn.com/images/159f2467d9d557ab49311de6462365a2bd21804ad6ea135ca56aaa8b06599280_1.jpg', 'http://i.imgur.com/y5WoTDN.jpg']
rip_harambe_length = len(rip_harambe_list)-1
i = randint(0, rip_harambe_length)
post_group(None, rip_harambe_list[i])
elif 'harambe' in message_lower and 'my fantasy' in message_lower:
get_matchup_score(my_id)
elif 'harambe' in message_lower and 'league scores' in message_lower:
all_league_scores()
elif 'harambe' in message_lower and 'resurrect' in message_lower:
post_group(None, 'https://i.groupme.com/1200x799.jpeg.1c2ae1fd84214f9681cccfa65650bd42')
elif my_id == '': # insert a friends user_id for the bot to troll every so often
i = randint(1, 50)
if i == 25:
post_group("", None) # insert the message for the bot to post
elif 'harambe' in message_lower and 'weather' in message_lower:
message_lower_index = message_lower.index('in')
target = message_lower[message_lower_index + 3:].strip('?')
get_weather(target)
elif 'harambe' in message_lower and my_id != '13439387':
post_group(sender_name + ', come in my cage you neanderthal and see what happens.', None)
class RequestHandler(BaseHTTPRequestHandler):
def do_POST(s):
# Respond to a post request
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(bytes("Response success", "utf8"))
content = int(s.headers.get_all('Content-Length')[0])
post_body_bytes = s.rfile.read(content)
post_body = post_body_bytes.decode('ascii')
recentdata = json.loads(post_body)
global original_message, message_lower, sender_name_lower, my_id, sender_name
original_message = recentdata['text']
message_lower = original_message.lower()
sender_name = recentdata['name']
print(sender_name)
print(original_message)
sender_name_lower = sender_name.lower()
my_id = str(recentdata['user_id'])
print(my_id)
get_last_message()
if __name__ == '__main__':
server_class = HTTPServer
handler_class = BaseHTTPRequestHandler
server_address = (HOST_NAME, PORT_NUMBER)
httpd = server_class(server_address, RequestHandler)
print(time.asctime(), "Server Starts - %s:%s" % (HOST_NAME, PORT_NUMBER))
try:
httpd.serve_forever()
except KeyboardInterrupt:
pass
httpd.server_close()
print(time.asctime(), "Server Stops - %s:%s" % (HOST_NAME, PORT_NUMBER))
``` |
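With the server running locally, a callback can be simulated by posting a GroupMe-style payload to it; the values below are made up.
```python
# Sketch only: simulates the JSON body GroupMe would POST to the bot's callback URL.
import json
import requests

payload = {'text': 'rip harambe', 'name': 'Tester', 'user_id': '000000'}
requests.post('http://localhost:9000/', data=json.dumps(payload))
```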
{
"source": "jnduli/edx_anaconda_machine_learning",
"score": 3
} |
#### File: edx_anaconda_machine_learning/Module6/assignment3.py
```python
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.svm import SVC
from sklearn import preprocessing
from sklearn.decomposition import PCA
from sklearn.manifold import Isomap
def test_PCA(df, y):
best_score = 0
for i in range(4,15):
pca = PCA(n_components = i)
pca.fit(df)
d = pca.transform(df)
score = get_best_score(d, y)
if score>best_score:
best_score=score
return best_score
def test_Isomap(df,y):
best_score =0
for i in range(2,6):
for j in range(4,7):
iso = Isomap(n_neighbors=i, n_components=j)
iso.fit(df)
d = iso.transform(df)
score= get_best_score(d,y)
if score>best_score:
best_score = score
return best_score
def test_pca_isomap(df,y):
pca = test_PCA(df, y)
iso = test_Isomap(df, y)
if pca>iso:
return pca
else:
return iso
def get_best_score(X,y):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=7)
best_score =0
for i in np.arange(0.05, 2.05, 0.05):
for j in np.arange(0.001, 0.101, 0.001):
svc = SVC(C=i, gamma=j)
svc.fit(X_train, y_train)
score = svc.score(X_test, y_test)
            # print(score)
if score > best_score:
best_score = score
return best_score
X = pd.read_csv('Datasets/parkinsons.data')
y = X['status'].copy()
X.drop(labels=['name','status'], axis=1, inplace=True)
from sklearn.cross_validation import train_test_split
df = X
T= preprocessing.StandardScaler().fit_transform(df)
print "Standard Scaler :" , test_pca_isomap(T,y)
# T = preprocessing.MinMaxScaler().fit_transform(df)
#print "Min Max Scaler ", get_best_score(T,y)
#T = preprocessing.MaxAbsScaler().fit_transform(df)
#print "Max abs scaler :", get_best_score(T,y)
#T = preprocessing.Normalizer().fit_transform(df)
#print "Normalizer :", get_best_score(T,y)
``` |
{
"source": "jnecus/nilearn",
"score": 2
} |
#### File: nilearn/plotting/__init__.py
```python
import os
import sys
###############################################################################
# Make sure that we don't get DISPLAY problems when running without X on
# unices
def _set_mpl_backend():
# We are doing local imports here to avoid polluting our namespace
try:
import matplotlib
except ImportError:
from .._utils.testing import skip_if_running_tests
# No need to fail when running tests
skip_if_running_tests('matplotlib not installed')
raise
else:
from ..version import (_import_module_with_version_check,
OPTIONAL_MATPLOTLIB_MIN_VERSION)
# When matplotlib was successfully imported we need to check
# that the version is greater that the minimum required one
_import_module_with_version_check('matplotlib',
OPTIONAL_MATPLOTLIB_MIN_VERSION)
current_backend = matplotlib.get_backend().lower()
if 'inline' in current_backend or 'nbagg' in current_backend:
return
# Set the backend to a non-interactive one for unices without X
if (os.name == 'posix' and 'DISPLAY' not in os.environ
and not (sys.platform == 'darwin'
and 'macosx' in current_backend)
):
matplotlib.use('Agg')
_set_mpl_backend()
###############################################################################
from . import cm
from .img_plotting import plot_img, plot_anat, plot_epi, \
plot_roi, plot_stat_map, plot_glass_brain, plot_connectome, \
plot_connectome_strength, plot_prob_atlas, show
from .find_cuts import find_xyz_cut_coords, find_cut_slices, \
find_parcellation_cut_coords, find_probabilistic_atlas_cut_coords
from .matrix_plotting import plot_matrix
from .html_surface import view_surf, view_img_on_surf
from .html_stat_map import view_img
from .html_connectome import view_connectome, view_markers
from .surf_plotting import plot_surf, plot_surf_stat_map, plot_surf_roi
__all__ = ['cm', 'plot_img', 'plot_anat', 'plot_epi',
'plot_roi', 'plot_stat_map', 'plot_glass_brain',
'plot_connectome_strength', 'plot_connectome', 'plot_prob_atlas',
'find_xyz_cut_coords', 'find_cut_slices',
'show', 'plot_matrix', 'view_surf', 'view_img_on_surf',
'view_img', 'view_connectome', 'view_markers',
'find_parcellation_cut_coords', 'find_probabilistic_atlas_cut_coords',
'plot_surf', 'plot_surf_stat_map', 'plot_surf_roi'
]
``` |
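A minimal sketch of the re-exported plotting API in use (not part of this module):
```python
# Sketch only: plots the MNI152 template shipped with nilearn.
from nilearn import datasets, plotting

template = datasets.load_mni152_template()
plotting.plot_anat(template, title='MNI152 template')
plotting.show()
```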
{
"source": "jnecus/ukbiobank-tools",
"score": 3
} |
#### File: ukbiobank/filtering/filtering.py
```python
import ukbiobank
# import ukbiobank.utils
# from ukbiobank.utils import fieldNamesToIds, addFields
def filterInstancesArrays(ukbio=None, df=None, instances=None, arrays=None):
"""
Parameters
----------
ukbio : ukbio object, mandatory
df : pandas dataframe (generated using ukbio loadCsv)
instances : List of integers. Default is none (include all instances)
arrays : List of integers. Default is none (include all arrays)
Returns
-------
Dataframe with datafields filtered for selected instances/arrays : Pandas dataframe
"""
# If instances is not None and is not list, convert to list
if instances is not None and not isinstance(instances, list):
instances = [instances]
    # if every column name contains letters (i.e. field names rather than
    # numeric field IDs), convert the headers to field IDs
    if df.columns.str.contains("[a-z]").all():
df = ukbiobank.utils.fieldNamesToIds(ukbio, df)
field_instance_array_df_temp = ukbio.field_instance_array_df.copy()
# filtering by instances
if instances is not None:
field_instance_array_df_temp = field_instance_array_df_temp[
field_instance_array_df_temp["instance"].isin(instances)
]
# filtering by arrays
if arrays is not None:
field_instance_array_df_temp = field_instance_array_df_temp[
field_instance_array_df_temp["array"].isin(arrays)
]
# Finding intersection of "Dataframe" field_instance_arrays & "ALL" field_instance_arrays
cols = list(
set(df.columns.tolist())
& set(field_instance_array_df_temp["id_instance_array"].tolist())
)
if "eid" not in cols:
cols.append("eid")
return df[cols]
def filterByField(
ukbio=None, df=None, fields_to_include=None, instances=[0, 1, 2, 3], arrays=None
):
"""
Parameters
----------
ukbio : ukbio object, mandatory
df : pandas dataframe (currently only accepts FieldID headers as column headers)
fields_to_include: Dictionary whereby keys: 'fields to include', values:'values to include'
*FIELDS IN FIELDS_TO_INCLUDE MUST BE IN FIELD_ID FORM* e.g. '20002' (not 'Self-reported Illness') *
*VALUES IN FIELDS_TO_INCLUDE MUST BE IN CODED FORM* e.g. '1074', (not 'angina') *
instances : list of integers, Default is [0,1,2,3] (include all instances)
arrays : list of integers
Returns
-------
Pandas dataframe with data-fields filtered for selected fields, values, instances, arrays.
*This function uses 'OR' logic, i.e. if any of the values/fields included are present then they will be included*
"""
# Account for df = None, or if fields are not found in df, then add them
if df is None:
# Add all fields_to include
df = ukbiobank.utils.addFields(
ukbio=ukbio, fields=list(fields_to_include.keys())
)
else:
# Convert df headings to fieldid-instance.array
df = ukbiobank.utils.fieldNamesToIds(ukbio=ukbio, df=df)
# Checking for missing fields
df_fields = []
for f in df.columns.tolist():
df_fields.append(f.split("-")[0])
unique_df_fields = list(set(df_fields))
fields_to_add = []
for f in list(fields_to_include.keys()):
if f not in unique_df_fields:
fields_to_add.append(f)
if len(fields_to_add) > 0:
df = ukbiobank.utils.addFields(ukbio=ukbio, df=df, fields=fields_to_add)
# TODO (account for text/ids/mixture etc...)
# convert keys/values from text --> id
# Once all headers are raw Field IDs, and table values are encoded IDs..
"""Below here expected format is e.g. 'eid' '20002-1.0'
1437784 12633
"""
matched_eids = [] # list to collect eids for which a match is found
for field, value in fields_to_include.items():
for instance in instances:
field_instance = str(field) + "-" + str(instance)
# matching all columns with field/instance
field_instance_arrays = [
col for col in df if col.startswith(field_instance)
]
if len(field_instance_arrays) > 0:
field_instance_arrays.append("eid")
# Is there a matching value in any column given the list of values
if not isinstance(value, list):
value = [value]
temp_df = df[field_instance_arrays].isin(value)
# If any column is true, then keep that row (i.e. that 'eid' row)
temp_df = df[field_instance_arrays][temp_df.any(axis=1)]
matched_eids.extend(temp_df["eid"].tolist())
return df[df["eid"].isin(matched_eids)]
```
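A minimal sketch of `filterByField` in use, reusing the example codes from the docstring above (field 20002 = self-reported illness, coded value 1074 = angina); the csv path is a placeholder.
```python
# Sketch only: csv path is a placeholder.
import ukbiobank
from ukbiobank.filtering.filtering import filterByField

ukb = ukbiobank.ukbio(ukb_csv='path/to/ukbiobank_data.csv')
angina_df = filterByField(ukbio=ukb, fields_to_include={'20002': ['1074']})
```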
#### File: ukbiobank/gui/output_csv.py
```python
import wx
from wx.lib.agw import pybusyinfo
import ukbiobank
import ukbiobank.filtering.filtering
class M_AM_B(wx.Frame, ukbiobank.ukbio):
pass
class OutputCsvFrame(wx.Frame, ukbiobank.ukbio):
__metaclass__ = M_AM_B
def __init__(self, parent, ukb):
super().__init__(parent=parent, title="Output CSV", size=wx.DefaultSize)
with wx.FileDialog(
self,
"Save output as csv",
wildcard="csv files (*.csv)|*.csv",
style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT,
) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return # the user changed their mind
# Getting filepath
pathname = fileDialog.GetPath()
# Getting selections
s = ukb.SELECTIONS
if s is None:
wx.MessageDialog(self, message="No selections were made.").ShowModal()
return
else:
busy = pybusyinfo.PyBusyInfo(message="Saving csv, please wait...")
# Adding fields to dataframe
if "include_variables" in s and len(s["include_variables"]) > 0:
df = ukbiobank.utils.utils.addFields(
ukbio=ukb, fields=s["include_variables"]
)
# Filtering dataframe according to 'include_illness' selections
if "include_illnesses" in s and len(s["include_illnesses_coded"]) > 0:
df = ukbiobank.filtering.filtering.filterByField(
ukbio=ukb, df=df, fields_to_include=s["include_illnesses_coded"]
)
try:
# Saving
df.to_csv(pathname, index=False)
except IOError:
wx.LogError("Cannot save current data in file '%s'." % pathname)
busy.Show(show=False)
wx.MessageBox(
"CSV saved to: {0}".format(pathname),
"Info",
wx.OK | wx.ICON_INFORMATION,
)
```
#### File: ukbiobank/gui/ukbio_gui.py
```python
import wx
from ukbiobank.gui.load_frame import LoadFrame
# Open GUI
def open():
app = wx.App()
LoadFrame()
app.MainLoop()
return
```
#### File: ukbiobank-tools/ukbiobank/ukbio.py
```python
import os
import pandas as pd
import ukbiobank
class ukbio:
"""
Parameters
----------
ukb_csv : String, mandatory
        Path to ukbiobank csv file.
Example usage::
import ukbiobank
ukb = ukbiobank.ukbio(ukb_csv='path/to/ukbiobank_data.csv')
Returns
-------
ukbio object.
ukbio objects are required as an input when using ukbiobank-tools functions.
ukbio objects contain import information such as:
- variable codings
- path to ukbiobank csv file
"""
def __init__(self, ukb_csv=None):
file_path = os.path.dirname(__file__)
self.csv_path = ukb_csv
self.data_dict = pd.read_csv(
os.path.join(file_path, "data_coding", "Data_Dictionary_Showcase.csv")
)
# Illness/medication codings
# TODO continue insert coding dictionaries.. (for medications etc..)
self.nonCancerIllnessCoding = pd.read_table(
os.path.join(file_path, "data_coding", "coding6.tsv")
)
self.icd9Coding = pd.read_table(
os.path.join(file_path, "data_coding", "coding87.tsv")
)
self.icd10Coding = pd.read_table(
os.path.join(file_path, "data_coding", "coding19.tsv")
)
# Variable/instance codings
self.field_instance_array_df = ukbiobank.utils.getFieldsInstancesArrays(
ukb_csv=self.csv_path, data_dict=self.data_dict
)
``` |
{
"source": "jnederlo/DSCI532_Group204_Vino",
"score": 3
} |
#### File: DSCI532_Group204_Vino/src/varieties_heatmap.py
```python
import altair as alt
import pandas as pd
from vega_datasets import data
from vino_themes import vino_special_heatmap
def wrangle_varieties(df):
"""
Wrangle the data to group by state.
Parameters:
-----------
df -- (pandas DataFrame) Cleaned data in a dataframe.
Returns the data grouped by variety
"""
# Group and aggregate data by wine varieties
variety_df = df.groupby(['variety']).size().reset_index(name='counts')
variety_df = variety_df.sort_values(by='counts')
popular_varieties = variety_df.query('counts > 500')['variety']
# Filter the data set to include only popular grape varieties
varieties_plot_data = df[df['variety'].isin(popular_varieties.tolist())]
return varieties_plot_data
def plot_heatmap(df, x_name='price'):
"""
Plot a heatmap of showing the average value of wines from popular grape varieties at a range of price points.
Parameters:
-----------
df -- (pandas DataFrame) Cleaned data in a dataframe.
    Returns an Altair plot object.
"""
# register the custom theme under a chosen name
alt.themes.register('vino_special_heatmap', vino_special_heatmap)
# enable the newly registered theme
alt.themes.enable('vino_special_heatmap')
varieties_chart_data = wrangle_varieties(df)
if x_name == 'price':
varieties_heatmap_plot = alt.Chart(varieties_chart_data.query('price < 50')).mark_rect().encode(
x=alt.X(x_name + ':Q',
bin=alt.Bin(maxbins=10),
title="Price ($)"),
y=alt.Y('variety:O',
title="Grape Variety"),
color=alt.Color('average(value_scaled):Q',
scale=alt.Scale(scheme="bluepurple"),
legend=alt.Legend(
orient='right', title="Average Value")
),
tooltip=[alt.Tooltip('average(points):Q', format='.2f'),
alt.Tooltip('average(price)', format='$.2f'),
alt.Tooltip('average(value_scaled)', format='.2f'),
alt.Tooltip('count(title)')]
).properties(
title="Average Value Scores for Popular Grape Varieties, by Price"
).configure_axis(
grid=False,
labelAngle=0
)
if x_name == 'points':
varieties_heatmap_plot = alt.Chart(varieties_chart_data).mark_rect().encode(
x=alt.X('points:Q',
bin=alt.Bin(maxbins=10),
title="Rating"),
y=alt.Y('variety:O',
title="Grape Variety"),
color=alt.Color('average(value_scaled):Q',
scale=alt.Scale(scheme="bluepurple"),
legend=alt.Legend(
orient='right', title="Average Value")
),
tooltip=[alt.Tooltip('average(points):Q', format='.2f'),
alt.Tooltip('average(price)', format='$.2f'),
alt.Tooltip('average(value_scaled)', format='.2f'),
alt.Tooltip('count(title)')]
).properties(
title="Average Value Scores for Popular Grape Varieties, by Rating"
).configure_axis(
grid=False,
labelAngle=0
)
return varieties_heatmap_plot
if __name__ == "__main__":
df = pd.read_csv('../data/cleaned_data.csv', index_col=0)
plot_heatmap(df)
``` |
{
"source": "jneer/knifedge",
"score": 3
} |
#### File: knifedge/knifedge/tracing.py
```python
import numpy as np
from scipy.optimize import curve_fit
import attr
import matplotlib.pyplot as plt
import matplotlib
import yaml
matplotlib.rc('font', family='DejaVu Sans')
#TODO: use ODR instead of curve_fit to include z-error: http://stackoverflow.com/questions/26058792/correct-fitting-with-scipy-curve-fit-including-errors-in-x
#
@attr.s
class BeamProfile:
"""A cross-sectional profile of a Gaussian beam.
"""
z = attr.ib(default=0)
w = attr.ib(default=1)
z_error = attr.ib(default=0)
w_error = attr.ib(default=0)
class BeamProfileSampled(BeamProfile):
"""Beam profile sampled by intensity measurements at multiple positions
of the knife edge.
"""
@attr.s
class BeamTrace:
"""A trace of the size of a Gaussian beam along its axis.
"""
label = attr.ib(default="")
wavelength = attr.ib(default=1550)
profiles = attr.ib(default=attr.Factory(list))
fit_params = attr.ib(default=None)
fit_params_error = attr.ib(default=None)
def __init__(self):
self.profiles = []
self.fit_params = None
def add_profile(self, profile, update_fit=True):
self.profiles.append(profile)
self.sort_profiles()
if update_fit:
self.fit_trace()
def sort_profiles(self):
self.profiles.sort(key=lambda _: _.z)
def spotsize(self, z, z0, w0, m2=1):
zR = np.pi * w0**2 / (1e-6 * self.wavelength * m2)
return w0 * np.sqrt(1 + ((z - z0) / zR)**2)
def fit_trace(self, p0=None):
z = [p.z for p in self.profiles]
w = [p.w for p in self.profiles]
if p0 is None:
p0 = [z[w.index(min(w))],
min(w),
1]
w_error = [p.w_error for p in self.profiles]
sigma = w_error if all(w_error) else None
absolute_sigma = all(w_error)
bounds = ([-np.inf, 0, 1], [np.inf, np.inf, np.inf])
popt, pcov = curve_fit(self.spotsize, z, w, p0, sigma, absolute_sigma,
bounds=bounds)
self.fit_params = popt
self.fit_params_error = np.sqrt(np.diag(pcov))
print(self.format_fit_result())
def format_fit_result(self):
p_strings = ['z₀: {:.1f} ± {:.1f} mm',
'w₀: {:.4f} ± {:.4f} mm',
'M²: {:.2f} ± {:.2f}']
return '\n'.join([s.format(p, e) for s, p, e in
zip(p_strings, self.fit_params,
self.fit_params_error)])
def profile_8416(z=0, x84=0, x16=1, x_error=0, z_error=0):
"""Create BeamProfile from a 84/16 measurement.
"""
    data = {'method': '84/16', 'inputs': locals()}
w = abs(x84 - x16)
if x_error is not None:
w_error = np.sqrt(2) * x_error
else:
w_error = None
profile = BeamProfile(z, w, z_error, w_error)
profile._data = data
return profile
def profile_9010(z=0, x90=0, x10=1, x_error=0, z_error=0):
"""Create BeamProfile from a 90/10 measurement.
"""
data = {'method': '90/10', 'inputs': locals()}
w = abs(x90 - x10) / 1.28
if x_error is not None:
w_error = np.sqrt(2) * x_error
else:
w_error = None
profile = BeamProfile(z, w, z_error, w_error)
profile._data = data
return profile
def traces_from_file(filename):
with open(filename, 'r') as file:
yaml_data = list(yaml.safe_load_all(file))
traces = []
for trace_data in yaml_data:
try:
z_offset = trace_data['z_offset']
dz = trace_data['dz']
measurements = trace_data['measurements']
label = trace_data['label']
wavelength = trace_data['wavelength']
method = trace_data['method']
except KeyError as err:
print('Missing key:', err)
return
assert(len(dz) == len(measurements))
assert(method in ['90/10', '84/16'])
trace = BeamTrace(label, wavelength)
if method == '84/16':
x_error = trace_data.get('x_error', 0)
z_error = trace_data.get('z_error', 0)
for _dz, _meas in zip(dz, measurements):
trace.add_profile(profile_8416(z_offset + _dz, *_meas,
x_error=x_error,
z_error=z_error),
update_fit=False)
if method == '90/10':
x_error = trace_data.get('x_error', 0)
z_error = trace_data.get('z_error', 0)
for _dz, _meas in zip(dz, measurements):
trace.add_profile(profile_9010(z_offset + _dz, *_meas,
x_error=x_error,
z_error=z_error),
update_fit=False)
print('\nBeam trace:', label)
print('Method: {} | z_offset: {} mm | Wavelength: {} nm'.format(
method, z_offset, wavelength))
print('--- Fit result from {} profiles: ---'.format(len(dz)))
trace.fit_trace()
print('------------------------------------')
traces.append(trace)
return traces
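# Example input for traces_from_file (a sketch of the expected YAML layout, inferred
# from the parser above; all numbers are made up):
#
#   label: collimated probe beam
#   wavelength: 1550        # nm
#   method: "84/16"
#   z_offset: 0             # mm
#   x_error: 0.01           # mm
#   z_error: 1              # mm
#   dz: [0, 100, 200]
#   measurements:           # one [x84, x16] pair per dz entry
#     - [1.20, 0.80]
#     - [1.35, 0.65]
#     - [1.55, 0.45]
#
# Several traces can live in one file as separate YAML documents separated by `---`.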
def plot_trace(trace, fig=None, ax=None, figsize=(8, 6)):
if not fig and not ax:
fig, ax = plt.subplots(1, 1, figsize=figsize)
z = [p.z for p in trace.profiles]
w = [p.w for p in trace.profiles]
w_error = [p.w_error for p in trace.profiles]
ax.errorbar(z, w, w_error, fmt='.k')
ax.set_xlabel('z [mm]')
ax.set_ylabel('w [mm]')
    ax.set_ylim(bottom=0)
ax.set_title(trace.label)
if trace.fit_params is not None:
zs = np.linspace(min(z), max(z), 200)
ws = trace.spotsize(zs, *trace.fit_params)
ax.plot(zs, ws)
ax.text(.1, .9, trace.format_fit_result(),
verticalalignment='top',
                transform=ax.transAxes,
bbox=dict(facecolor='red', alpha=0.2))
return fig, ax
def test_code():
traces = traces_from_file('test-tracings.yml')
print(traces)
plot_trace(traces[0])
plt.show()
# error = .03
# t = BeamTrace("test", 1550)
# z = np.linspace(0, 600, 7)
# w = t.spotsize(z, 100, .3, 1) * np.random.normal(1, error, len(z))
# z_error = np.zeros(len(z))
# w_error = np.ones(len(z)) * error
#
# print(z)
# print(w)
#
# profiles = list(map(BeamProfile, z, w, z_error, w_error))
# t = BeamTrace("test", .001550, profiles)
#
# print(t)
#
# for p in [p1, p2, p3, p4]:
# t.add_profile(p, update_fit=True)
# print(t)
# t.fit_trace()
# t.plot_trace()
# plt.show()
if __name__ == '__main__':
test_code()
``` |
{
"source": "jneer/MrMustard",
"score": 2
} |
#### File: mrmustard/lab/detectors.py
```python
from mrmustard.types import *
from mrmustard.utils.parametrized import Parametrized
from mrmustard.lab.abstract import State, FockMeasurement
from mrmustard.physics import fock, gaussian
from mrmustard.lab.states import DisplacedSqueezed, Coherent
from mrmustard import settings
from mrmustard.math import Math
math = Math()
__all__ = ["PNRDetector", "ThresholdDetector", "Homodyne", "Heterodyne"]
class PNRDetector(Parametrized, FockMeasurement):
r"""Photon Number Resolving detector.
If ``len(modes) > 1`` the detector is applied in parallel to all of the modes provided.
If a parameter is a single float, the parallel instances of the detector share that parameter.
    To apply mode-specific parameters use a list of floats. The number of modes is determined (in order of priority)
by the modes parameter, the cutoffs parameter, or the length of the efficiency and dark counts parameters.
One can optionally set bounds for each parameter, which the optimizer will respect.
    It can be supplied with the full stochastic channel, or it will compute it from
    the quantum efficiency (binomial) and the dark count probability (poissonian).
Args:
efficiency (float or List[float]): list of quantum efficiencies for each detector
efficiency_trainable (bool): whether the efficiency is trainable
efficiency_bounds (Tuple[float, float]): bounds for the efficiency
dark_counts (float or List[float]): list of expected dark counts
dark_counts_trainable (bool): whether the dark counts are trainable
dark_counts_bounds (Tuple[float, float]): bounds for the dark counts
stochastic_channel (Optional 2d array): if supplied, this stochastic_channel will be used for belief propagation
modes (Optional List[int]): list of modes to apply the detector to
        cutoffs (int or List[int]): largest photon number measurement cutoff for each mode
"""
def __init__(
self,
efficiency: Union[float, List[float]] = 1.0,
dark_counts: Union[float, List[float]] = 0.0,
efficiency_trainable: bool = False,
dark_counts_trainable: bool = False,
efficiency_bounds: Tuple[Optional[float], Optional[float]] = (0.0, 1.0),
dark_counts_bounds: Tuple[Optional[float], Optional[float]] = (0.0, None),
stochastic_channel: Matrix = None,
modes: List[int] = None,
cutoffs: Union[int, List[int]] = None,
):
if modes is not None:
num_modes = len(modes)
elif cutoffs is not None:
num_modes = len(cutoffs)
else:
num_modes = max(len(math.atleast_1d(efficiency)), len(math.atleast_1d(dark_counts)))
if len(math.atleast_1d(efficiency)) == 1 and num_modes > 1:
efficiency = math.tile(math.atleast_1d(efficiency), [num_modes])
if len(math.atleast_1d(dark_counts)) == 1 and num_modes > 1:
dark_counts = math.tile(math.atleast_1d(dark_counts), [num_modes])
Parametrized.__init__(
self,
efficiency=efficiency,
dark_counts=dark_counts,
efficiency_trainable=efficiency_trainable,
dark_counts_trainable=dark_counts_trainable,
efficiency_bounds=efficiency_bounds,
dark_counts_bounds=dark_counts_bounds,
stochastic_channel=stochastic_channel,
modes=modes if modes is not None else list(range(num_modes)),
cutoffs=cutoffs if cutoffs is not None else [settings.PNR_INTERNAL_CUTOFF] * num_modes,
)
self.recompute_stochastic_channel()
def should_recompute_stochastic_channel(self):
return self._efficiency_trainable or self._dark_counts_trainable
def recompute_stochastic_channel(self, cutoffs: List[int] = None):
if cutoffs is None:
cutoffs = [settings.PNR_INTERNAL_CUTOFF] * len(self._modes)
self._internal_stochastic_channel = []
if self._stochastic_channel is not None:
self._internal_stochastic_channel = self._stochastic_channel
else:
for c, qe, dc in zip(
cutoffs, math.atleast_1d(self.efficiency)[:], math.atleast_1d(self.dark_counts)[:]
):
dark_prior = math.poisson(max_k=settings.PNR_INTERNAL_CUTOFF, rate=dc)
condprob = math.binomial_conditional_prob(
success_prob=qe, dim_in=settings.PNR_INTERNAL_CUTOFF, dim_out=c
)
self._internal_stochastic_channel.append(
math.convolve_probs_1d(
condprob, [dark_prior, math.eye(settings.PNR_INTERNAL_CUTOFF)[0]]
)
)
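# Minimal usage sketch (values are hypothetical): a two-mode PNR detector where each
# mode has its own quantum efficiency and both share the same expected dark counts.
#   pnr = PNRDetector(efficiency=[0.9, 0.85], dark_counts=0.01, cutoffs=[5, 5])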
class ThresholdDetector(Parametrized, FockMeasurement):
r"""Threshold detector: any Fock component other than vacuum counts toward a click in the detector.
If ``len(modes) > 1`` the detector is applied in parallel to all of the modes provided.
If a parameter is a single float, its value is applied to all of the parallel instances of the detector.
To apply mode-specific values use a list of floats.
    It can be supplied with the full conditional detection probabilities, or it will compute them from
the quantum efficiency (binomial) and the dark count probability (bernoulli).
Args:
efficiency (float or List[float]): list of quantum efficiencies for each detector
dark_count_prob (float or List[float]): list of dark count probabilities for each detector
efficiency_trainable (bool): whether the efficiency is trainable
dark_count_prob_trainable (bool): whether the dark count probabilities are trainable
efficiency_bounds (Tuple[float, float]): bounds for the efficiency
dark_count_prob_bounds (Tuple[float, float]): bounds for the dark count probabilities
stochastic_channel (Optional 2d array): if supplied, this stochastic_channel will be used for belief propagation
modes (Optional List[int]): list of modes to apply the detector to
"""
def __init__(
self,
efficiency: Union[float, List[float]] = 1.0,
dark_count_prob: Union[float, List[float]] = 0.0,
efficiency_trainable: bool = False,
dark_count_prob_trainable: bool = False,
efficiency_bounds: Tuple[Optional[float], Optional[float]] = (0.0, 1.0),
dark_count_prob_bounds: Tuple[Optional[float], Optional[float]] = (0.0, None),
stochastic_channel=None,
modes: List[int] = None,
):
if modes is not None:
num_modes = len(modes)
else:
num_modes = max(len(math.atleast_1d(efficiency)), len(math.atleast_1d(dark_count_prob)))
if len(math.atleast_1d(efficiency)) == 1 and num_modes > 1:
efficiency = math.tile(math.atleast_1d(efficiency), [num_modes])
if len(math.atleast_1d(dark_count_prob)) == 1 and num_modes > 1:
dark_count_prob = math.tile(math.atleast_1d(dark_count_prob), [num_modes])
Parametrized.__init__(
self,
efficiency=efficiency,
dark_count_prob=dark_count_prob,
efficiency_trainable=efficiency_trainable,
dark_count_prob_trainable=dark_count_prob_trainable,
efficiency_bounds=efficiency_bounds,
dark_count_prob_bounds=dark_count_prob_bounds,
stochastic_channel=stochastic_channel,
modes=modes or list(range(num_modes)),
cutoffs=[2] * num_modes,
)
self.recompute_stochastic_channel()
def should_recompute_stochastic_channel(self):
return self._efficiency_trainable or self._dark_count_prob_trainable
def recompute_stochastic_channel(self, cutoffs: List[int] = None):
if cutoffs is None:
cutoffs = [settings.PNR_INTERNAL_CUTOFF] * len(self._modes)
self._internal_stochastic_channel = []
if self._stochastic_channel is not None:
self._internal_stochastic_channel = self._stochastic_channel
else:
for cut, qe, dc in zip(
cutoffs,
math.atleast_1d(self.efficiency)[:],
math.atleast_1d(self.dark_count_prob)[:],
):
row1 = math.pow(1.0 - qe, math.arange(cut)[None, :]) - math.cast(
dc, self.efficiency.dtype
)
row2 = 1.0 - row1
# rest = math.zeros((cut - 2, cut), dtype=row1.dtype)
condprob = math.concat([row1, row2], axis=0)
self._internal_stochastic_channel.append(condprob)
class Heterodyne(Parametrized, State):
r"""Heterodyne measurement on given modes."""
def __new__(
cls,
x: Union[float, List[float]] = 0.0,
y: Union[float, List[float]] = 0.0,
modes: List[int] = None,
):
instance = Coherent(x=x, y=y, modes=modes)
instance.__class__ = cls # NOTE: naughty?
return instance
def __init__(self, *args, **kwargs):
pass
class Homodyne(Parametrized, State):
r"""Heterodyne measurement on given modes."""
def __new__(
cls,
quadrature_angle: Union[float, List[float]],
result: Union[float, List[float]] = 0.0,
modes: List[int] = None,
r: Union[float, List[float]] = None,
):
quadrature_angle = math.astensor(quadrature_angle, dtype="float64")
        result = math.astensor(result, dtype="float64")
x = result * math.cos(quadrature_angle)
y = result * math.sin(quadrature_angle)
instance = DisplacedSqueezed(
r=settings.HOMODYNE_SQUEEZING if r is None else math.astensor(r, dtype="float64"),
phi=2 * quadrature_angle,
x=x,
y=y,
)
instance.__class__ = cls
return instance
def __init__(self, *args, **kwargs):
pass
``` |
{
"source": "jneethling/WikiStats",
"score": 3
} |
#### File: WikiStats/src/handlers.py
```python
import os
import psutil
import json
import sqlite3
import threading
from datetime import datetime, timezone
from websocket import create_connection
class CustomHandler:
def __init__(self):
self.working = False
self.counter = 0
self.ws = None
if self.dbReady('./data/wiki_statsDB'):
self.setStatus(True, 'Function handler on standby')
else:
self.setStatus(False, 'Database error, cannot start service')
def dbReady(self, path) -> bool:
try:
self.db = sqlite3.connect(path, check_same_thread=False)
self.cursor = self.db.cursor()
self.cursor.execute('''CREATE TABLE IF NOT EXISTS stats(\
id INTEGER PRIMARY KEY,\
country_name TEXT,\
change_size INTEGER)''')
self.db.commit()
return True
except sqlite3.OperationalError:
return False
def worker(self, stop_event):
while not stop_event.is_set():
result = self.ws.recv()
country = None
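            # Each message is a JSON string; the fields read below look roughly like
            # (illustrative values only, not an exact wikimon schema):
            #   {"geo_ip": {"country_name": "Germany", ...}, "change_size": 42, ...}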
if "geo_ip" in result:
j_dict = json.loads(result)
geo = j_dict.get("geo_ip")
country = geo.get("country_name")
change = j_dict.get("change_size")
if change is None:
change = 0
if country is not None:
self.cursor.execute('''INSERT INTO stats(country_name, change_size) VALUES(?,?)''', (country, change))
self.db.commit()
self.counter += 1
def setStatus(self, status, msg):
self.status = status
self.message = msg
def getStatus(self) -> json:
stat_result = os.stat('./data/wiki_statsDB')
modified = datetime.fromtimestamp(stat_result.st_mtime, tz=timezone.utc).strftime("%m/%d/%Y, %H:%M:%S")
msg = {"Status": self.status, "Message": self.message, "Working in background": self.working, "Records in session": self.counter, "DB size (bytes)": stat_result.st_size, "Modified": modified}
return msg
def getMemory(self) -> json:
memory = 1024 * 1024
proc = psutil.Process(os.getpid())
mem0 = proc.memory_info().rss
msg = str(mem0/memory) + 'Mb'
return {'Memory use': msg}
def getTotals(self) -> json:
data = {}
self.cursor.execute('''SELECT country_name, SUM(change_size) FROM stats GROUP BY country_name''')
for row in self.cursor:
data[row[0]] = row[1]
msg = json.dumps(data)
return msg
def getCounts(self) -> json:
data = {}
self.cursor.execute('''SELECT country_name, COUNT(country_name) FROM stats GROUP BY country_name''')
for row in self.cursor:
data[row[0]] = row[1]
msg = json.dumps(data)
return msg
def stopWork(self) -> json:
        self.ws.close()
self.working = False
self.kill_switch.set()
self.t.join()
self.setStatus(True, 'Function handler on standby')
msg = 'Function handler background work stopped'
return {'message': msg}
def startWork(self) -> json:
if self.working:
msg = 'Function handler already working in background, ignoring request'
return {"message": msg}
else:
self.ws = create_connection("ws://wikimon.hatnote.com:9000")
self.working = True
self.setStatus(True, 'Function handler working in background')
self.kill_switch = threading.Event()
self.t = threading.Thread(target=self.worker, args=(self.kill_switch,))
self.t.start()
msg = 'Function handler background work started'
return {'message': msg}
``` |
{
"source": "jneeven/pybash",
"score": 3
} |
#### File: pybash/pybashrc/execute.py
```python
import inspect
import os
import sys
import traceback
from pathlib import Path
import click
import pybashrc.pybashrc_link as pybashrc_link
_INSTALL_DIR = Path(os.environ["PYBASHRC_INSTALL_DIR"])
# If pybashrc contains an __all__, simply import all functions from there
if hasattr(pybashrc_link, "__all__"):
_FUNCTIONS = {}
for name in getattr(pybashrc_link, "__all__"):
object = getattr(pybashrc_link, name)
if inspect.isfunction(object) or isinstance(object, click.Command):
_FUNCTIONS[name] = object
# If not, import all functions that are in its scope that do not start with _ and
# actually originate from the file itself (i.e. they must not be imported)
else:
_FUNCTIONS = {}
for name in dir(pybashrc_link):
if name.startswith("_"):
continue
object = getattr(pybashrc_link, name)
if (
isinstance(object, click.Command)
# click commands are incompatible with inspect.getfile
and inspect.getfile(object.callback) == pybashrc_link.__file__
):
_FUNCTIONS[name] = object
elif (
inspect.isfunction(object)
and inspect.getfile(object) == pybashrc_link.__file__
):
_FUNCTIONS[name] = object
def _get_function_info(func):
"""Create a string containing the function name, its arguments and the docstring if
there is any."""
command = None
# If this is a click command, use its wrapped function for the time being
if isinstance(func, click.Command):
command = func
func = command.callback
string = f"{func.__name__}{inspect.signature(func)}"
if func.__doc__:
string += f"\n {func.__doc__}"
# After printing the regular info, print the click help string if applicable
if command:
# Needs some extra indentation
help_string = command.get_help(click.Context(command)).replace("\n", "\n ")
string += f"\n {help_string}"
return string
def _update_aliases():
"""Create a bash alias for all of the user-defined functions in ~/.pybashrc.py."""
aliases = (_INSTALL_DIR / "templates" / ".pybashrc_aliases").read_text()
for name in _FUNCTIONS.keys():
aliases += f"alias {name}='pybash {name}'\n"
(_INSTALL_DIR / ".pybashrc_aliases").write_text(aliases)
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Available functions:")
for function in _FUNCTIONS.values():
print(f"- {_get_function_info(function)}\n")
exit(0)
name = sys.argv[1]
# System command not intended to be accessed by the user
if name == "_update_aliases":
_update_aliases()
exit(0)
# Check if the function exists
if name not in _FUNCTIONS.keys():
raise ValueError(f"pybashrc received unknown function name {name}!")
function = _FUNCTIONS[name]
# If this is a click command, let click handle the rest
if isinstance(function, click.Command):
function.main(sys.argv[2:], prog_name=name)
# Otherwise, parse arguments and keyword arguments
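    # e.g. invoking a registered function as `pybash somefunc foo bar=baz`
    # (hypothetical name) yields args=['foo'] and kwargs={'bar': 'baz'} below.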
args = []
kwargs = {}
for arg in sys.argv[2:]:
if "=" in arg:
key, value = arg.split("=")
kwargs[key] = value
else:
args.append(arg)
# Call function
try:
result = _FUNCTIONS[name](*args, **kwargs)
if result is not None:
print(result)
except TypeError:
# Print the exception without exiting
traceback.print_exc()
# Provide info on how the function should have been called instead
print(f"\nFunction usage:\n{_get_function_info(_FUNCTIONS[name])}")
sys.exit(1)
``` |
{
"source": "jnefoussi/pytechfin",
"score": 2
} |
#### File: pytechfin/pytechfin/carol_sync_monitoring.py
```python
from .misc import get_tenant_techfin
from .enums import EnumApps
class CarolSyncMonitoring:
def __init__(self, techfin):
self.techfin = techfin
def get_pks(self, dm_name, techfin_app, techfin_tenant=None, carol_tenant=None, page_size=1000,
page=1, debug=False, max_hits=None):
"""Get PKs from a data model
Args:
dm_name (str): Data model name
techfin_app (str): techfin app name.
techfin_tenant (str, optional): techfin tenant id. Defaults to None.
            carol_tenant (str, optional): carol tenant name. Defaults to None.
            page_size (int, optional): number of records to get in each iteration. Defaults to 1000.
            page (int, optional): initial page from which to start fetching records. Defaults to 1.
debug (bool, optional): show debug logs.
max_hits (int): Number of records to return.
Returns:
list: List of PKs
"""
max_hits = max_hits or float('inf')
if (techfin_tenant is None and carol_tenant is None):
techfin_tenant = self.techfin.techfin_tenant
if not EnumApps.exists_value(techfin_app):
raise ValueError(
                f'techfin_app invalid. Value used: {techfin_app}. Check pytechfin.enums.EnumApps')
techfin_tenant_id = get_tenant_techfin(
techfin_tenant=techfin_tenant, carol_tenant=carol_tenant)
total_data = []
params = {
"dataModel": dm_name,
"page": page,
"pageSize": page_size
}
count = 0
while True:
data = self.techfin.call_api(path=f"provisioner/api/v1/carol-sync-monitoring/{techfin_tenant_id}/ids",
techfin_app=techfin_app, method='GET', params=params, )
if(len(data) == 0) or count>=max_hits:
break
count += len(data)
total_data.extend(data)
params['page'] += 1
if debug:
# TODO: use loggers?
print("total loaded: ", len(total_data), " &page=" +
str(page) + " &pageSize=" + str(page_size))
total_data = [d.replace('-', '') for d in total_data]
return total_data
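    # Usage sketch (data model and app names are hypothetical):
    #   csm = CarolSyncMonitoring(techfin)
    #   pks = csm.get_pks(dm_name='arinvoice', techfin_app='cashflow', page_size=500)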
def get_table_record_count(self, techfin_app, techfin_tenant=None, carol_tenant=None):
"""Get number of records per table in techfin
Args:
techfin_app (str): techfin app name.
techfin_tenant (str, optional): techfin tenant id. Defaults to None.
carol_tenant (str, optional): carol tenant name. Defaults to Nonte.
Returns:
list of dict: counts per data model.
"""
if not EnumApps.exists_value(techfin_app):
raise ValueError(
                f'techfin_app invalid. Value used: {techfin_app}. Check pytechfin.enums.EnumApps')
techfin_tenant_id = get_tenant_techfin(
techfin_tenant=techfin_tenant, carol_tenant=carol_tenant)
r = self.techfin.call_api(path=f'provisioner/api/v1/carol-sync-monitoring/{techfin_tenant_id}/table-record-count',
method='GET', techfin_app=techfin_app)
return r
def get_data_by_pk(self, dm_name, techfin_app, pk_list, techfin_tenant=None, carol_tenant=None, page_size=1000, page=1, debug=False, return_dataframe=True):
"""Get PKs from a data model
Args:
dm_name (str): Data model name
techfin_app (str): techfin app name.
pk_list (list): list of keys to get.
techfin_tenant (str, optional): techfin tenant id. Defaults to None.
            carol_tenant (str, optional): carol tenant name. Defaults to None.
            page_size (int, optional): number of records to get in each iteration. Defaults to 1000.
            page (int, optional): initial page from which to start fetching records. Defaults to 1.
debug (bool, optional): show debug logs.
return_dataframe (bool, optional): Return a pandas DataFrame
Returns:
(list of dict, pd.DataFrame):
"""
if (techfin_tenant is None and carol_tenant is None):
techfin_tenant = self.techfin.techfin_tenant
if not EnumApps.exists_value(techfin_app):
raise ValueError(
                f'techfin_app invalid. Value used: {techfin_app}. Check pytechfin.enums.EnumApps')
techfin_tenant_id = get_tenant_techfin(
techfin_tenant=techfin_tenant, carol_tenant=carol_tenant)
total_data = []
params = {
"page": page,
"pageSize": page_size
}
while True:
data = self.techfin.call_api(path=f"provisioner/api/v1/datamodel/{techfin_tenant_id}/{dm_name}",
techfin_app=techfin_app, method='POST', params=params, data=pk_list, )
if(len(data) == 0):
break
total_data.extend(data)
params['page'] += 1
if debug:
# TODO: use loggers?
print("total loaded: ", len(total_data), " &page=" +
str(page) + " &pageSize=" + str(page_size))
if return_dataframe:
import pandas as pd
return pd.DataFrame(total_data)
return total_data
```
#### File: pytechfin/pytechfin/carol_techfin.py
```python
from collections import defaultdict
import pandas as pd
# TODO: Add custom pipeline function from
# https://github.com/rafarui/techfin-reprocess/blob/master/functions/custom_pipeline.py
# TODO: Add track_tasks function from
# https://github.com/rafarui/techfin-reprocess/blob/master/functions/carol_task.py
class CarolTechfin:
""" Module to handle Carol's data.
    This class is meant to be attached to the Carol object as an extra module (see `Context`, which registers it as `caroltechfin`).
"""
def __init__(self, carol):
self.carol = carol
def get_staging_data(self, staging_name,
connector_name='protheus_carol', merge_records=True, columns=None, callback=None, max_workers=30):
""" Get records from a staging table.
Args:
staging_name: `str`,
Staging name to fetch parquet of
merge_records: `bool`, default `True`
This will keep only the most recent record exported. Sometimes there are updates and/or deletions and
one should keep only the last records.
columns: `list`, default `None`
List of columns to fetch.
callback: `callable`, default `None`
Function to be called each downloaded file.
max_workers: `int` default `30`
Number of workers to use when downloading parquet files with pandas back-end.
Returns: `pandas.DataFrame`
DataFrame with the staging data.
"""
# number of workers to download in parallel
max_workers=max_workers
# if you want to download a few columns, ["COLUMNS", "TO", "FETCH"]
col=columns
# maximum records to fetch. P.S.: only works if `max_workers=None`
max_hits=None
# if metadata should be returned (mdmId, mdmLastUpdated, etc)
return_metadata = True
# if records with duplicated ids should be consolidated by pyCarol
merge_records = merge_records
#connector + staging table
connector_name=connector_name
staging = staging_name
# file_pattern = '2021-02'
file_pattern = None
df = self.carol.staging.fetch_parquet(
staging_name=staging,
connector_name=connector_name,
max_workers=max_workers,
columns=col,
merge_records=merge_records,
return_metadata=return_metadata,
max_hits=max_hits,
callback=callback, file_pattern=file_pattern)
return df
def get_realtime_data(self, datamodel_name):
""" Get records from a realtime datamodel
Args:
            datamodel_name: `str`
Carol datamodel name
Returns: `pandas.DataFrame`
DataFrame with the realtime data.
"""
filter = {
"mustList": [
{
"mdmFilterType": "TYPE_FILTER",
"mdmValue": datamodel_name+"Golden"
}
,
{
"mdmFilterType": "TERM_FILTER",
"mdmKey":"mdmMergePending",
"mdmValue": "false"
},
{
"mdmFilterType": "RANGE_FILTER",
"mdmKey": "mdmCounterForEntity",
"mdmValue": [0,'null'],
"mdmValuesQuery": {}
}
]
}
result = self.carol.query(only_hits=True, page_size=1000, print_status=True).query(filter).go().results
realtime = pd.DataFrame(result)
return realtime
def get_cds_data(self, datamodel_name, merge_records=True, columns = None, return_metadata = False, callback=None, max_workers=30):
"""[summary]
Args:
datamodel_name: `str` optional
Carol datamodel name
merge_records: `bool` optional
Merge cds data. Defaults to True.
columns: `list of string` optional
Datamodel's columns. Defaults to None (return all columns).
return_metadata: `bool` optional
Return Carol metadata columns. Defaults to False.
callback: `function` optional
Callback function to handle data. Defaults to None.
max_workers: `int` optional
Number of worker used to process. Defaults to 30.
Returns: `pandas.DataFrame`
DataFrame with the staging data.
"""
df = self.carol.datamodel.fetch_parquet(
dm_name=datamodel_name, max_workers=max_workers,
backend='pandas', return_dask_graph=False, columns=columns, merge_records=merge_records,
return_metadata=return_metadata, max_hits=None, callback=callback , cds=True,
file_pattern=None)
return df
def get_datamodel_relationship_constraints(self, dm_list=None):
"""
Create relationship between data models based on their relationship constraints
        Args:
            dm_list: `list of str`, default `None`
                Data model names to inspect. Defaults to all data models in the tenant.
        Returns: `defaultdict(list)`
            dictionary {"dm1": [{"dm2": "field_dm_2", ...}]} mapping each data model to
            its relationship constraints (target data model -> source target field name).
"""
# find Relationship Constraints
if dm_list is None:
dms = self.carol.datamodel.get_all().template_dict.keys()
else:
dms = dm_list
relationship_constraints = defaultdict(list)
for i in dms:
snap = self.carol.datamodel.get_by_name(i)['mdmRelationshipConstraints']
if snap:
relationship_constraints[i].append({i["mdmTargetEntityName"]:i["mdmSourceTargetFieldName"] for i in snap})
return relationship_constraints
def process_staging(self, stagings_list):
""" Process a list of staging tables
Args:
stagings_list `list str`: List of stagings name
"""
for staging_name in stagings_list:
print(f'adding process staging task to staging: {staging_name} ')
self.carol.cds_staging.process_data(staging_name, connector_name='protheus_carol', recursive_processing=False)
print(f'see more in https://{self.carol.organization}.{self.carol.environment}/{self.carol.domain}/carol-ui/tasks')
def get_carol_record_count(self):
""" Get carol record count from tenant explore stats
Returns:
`dict`
Dict with datamodels stats
"""
response = self.carol.call_api(path=f'v1/dashboard/exploreStatistics?days=3', method='GET')
return response["exploreStats"]
```
#### File: pytechfin/pytechfin/context.py
```python
from .misc import *
from .techfin import Techfin
from .carol_techfin import CarolTechfin
from pycarol import CarolAPI
class Context():
"""Carol context. This class will encapsulate all context modules needed.
Args:
use_production_context (bool, optional): Use techfin production envs.. Defaults to False.
user (str, optional): Carol's user name. Defaults to None.
password (str, optional): <PASSWORD>'s password. Defaults to None.
environment (str, optional): Carol's environment name. Defaults to 'carol.ai'.
organization (str, optional): Carol's organization. Defaults to None.
connector_id (str, optional): Carol's connector Id. Defaults to None.
app_name (str, optional): Carol's app name. Defaults to None.
carol_tenant (str, optional): Carol's tenant name. Defaults to None.
techfin_tenant (str, optional): Techfin tenant Id. Defaults to None.
techfin_auth (TOTVSRacAuth, optional): TOTVS RAC Credentials. Defaults to None
techfin_port (int, optional): API URL port. Defaults to 443.
techfin_host (string, optional): API host address. Defaults to None or totvs.app (production).
"""
def __init__(self, use_production_context=False,
                 user=None, password=None, auth=None, environment='carol.ai', organization=None, connector_id=None, app_name=None,
carol_tenant=None, techfin_tenant=None, techfin_auth=None, techfin_port=443, techfin_host=None):
if carol_tenant is None:
self._carol_tenant = get_tenant_name(techfin_tenant)
self._techfin_tenant = techfin_tenant
else:
self._carol_tenant = carol_tenant
self._techfin_tenant = get_tenant_techfin(carol_tenant, techfin_tenant)
if use_production_context:
organization = 'totvstechfin'
app_name = 'techfinplataform'
self._carol = CarolAPI(self._carol_tenant, app_name, auth=auth, user=user, password=password,
environment=environment, organization=organization, connector_id=connector_id)
self._techfin = Techfin(auth=techfin_auth, port=techfin_port, host=techfin_host, techfin_tenant=self.techfin_tenant)
self.carol.add_module('caroltechfin', CarolTechfin)
@property
def carol(self):
return self._carol
@property
def techfin(self):
return self._techfin
@property
def carol_tenant(self):
return self._carol_tenant
@property
def techfin_tenant(self):
return self._techfin_tenant
@property
def current(self):
current_info = {}
# Carol
current_info['carol'] = self.carol.get_current()
current_info['carol']['apps'] = self.carol.apps.all()
# Techfin
current_info['techfin'] = {'tenant_id': self.techfin_tenant}
return current_info
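# Usage sketch (tenant id is hypothetical): build a context and reach both back-ends.
#   context = Context(use_production_context=True,
#                     techfin_tenant='12345678-9abc-4def-8012-3456789abcde')
#   context.carol     # pycarol CarolAPI instance
#   context.techfin   # pytechfin Techfin instance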
```
#### File: pytechfin/pytechfin/http.py
```python
from urllib3.util.retry import Retry
import requests
from requests.adapters import HTTPAdapter
def _retry_session(retries=5, session=None, backoff_factor=0.5, status_forcelist=(500, 502, 503, 504, 524),
allowed_methods=frozenset(['HEAD', 'TRACE', 'GET', 'PUT', 'OPTIONS', 'DELETE'])):
"""
Static method used to handle retries between calls.
Args:
retries: `int` , default `5`
Number of retries for the API calls
        session: Session object, default `None`
It allows you to persist certain parameters across requests.
backoff_factor: `float` , default `0.5`
Backoff factor to apply between attempts. It will sleep for:
{backoff factor} * (2 ^ ({retries} - 1)) seconds
status_forcelist: `iterable` , default (500, 502, 503, 504, 524).
A set of integer HTTP status codes that we should force a retry on.
A retry is initiated if the request method is in allowed_methods and the response status code is in
status_forcelist.
allowed_methods: `iterable` , default frozenset(['HEAD', 'TRACE', 'GET', 'PUT', 'OPTIONS', 'DELETE']))
Set of uppercased HTTP method verbs that we should retry on.
Returns:
:class:`requests.Section`
"""
session = session or requests.Session()
retry = Retry(
total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist,
method_whitelist=allowed_methods,
)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
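# Usage sketch: build one session up front so every request made through it gets the
# retry/backoff behaviour configured above (URL is hypothetical).
#   session = _retry_session(retries=3, backoff_factor=1.0)
#   response = session.get('https://example.com/health')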
```
#### File: pytechfin/pytechfin/misc.py
```python
import re
def get_guid(tenant):
"""Generate UUID from carol's tenant name
Args:
tenant (str): carol tenant name
Returns:
str: techfin tenant id
"""
tenant = tenant[6:]
uuid_tenant = tenant[:8] + '-' + tenant[8:12] + '-' + tenant[12:16] + '-' + tenant[16:20] + '-' + tenant[20:]
return uuid_tenant
def get_tenant_techfin(carol_tenant, techfin_tenant):
"""Returns techfin tenant id.
Args:
        carol_tenant (str): carol tenant name
techfin_tenant (str): techfin tenant id
Raises:
ValueError: Raises error if both parameters are empty
Returns:
str: techfin tenant id
"""
if carol_tenant is None:
if techfin_tenant is None:
raise ValueError('Either `carol_tenant` or `techfin_tenant` must be set.')
return techfin_tenant
else:
return get_guid(carol_tenant)
def is_guid(techfin_tenant):
"""Validate guid arg
Args:
        techfin_tenant (str): techfin tenant id
Returns:
bool: true if is valid guid value
"""
c = re.compile('[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}', re.I)
res = c.match(techfin_tenant)
return res
def get_tenant_name(techfin_tenant):
"""Returns carol tenant name.
Args:
techfin_tenant (str): techfin tenant id
Raises:
ValueError: Raises error if techfin_tenant is not a valid guid value
Returns:
str: carol tenant name
"""
if techfin_tenant is None:
raise ValueError('Either `carol_tenant` or `techfin_tenant` must be set.')
techfin_tenant = techfin_tenant.strip()
if(is_guid(techfin_tenant)):
return f"tenant{techfin_tenant.replace('-','')}"
else:
raise ValueError(' `techfin_tenant` must be a valid guid value')
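# Illustrative round trip between the two naming schemes (ids are made up):
#   get_guid('tenant123456789abc4def80123456789abcde')
#       -> '12345678-9abc-4def-8012-3456789abcde'
#   get_tenant_name('12345678-9abc-4def-8012-3456789abcde')
#       -> 'tenant123456789abc4def80123456789abcde'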
```
#### File: pytechfin/tests/test_techfin.py
```python
import unittest
from pytechfin.techfin import Techfin
from pytechfin.enums import Apps
class TestPytechfin(unittest.TestCase):
def setUp(self) -> None:
self.tt = Techfin()
return super().setUp()
def test_raise_not_valid_api_method(self):
with self.assertRaises(AssertionError):
self.tt.call_api(path="provisioner/api/v1/provisioning", techfin_app=Apps.CASHFLOW.value, method='FAKE')
if __name__ == '__main__':
unittest.main(verbosity=2)
``` |
{
"source": "jnegrete2005/JuradoFMS",
"score": 2
} |
#### File: JuradoFMS/fms/urls.py
```python
from django.contrib import admin
from django.contrib.auth.decorators import user_passes_test
from django.urls import path, include
from django.views.decorators.csrf import csrf_exempt
from django.views.generic.base import RedirectView
from graphene_django.views import GraphQLView
def admin_required(function=None):
actual_decorator = user_passes_test(
lambda u: u.is_superuser,
login_url='/vota/',
redirect_field_name=''
)
if function:
return actual_decorator(function)
return actual_decorator
urlpatterns = [
path('admin/', admin.site.urls),
path('vota/', include('vote.urls')),
path('', RedirectView.as_view(url='vota/')),
path('graphql/', GraphQLView.as_view(graphiql=False)),
path('graphiql/', admin_required(GraphQLView.as_view(graphiql=True))),
]
```
#### File: vote/graphql/query.py
```python
from .types import CompetitorType, VotingPollType, CompMode
from ..models import Competitor, VotingPoll
import graphene
class Query(graphene.ObjectType):
voting_poll = graphene.Field(VotingPollType, id=graphene.ID())
comp = graphene.Field(CompetitorType, id=graphene.ID())
get_mode = graphene.Field(CompMode, id=graphene.ID(), mode=graphene.String())
def resolve_voting_poll(root, info, id):
return VotingPoll.objects.get(pk=id)
def resolve_comp(root, info, id):
return Competitor.objects.get(pk=id)
def resolve_get_mode(root, info, id, mode):
return CompMode(mode=Competitor.objects.get(id=id).__dict__[mode])
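# Example query (illustrative; assumes graphene's default snake_case -> camelCase
# field naming and that a competitor with id 1 exists):
#   query {
#     getMode(id: 1, mode: "easy") { mode }
#   }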
```
#### File: JuradoFMS/vote/models.py
```python
from django.db.models import (
Model, CharField, ForeignKey, PositiveSmallIntegerField, CASCADE, FloatField
)
from django.db import models
from django.contrib.postgres.fields import ArrayField
# Create your models here.
class Competitor(Model):
"""
Competitor model object
"""
name = CharField(max_length=20)
easy = ArrayField(FloatField(), size=9, null=True, blank=True, verbose_name='Easy Mode')
hard = ArrayField(FloatField(), size=9, null=True, blank=True, verbose_name='Hard Mode')
tematicas_1 = ArrayField(FloatField(), size=7, null=True, blank=True, verbose_name='Tematicas 1')
tematicas_2 = ArrayField(FloatField(), size=7, null=True, blank=True, verbose_name='Tematicas 2')
random_score = ArrayField(FloatField(), size=11, null=True, blank=True, verbose_name='Random Mode')
min1 = ArrayField(FloatField(), size=18, null=True, blank=True, verbose_name='minuto 1')
min2 = ArrayField(FloatField(), size=18, null=True, blank=True, verbose_name='minuto 2')
deluxe = ArrayField(FloatField(), size=14, null=True, blank=True, verbose_name='Deluxe')
replica = ArrayField(FloatField(), size=9, null=True, blank=True, verbose_name='Replica')
_list = [ 'easy', 'hard', 'tematicas_1', 'tematicas_2', 'random_score', 'min1', 'min2', 'deluxe', 'replica' ]
def __str__(self):
return self.name
def __getitem__(self, index: int):
return self.__dict__[self._list[index]]
def __setitem__(self, index: int, value: list):
self.__dict__[self._list[index]] = value
self.save(update_fields=[self._list[index]])
class VotingPoll(Model):
"""
VotingPoll model object
"""
comp_1 = ForeignKey(Competitor, on_delete=CASCADE, related_name='comp_1')
comp_2 = ForeignKey(Competitor, on_delete=CASCADE, related_name='comp_2')
rep_counter = PositiveSmallIntegerField(default=0)
winner = CharField(max_length=20, null=True, blank=True)
def __str__(self):
return f'{self.comp_1} vs {self.comp_2}'
```
#### File: vote/tests/util.py
```python
from json import loads
from ..models import Competitor, VotingPoll
from graphene_django.utils.testing import graphql_query
def setUp(self):
# Create Competitors
c1_a = Competitor.objects.create(
name='Si',
easy=[1,1,1,1,1,1,1,1,1],
hard=[1,1,1,1,1,1,1,1,1],
tematicas_1=[1,1,1,1,1,1,1],
tematicas_2=[1,1,1,1,1,1,1],
random_score=[1,1,1,1,1,1,1,1,1,1,1],
min1=[1,1,1,1,1,1,1,1,1],
min2=[1,1,1,1,1,1,1,1,1],
deluxe=[1,1,1,1,1,1,1,1,1,1,1,1,1,1]
)
c1_b = Competitor.objects.create(
name='No',
easy=[2,2,9,2,2,2,2,2,9],
hard=[2,2,2,2,2,2,2,2,2],
tematicas_1=[2,2,2,2,2,2,2],
tematicas_2=[2,2,2,2,2,2,2],
random_score=[2,2,2,2,2,2,2,2,2,2,2],
min1=[2,2,2,2,2,2,2,2,2],
min2=[2,2,2,2,2,2,2,2,2],
deluxe=[2,2,2,2,2,2,2,2,2,2,2,2,2,2]
)
c2_a = Competitor.objects.create(
name='Replica 1',
easy=[3,3,3,3,3,3,3,3,3],
hard=[3,3,3,3,3,3,3,3,3],
tematicas_1=[3,3,3,3,3,3,3],
tematicas_2=[3,3,3,3,3,3,3],
random_score=[3,3,3,3,3,3,3,3,3,3,3],
min1=[3,3,3,3,3,3,3,3,3],
min2=[3,3,3,3,3,3,3,3,3],
deluxe=[3,3,3,3,3,3,3,3,3,3,3,3,3,3],
replica=[3,3,3,3,3,3,3,9,9]
)
c2_b = Competitor.objects.create(
name='Replica 2',
easy=[3,3,3,3,3,3,3,3,9],
hard=[3,3,3,3,3,3,3,3,3],
tematicas_1=[3,3,3,3,3,3,3],
tematicas_2=[3,3,3,3,3,3,3],
random_score=[3,3,3,3,3,3,3,3,3,3,3],
min1=[3,3,3,3,3,3,3,3,3],
min2=[3,3,3,3,3,3,3,3,3],
deluxe=[3,3,3,3,3,3,3,3,3,3,3,3,3,3],
replica=[3,3,3,3,3,3,3,3,3]
)
# Create VotingPolls
poll1 = VotingPoll.objects.create(
comp_1=c1_a,
comp_2=c1_b
)
poll2 = VotingPoll.objects.create(
comp_1=c2_a,
comp_2=c2_b
)
@classmethod
def setUpTestData(cls):
# Create Competitors
c1_a = Competitor.objects.create(
name='Si',
easy=[1,1,1,1,1,1,1,1,1],
hard=[1,1,1,1,1,1,1,1,1],
tematicas_1=[1,1,1,1,1,1,1],
tematicas_2=[1,1,1,1,1,1,1],
random_score=[1,1,1,1,1,1,1,1,1,1,1],
min1=[1,1,1,1,1,1,1,1,1],
min2=[1,1,1,1,1,1,1,1,1],
deluxe=[1,1,1,1,1,1,1,1,1,1,1,1,1,1]
)
c1_b = Competitor.objects.create(
name='No',
easy=[2,2,9,2,2,2,2,2,9],
hard=[2,2,2,2,2,2,2,2,2],
tematicas_1=[2,2,2,2,2,2,2],
tematicas_2=[2,2,2,2,2,2,2],
random_score=[2,2,2,2,2,2,2,2,2,2,2],
min1=[2,2,2,2,2,2,2,2,2],
min2=[2,2,2,2,2,2,2,2,2],
deluxe=[2,2,2,2,2,2,2,2,2,2,2,2,2,2]
)
c2_a = Competitor.objects.create(
name='Replica 1',
easy=[3,3,3,3,3,3,3,3,3],
hard=[3,3,3,3,3,3,3,3,3],
tematicas_1=[3,3,3,3,3,3,3],
tematicas_2=[3,3,3,3,3,3,3],
random_score=[3,3,3,3,3,3,3,3,3,3,3],
min1=[3,3,3,3,3,3,3,3,3],
min2=[3,3,3,3,3,3,3,3,3],
deluxe=[3,3,3,3,3,3,3,3,3,3,3,3,3,3],
replica=[3,3,3,3,3,3,3,9,9]
)
c2_b = Competitor.objects.create(
name='Replica 2',
easy=[3,3,3,3,3,3,3,3,9],
hard=[3,3,3,3,3,3,3,3,3],
tematicas_1=[3,3,3,3,3,3,3],
tematicas_2=[3,3,3,3,3,3,3],
random_score=[3,3,3,3,3,3,3,3,3,3,3],
min1=[3,3,3,3,3,3,3,3,3],
min2=[3,3,3,3,3,3,3,3,3],
deluxe=[3,3,3,3,3,3,3,3,3,3,3,3,3,3],
replica=[3,3,3,3,3,3,3,3,3]
)
# Create VotingPolls
poll1 = VotingPoll.objects.create(
comp_1=c1_a,
comp_2=c1_b
)
poll2 = VotingPoll.objects.create(
comp_1=c2_a,
comp_2=c2_b
)
class Query:
""" Will query what you pass in, with the variables """
def __init__(self, query: str, variables = {}):
self.response = graphql_query(query, variables=variables)
self.content = loads(self.response.content)
def get_key_by_val(my_dict: dict, val: str or int):
for key, value in my_dict.items():
if val == value:
return key
raise Exception('Key doesn\'t exist')
def index_dict(dictionary, n=0):
if n < 0:
n += len(dictionary)
for i, key in enumerate(dictionary.keys()):
if i == n:
return dictionary[key]
raise IndexError("dictionary index out of range")
modes_to_int = {
'easy': 0,
'hard': 1,
'tematicas_1': 2,
'tematicas_2': 3,
'random_score': 4,
'min1': 5,
'min2': 6,
'deluxe': 7,
'end': 8,
'replica': 9,
}
mode_aliases = {
'easy': 'Easy Mode',
'hard': 'Hard Mode',
'tematicas_1': 'Primera Temática',
'tematicas_2': 'Segunda Temática',
'random_score': 'Random Mode',
'min1': 'Primer Minuto',
'min2': 'Segundo Minuto',
'deluxe': 'Deluxe',
'end': 'end',
'replica': 'Réplica',
}
``` |
{
"source": "jneight/django-changelog-models",
"score": 2
} |
#### File: django-changelog-models/changelog_models/models.py
```python
import copy
from django.db import models
from django.utils import six, timezone
class HistoryMetaclass(models.base.ModelBase):
def __new__(cls, classname, bases, classdict):
# with this, we avoid having to declare HistoryManager in each class inheriting from HistoryMixin
classdict['_history'] = HistoryManager()
klass = super(HistoryMetaclass, cls).__new__(cls, classname, bases, classdict)
# TODO: add checks to avoid unknown attributes in class, etc.
history_meta = classdict.pop('HistoryMeta', None)
setattr(klass, '_meta_history', history_meta)
return klass
class HistoryManager(object):
def contribute_to_class(self, cls, name):
"""When initializing a model, django tries to call `contribute_to_class` for
each attribute defined
"""
# after class got ready, fields can be iterated
models.signals.class_prepared.connect(self._parent_prepared, sender=cls)
def _parent_prepared(self, sender, **kwargs):
"""Parent class is ready, so the dynamic class for history can be generated"""
history_class = self._prepare_history_class(sender)
setattr(sender, '_history', history_class)
def _prepare_history_class(self, sender):
"""The important stuff is handled here, the history class cloning `sender` attributes
is created and syncdb, makemigrations, etc, will create the table
"""
if sender._meta.abstract:
return
model_name = '{0}_history'.format(sender._meta.object_name)
# just avoid the creation of duplicate models
if model_name.lower() in sender._meta.app_config.models:
return sender._meta.app_config.models.get_model(model_name)
# this dict will store all the needed data to build a valid cloned model
history_properties = {
'__module__': self.__class__.__module__,
}
# define the table name for the log
history_meta = {
'db_table': '{0}_history'.format(sender._meta.db_table),
'app_label': sender._meta.app_label,
'verbose_name': '{0} History'.format(sender._meta.verbose_name),
}
for field in sender._meta.fields:
# copy fields, for now, only flat fields (char, integers, etc)
if not field.rel:
field = copy.copy(field)
history_properties[field.name] = field
if type(field) == models.AutoField:
history_properties[field.name] = models.IntegerField()
# history fields:
history_properties['history_id'] = models.CharField(max_length=250)
history_properties['history_timestamp'] = models.DateTimeField()
history_properties['id'] = models.AutoField(primary_key=True)
history_properties['Meta'] = type('Meta', (), history_meta)
model_name = '{0}_history'.format(sender._meta.object_name)
# the most important stuff, connect the signals to save changes!!
models.signals.post_save.connect(_post_modification_to_history, sender=sender)
models.signals.post_delete.connect(_post_modification_to_history, sender=sender)
# the history class with all ready!
# history model is now available using apps.get_model
return type(model_name, (models.Model,), history_properties)
def _post_modification_to_history(instance, **kwargs):
instance._populate_history()
class HistoryMixin(six.with_metaclass(HistoryMetaclass, models.Model)):
class Meta:
abstract = True
class HistoryMeta:
# for future django compatibility, it's recommended to create a new Meta class,
# instead of adding new attributes to existing _meta.
fields = []
modified_timestamp = 'modified'
def _populate_history(self):
"""Copy all the data to the history model and saves it"""
history = self._history()
if self._meta_history.modified_timestamp:
history.history_timestamp = getattr(self, self._meta_history.modified_timestamp)
else:
history.history_timestamp = timezone.now()
for field in history._meta.get_all_field_names():
if field == history._meta.pk.name:
continue
if hasattr(self, field):
setattr(history, field, getattr(self, field))
history.history_id = getattr(self, self._meta.pk.name)
history.save()
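# Usage sketch (model is hypothetical): any concrete model inheriting HistoryMixin gets
# a companion `<Model>_history` table and a log row written on every save/delete.
#
#   class Article(HistoryMixin):
#       title = models.CharField(max_length=100)
#       modified = models.DateTimeField(auto_now=True)
#
#       class HistoryMeta:
#           fields = []
#           modified_timestamp = 'modified'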
``` |
{
"source": "jneight/django-xadmin-extras",
"score": 3
} |
#### File: django-xadmin-extras/xadmin_extras/apps.py
```python
class AdminAppMixin(object):
def init_menu(self):
"""Add custom menu entries to the menu displayed for this app
Return a list of dicts, each dict will be a entry for the submenu of
the app:
{'url': '/admin/.../', 'icon': 'bolt', 'title': 'Custom'}
also 'perm' and 'order' keys can be added.
"""
return []
def has_menu_permission(self, obj=None):
return True
``` |
{
"source": "jneight/pydup",
"score": 3
} |
#### File: pydup/pydup/pydup.py
```python
from __future__ import division
import re
import hashlib
import struct
import math
from collections import defaultdict
repeated_char_pattern = re.compile(r"(.)\1{1,}", re.DOTALL)
def tokenize_sentence(sentence):
for word in sentence.split(' '):
# for full in nltk.tokenize.sent_tokenize(sentence):
# for word in nltk.tokenize.word_tokenize(full):
word = word.lower()
word = _replace_repeated_chars(word)
word = word.strip('\'"!?,.')
yield word
def _replace_repeated_chars(sentence):
"""look for 2 or more repetitions of character and replace with the character
itself
"""
return repeated_char_pattern.sub(r"\1\1", sentence)
def _minHash_similarity(hashesA, hashesB, N):
""" Check how many of the hashes in the two groups are equal, that means the same token
was found in the two sentences
    A higher result means more similarity between the sentences
O(N M^2)
"""
count = sum(1 for i in range(N) if hashesA[i] == hashesB[i])
return count/N
def _hamming_distance(bitsA, bitsB, N):
"""Hamming distance returns the number of equals bits"""
X = '{:b}'.format(bitsA ^ bitsB)
X = X.zfill(N) # pad with leading zero to match the N bits
return sum(1 for i in range(N) if int(X[i]) == 1)
def _generate_hash(seed, token):
""" Calculate the hash given a seed plus token, then the digest is unpacked
and converted to int, the final hash is just 4 from the first 4 chars of the digest. Any
consistent hashing function should be enough
:returns: Integer of the partial digest
"""
return struct.unpack(
'i', hashlib.md5(u'{0}${1}'.format(seed, token).encode('utf-8')).digest()[0:4])[0]
def minHash(tokens, N):
""" For each token, will generate N hashes, then get the minimal ones
and returns it.
"""
final_hashes = list()
for seed in range(N):
hash_list = [_generate_hash(seed, token) for token in tokens]
final_hashes.append(min(hash_list))
return final_hashes
def bitsimilarity(bitsA, bitsB, N):
"""Returns the percent of similarity (using Hamming distance)
O(M^2)
"""
distance = _hamming_distance(bitsA, bitsB, N)
similarity = 1 - distance / N
return similarity
def bitsampling(hashes, N):
""" Instead of storing the minHash, we can do better just storing the least significant bit
of each hash
returns a bit vector with the least significat bits
"""
bits = 0
for i in range(N):
bits = (bits << 1) | (hashes[i] & 1)
return bits
def split_chunks(bits, chunk_size):
"""Split the bitvector in groups of bits
:param bits: bitvector
:param chunk_size: number of bits per each chunk
"""
# pad to left with zero to match the chunk multiple bits
#bit_vector = '{{:0>{0}}}'.format(chunk_size).format('{:b}'.format(bits))
bit_vector = '{:b}'.format(bits).zfill(chunk_size)
chunks = []
for i in range(0, len(bit_vector), chunk_size):
chunks.append(int(bit_vector[i:i+chunk_size], 2))
return chunks
def generate_close_chunks(chunk):
"""Generates close chunks, numbers with one bit difference with original chunk
returns list of chunks
"""
size = len('{:b}'.format(chunk))
close_chunks = []
for i in range(size):
# apply a XOR operations with a zero-vector with just one bit as 1, the bit is
# moved each iteration
close_chunks.append(chunk ^ (1 << i))
close_chunks.append(chunk)
return close_chunks
class LSHTable(object):
def __init__(self, hash_iter=32, radius=4):
"""
:param hash_iter: the number of different hashes to be generated per token (chosen empirically),
also represents the bitvector length
:param radius: number of unequal bits to match two bitvectors
"""
self._hash_iter = hash_iter
self._radius = radius
self._chunk_size = hash_iter // self._radius
# initialize an empty table
self._table = [defaultdict(list) for i in range(int(math.ceil(self._hash_iter / self._chunk_size)))]
def bitvector_from_tokens(self, tokens):
hashes = minHash(tokens, N=self._hash_iter) # minimal hashes generated in each iteration
return bitsampling(hashes, N=self._hash_iter) # take the less significant bit of each hash
def add(self, bitvector):
chunks = split_chunks(bitvector, self._chunk_size)
for i, chuck in enumerate(chunks):
self._table[i][chuck].append(bitvector)
def lookup(self, bitvector):
chunks = split_chunks(bitvector, self._chunk_size)
matches = []
for i, chunk in enumerate(chunks):
close_chunks = generate_close_chunks(chunk)
for close in close_chunks:
if close in self._table[i]:
matches.extend(self._table[i][close])
return matches
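if __name__ == '__main__':
    # Minimal usage sketch: index one sentence and probe with a near-duplicate.
    table = LSHTable(hash_iter=32, radius=4)
    tokens_a = list(tokenize_sentence("This is sooo great!!!"))
    tokens_b = list(tokenize_sentence("this is soo great"))
    vector_a = table.bitvector_from_tokens(tokens_a)
    vector_b = table.bitvector_from_tokens(tokens_b)
    table.add(vector_a)
    # lookup returns stored bitvectors whose chunks match the query up to one flipped bit.
    print(table.lookup(vector_b))
    print(bitsimilarity(vector_a, vector_b, N=32))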
``` |
{
"source": "jneil1992/Archon",
"score": 2
} |
#### File: Archon/cogs/administration.py
```python
import discord
from discord.ext import commands, tasks
import json
from ast import literal_eval
import os
from pathlib import Path
from rcon.commands import Rcon
from rcon.instances import check_perms, Instance
from rcon.map_rotation import MAPS_SQUAD, MAPS_BTW, MAPS_PS
from utils import Config, base_embed
config = Config()
class administration(commands.Cog):
"""Server administration commands"""
def __init__(self, bot):
self.bot = bot
@commands.command(description="Request a command through RCON", usage="r!execute <cmd>", aliases=["exec"], hidden=False)
@check_perms(execute=True)
async def execute(self, ctx, *, cmd):
inst = self.bot.cache.instance(ctx.author, ctx.guild.id)
res = inst.rcon.exec_command(cmd)
res = literal_eval(res)
if not res: # An empty list was received
res = "Empty response received"
else:
res = res[0].strip(b'\x00\x01').decode('utf-8')
if len(res) > 1018: # too big to be sent in one embed field
res = res[:1015] + "..."
embed = base_embed(inst.id, title="Command executed")
embed.add_field(name="Request", value=f"`{cmd}`", inline=False)
embed.add_field(name="Response", value=f"```{res}```")
await ctx.send(embed=embed)
@commands.command(description="Set the max player limit", usage="r!set_max_player_limit", aliases=["set_player_limit", "set_max_player_limit", "set_max_players"])
@check_perms(config=True)
async def player_limit(self, ctx, limit: int):
inst = self.bot.cache.instance(ctx.author, ctx.guild.id)
res = inst.rcon.set_max_player_limit(limit)
embed = base_embed(inst.id, title="Max player limit changed", description=res)
await ctx.send(embed=embed)
@commands.command(description="Set or remove a server password", usage="r!password [password]", aliases=["set_password"])
@check_perms(password=True)
async def password(self, ctx, password: str = ""):
inst = self.bot.cache.instance(ctx.author, ctx.guild.id)
res = inst.rcon.set_password(password)
embed = base_embed(inst.id, title="Password updated", description=res)
await ctx.send(embed=embed)
@commands.command(description="Set the clock speed on the server", usage="r!slomo <percentage>", aliases=["clockspeed", "clock_speed"])
@check_perms(cheat=True)
async def slomo(self, ctx, percentage: str):
try:
if percentage.endswith("%"): percentage = float(percentage[:-1]) / 100
else: percentage = float(percentage)
except ValueError:
raise commands.BadArgument('%s needs to be a percentage' % percentage)
inst = self.bot.cache.instance(ctx.author, ctx.guild.id)
res = inst.rcon.set_clockspeed(percentage)
embed = base_embed(inst.id, title="Server clockspeed adjusted", description=res)
await ctx.send(embed=embed)
@commands.group(invoke_without_command=True, description="Enable or disable a custom map rotation", usage="r!rotation [subcommand]", aliases=["map_rotation", "rotation"])
@check_perms(config=True)
async def maprotation(self, ctx):
inst = self.bot.cache.instance(ctx.author, ctx.guild.id)
embed = base_embed(inst.id, title="Custom Map Rotation")
embed.description = "`r!rotation upload` - Upload a new custom rotation\n`r!rotation enable` - Enable the custom rotation\n`r!rotation disable` - Disable custom rotation\n`r!rotation download` - Download your custom rotation"
if os.path.exists(Path(f'rotations/{str(inst.id)}.json')):
if inst.map_rotation:
embed.color = discord.Color.green()
embed.title += " | Status: Enabled"
else:
embed.color = discord.Color.red()
embed.title += " | Status: Disabled"
try: maps = sorted(set([str(entry) for entry in inst.map_rotation.get_entries()]))
except Exception as e:
maps = ["Failed to fetch maps"]
raise e
embed.add_field(name="Maps in rotation:", value="\n".join(maps))
else:
embed.title += " | Status: Unconfigured"
await ctx.send(embed=embed)
@maprotation.command()
@check_perms(config=True)
async def upload(self, ctx):
if not ctx.message.attachments:
await ctx.send(f":no_entry_sign: Please include your custom rotation as an attachment!")
return
attachment = ctx.message.attachments[0]
if attachment.size > 1000000:
await ctx.send(f":no_entry_sign: Invalid attachment!\n`File too big! Maximum is 1000000 bytes but received {str(attachment.size)} bytes`")
return
if not attachment.filename.endswith(".json"):
extension = "." + attachment.filename.split(".")[-1]
await ctx.send(f":no_entry_sign: Invalid attachment!\n`Invalid file extension! Expected .json but received {extension}`")
return
inst = self.bot.cache.instance(ctx.author, ctx.guild.id)
content = str(await attachment.read(), 'utf-8')
content = json.loads(content)
inst.import_rotation(content=content)
with open(Path(f'rotations/{str(inst.id)}.json'), 'w+') as f:
f.write(json.dumps(content, indent=2))
Instance(inst.id).set_uses_custom_rotation(1)
game = Instance(inst.id).game.upper()
if game == 'SQUAD': valid_maps = MAPS_SQUAD
elif game == 'BTW': valid_maps = MAPS_BTW
elif game == 'PS': valid_maps = MAPS_PS
else: valid_maps = []
embed = base_embed(inst.id, title="Uploaded and enabled Custom Map Rotation", color=discord.Color.green())
embed.description = "`r!rotation upload` - Upload a new custom rotation\n`r!rotation enable` - Enable the custom rotation\n`r!rotation disable` - Disable custom rotation\n`r!rotation download` - Download your custom rotation"
try:
maps = sorted(set([str(entry) for entry in inst.map_rotation.get_entries()]))
for m in maps:
if m not in valid_maps:
maps[maps.index(m)] += " ⚠️"
except: maps = ["Failed to fetch maps"]
embed.add_field(name="Maps in rotation:", value="\n".join(maps))
if " ⚠️" in "\n".join(maps):
embed.add_field(name='⚠️ Warning ⚠️', value="Some maps are not recognized and could be invalid. Please verify that the marked layers are correct.")
await ctx.send(embed=embed)
@maprotation.command()
@check_perms(config=True)
async def enable(self, ctx):
inst = self.bot.cache.instance(ctx.author, ctx.guild.id)
if inst.map_rotation:
await ctx.send(':no_entry_sign: Custom Map Rotation is already enabled!')
return
path = Path(f'rotations/{str(inst.id)}.json')
if not os.path.exists(path):
await ctx.send(':no_entry_sign: Upload a custom rotation first using `r!rotation upload`!')
return
inst.import_rotation(fp=path)
Instance(inst.id).set_uses_custom_rotation(1)
embed = base_embed(inst.id, title="Enabled Custom Map Rotation", color=discord.Color.green())
embed.description = "`r!rotation upload` - Upload a new custom rotation\n`r!rotation enable` - Enable the custom rotation\n`r!rotation disable` - Disable custom rotation\n`r!rotation download` - Download your custom rotation"
try: maps = sorted(set([str(entry) for entry in inst.map_rotation.get_entries()]))
except: maps = ["Failed to fetch maps"]
embed.add_field(name="Maps in rotation:", value="\n".join(maps))
await ctx.send(embed=embed)
@maprotation.command()
@check_perms(config=True)
async def disable(self, ctx):
inst = self.bot.cache.instance(ctx.author, ctx.guild.id)
if inst.map_rotation == None:
await ctx.send(':no_entry_sign: Custom Map Rotation is already disabled!')
return
inst.map_rotation = None
Instance(inst.id).set_uses_custom_rotation(0)
embed = base_embed(inst.id, title="Disabled Custom Map Rotation", color=discord.Color.red())
embed.description = "`r!rotation upload` - Upload a new custom rotation\n`r!rotation enable` - Enable the custom rotation\n`r!rotation disable` - Disable custom rotation\n`r!rotation download` - Download your custom rotation"
await ctx.send(embed=embed)
@maprotation.command()
@check_perms(config=True)
async def download(self, ctx):
inst = self.bot.cache.instance(ctx.author, ctx.guild.id)
path = Path(f'rotations/{str(inst.id)}.json')
if not os.path.exists(path):
await ctx.send(':no_entry_sign: You don\'t have a custom rotation uploaded!')
return
f = discord.File(fp=path, filename=f"{Instance(inst.id).name} map rotation.json")
await ctx.send(file=f)
def setup(bot):
bot.add_cog(administration(bot))
``` |
{
"source": "jneilliii/OctoPrint-BLVLogger",
"score": 2
} |
#### File: OctoPrint-BLVLogger/octoprint_blvlogger/__init__.py
```python
from __future__ import absolute_import
import json
import os
import sqlite3
from datetime import datetime, timedelta
import flask
import octoprint.plugin
class BlvloggerPlugin(
octoprint.plugin.SettingsPlugin,
octoprint.plugin.AssetPlugin,
octoprint.plugin.TemplatePlugin,
octoprint.plugin.EventHandlerPlugin,
octoprint.plugin.SimpleApiPlugin,
octoprint.plugin.StartupPlugin,
):
def __init__(self):
self.db_path = None
##~~ StartupPlugin mixin
def on_startup(self, host, port):
self.db_path = os.path.join(
self.get_plugin_data_folder(), "bedlevelvisualizer.db"
)
if not os.path.exists(self.db_path):
db = sqlite3.connect(self.db_path)
cursor = db.cursor()
cursor.execute(
"""CREATE TABLE mesh_history_data(id INTEGER PRIMARY KEY, timestamp TEXT, mesh TEXT, bed TEXT)"""
)
db.commit()
db.close()
##~~ SettingsPlugin mixin
def get_settings_defaults(self):
return {}
##~~ AssetPlugin mixin
def get_assets(self):
return {
"js": ["js/blvlogger.js"],
}
##~~ EventHandlerPlugin mixin
def on_event(self, event, payload):
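        # "plugin_bedlevelvisualizer_mesh_data_collected" is the custom event fired by the
        # Bed Level Visualizer plugin once a mesh has been captured; its payload is expected
        # to carry "mesh" and "bed" entries (assumption based on the usage below).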
if event == "plugin_bedlevelvisualizer_mesh_data_collected":
if payload.get("mesh", False) and self.db_path is not None:
today = datetime.today()
mesh_data = payload["mesh"]
bed_data = payload["bed"]
db = sqlite3.connect(self.db_path)
cursor = db.cursor()
cursor.execute(
"""INSERT INTO mesh_history_data(timestamp, mesh, bed) VALUES(?,?,?)""",
[today.isoformat(" "), json.dumps(mesh_data), json.dumps(bed_data)],
)
db.commit()
db.close()
self.send_history_data()
if event == "ClientOpened":
self.send_history_data()
##~~ SimpleApiPlugin mixin
def on_api_get(self, request):
if request.args.get("start") and request.args.get("end"):
self.send_history_data(
start=request.args.get("start"), end=request.args.get("end")
)
return flask.jsonify(
start=request.args.get("start"), end=request.args.get("end")
)
else:
return flask.make_response("There was an error", 200)
##~~ Utility Functions
def send_history_data(self, start=None, end=None):
if self.db_path is not None:
if start is None:
start = datetime.today().date() - timedelta(days=1)
if end is None:
end = datetime.today().date() + timedelta(days=1)
db = sqlite3.connect(self.db_path)
cursor = db.cursor()
cursor.execute(
"""SELECT timestamp, mesh, bed FROM mesh_history_data WHERE timestamp BETWEEN ? AND ? ORDER BY timestamp DESC""",
[start, end],
)
mesh_history_data = {"mesh": cursor.fetchall()}
self._plugin_manager.send_plugin_message(self._identifier, mesh_history_data)
##~~ Softwareupdate hook
def get_update_information(self):
return {
"blvlogger": {
"displayName": "BLV Logger",
"displayVersion": self._plugin_version,
# version check: github repository
"type": "github_release",
"user": "jneilliii",
"repo": "OctoPrint-BLVLogger",
"current": self._plugin_version,
"stable_branch": {
"name": "Stable",
"branch": "master",
"comittish": ["master"],
},
"prerelease_branches": [
{
"name": "Release Candidate",
"branch": "rc",
"comittish": ["rc", "master"],
}
],
# update method: pip
"pip": "https://github.com/jneilliii/OctoPrint-BLVLogger/archive/{target_version}.zip",
}
}
__plugin_name__ = "BLV Logger"
__plugin_pythoncompat__ = ">=2.7,<4" # python 2 and 3
def __plugin_load__():
global __plugin_implementation__
__plugin_implementation__ = BlvloggerPlugin()
global __plugin_hooks__
__plugin_hooks__ = {
"octoprint.plugin.softwareupdate.check_config": __plugin_implementation__.get_update_information
}
``` |
{
"source": "jneines/TrivialMessageBus",
"score": 3
} |
#### File: TrivialMessageBus/TrivialMessageBus/trivial_messagebus.py
```python
import logging
import zmq
__author__ = "jneines"
__license__ = "MIT"
__copyright__ = "2016 "+__author__
__url__ = "https://github.com/jneines/TrivialMessageBus"
class TrivialMessageBus(object):
""" A trivial implementation for a messagebus """
def __init__(self, subscriber_port, publisher_port):
""" Initialization """
logging.debug("init")
self.subscriber_port=subscriber_port
self.publisher_port=publisher_port
self.context=zmq.Context()
def run(self):
""" run as a proxy in a main loop """
logging.debug("run")
try:
logging.debug("Creating subsciber on port {0:d}.".format(self.subscriber_port))
self.subscriber=self.context.socket(zmq.SUB)
self.subscriber.bind("tcp://*:{0:d}".format(self.subscriber_port))
self.subscriber.setsockopt(zmq.SUBSCRIBE, b"")
logging.debug("Creating publisher on port {0:d}.".format(self.publisher_port))
self.publisher=self.context.socket(zmq.PUB)
self.publisher.bind("tcp://*:{0:d}".format(self.publisher_port))
logging.info("Starting proxy service.")
zmq.proxy(self.subscriber, self.publisher)
except Exception as e:
logging.debug("Error message: {0}.".format(e))
logging.info("Shutting down the trivial message bus.")
finally:
self.cleanup()
def cleanup(self):
""" cleaning up """
logging.debug("cleanup")
logging.debug("Closing publisher")
self.publisher.close()
logging.debug("Closing subscriber")
self.subscriber.close()
logging.debug("Terminating context")
self.context.term()
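# --- Illustrative client sketch (not part of this module) ---------------------
# Producers connect a PUB socket to subscriber_port and consumers connect a SUB
# socket to publisher_port; zmq.proxy forwards every message in between. The
# ports below match the argparse defaults further down; the sleep works around
# ZeroMQ's slow-joiner behaviour.
#
#   import time
#   import zmq
#   ctx = zmq.Context()
#   pub = ctx.socket(zmq.PUB); pub.connect("tcp://localhost:12345")
#   sub = ctx.socket(zmq.SUB); sub.connect("tcp://localhost:12346")
#   sub.setsockopt(zmq.SUBSCRIBE, b"")
#   time.sleep(1)
#   pub.send(b"hello")
#   print(sub.recv())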
if __name__=="__main__":
""" A short main block to allow direct usage. """
import argparse
parser = argparse.ArgumentParser(description="Trivial Messagebus")
parser.add_argument("-l", "--loglevel",
help="set the log level",
choices=["notset", "debug", "info", "warning", "error", "critical"],
default="critical")
parser.add_argument("-s", "--subscriber_port",
help="set the port for the subscriber", type=int,
default=12345)
parser.add_argument("-p", "--publisher_port",
help="set the port for the publisher", type=int,
default=12346)
config=parser.parse_args()
log_nlevel=getattr(logging, config.loglevel.upper(), None)
logging.basicConfig(format="%(asctime)s - %(message)s",
datefmt="%Y-%m-%d %H:%M:%S", level=log_nlevel)
tmb=TrivialMessageBus(config.subscriber_port, config.publisher_port)
tmb.run()
``` |
{
"source": "jnejati/WprofX",
"score": 2
} |
#### File: jnejati/WprofX/main_mobile.py
```python
__author__ = 'jnejati'
#import experiments
import json
import signal
import pickle
import webDnsSetupMobile
import network_emulator
import os
import convert
from urllib.parse import urlparse
import time
#import modifications as modify
#from bs4 import BeautifulSoup
import urllib.request
import urllib.response
import io
import gzip
import subprocess
import logging
#import coloredlogs
#coloredlogs.install(level='INFO')
import timeit
def _change_resolv_conf():
RESOLV_CONF = '/etc/resolv.conf'
with open (RESOLV_CONF, 'w') as _f:
_f.write('nameserver 127.0.0.1\n')
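# Pointing /etc/resolv.conf at 127.0.0.1 routes all DNS lookups to the local
# BIND server configured by webDnsSetupMobile.setup_nameserver(), so the
# recorded domains resolve to the locally hosted copies during replay.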
def main():
start = timeit.default_timer()
input_file = 'mixed-mobile.txt'
archive_dir = '/home/jnejati/PLTSpeed/record/archive-m/'
config_file = '/home/jnejati/PLTSpeed/confs/netProfiles.json'
_change_resolv_conf()
with open(config_file, 'r') as f:
net_profile = json.load(f)[0]
_path = os.path.join('/home/jnejati/PLTSpeed', net_profile['device_type'] + '_' + net_profile['name'])
webDnsSetupMobile.clear_folder(_path)
with open('/home/jnejati/PLTSpeed/res/' + input_file) as _sites:
for _site in _sites:
#_chrome_process = subprocess.Popen(_remote_debugging_cmd)
_site = _site.strip()
logging.info('Navigating to: ' + _site)
s1 = urlparse(_site)
_site_data_folder = os.path.join(_path, s1.netloc)
if not os.path.isdir(_site_data_folder):
os.mkdir(_site_data_folder)
os.mkdir(os.path.join(_site_data_folder, 'dns'))
_d_ip_dict = webDnsSetupMobile.setup_ip_subdomain(s1.netloc, archive_dir)
webDnsSetupMobile.setup_nameserver(_d_ip_dict)
webDnsSetupMobile.setup_webserver(s1.netloc, archive_dir, _d_ip_dict)
print('Starting runs:')
for run_no in range(10):
_run_data_folder = os.path.join(_site_data_folder, 'run_' + str(run_no))
if not os.path.isdir(_run_data_folder):
os.mkdir(_run_data_folder)
_subfolders = ['trace', 'screenshot', 'analysis', 'summary', 'tcpdump', 'perf']
for folder in _subfolders:
os.mkdir(os.path.join(_run_data_folder, folder))
logging.info('Current profile: ' + net_profile['device_type'] + ' - ' + net_profile['name'] + ' run_no: ' + str(run_no) + ' site: ' + _site)
#netns = network_emulator.NetworkEmulator(net_profile, dirs)
#netns.set_profile(net_profile['conn_type'])
os.system('pkill node')
time.sleep(15)
_trace_folder = os.path.join(_run_data_folder, 'trace')
_screenshot_folder = os.path.join(_run_data_folder, 'screenshot')
_summary_folder = os.path.join(_run_data_folder, 'summary')
_trace_file = os.path.join(_trace_folder, str(run_no) + '_' + s1.netloc)
_screenshot_file = os.path.join(_screenshot_folder, str(run_no) + '_' + s1.netloc)
_summary_file = os.path.join(_summary_folder, str(run_no) + '_' + s1.netloc)
                logging.info('%s %s %s', _trace_file, _screenshot_file, _summary_file)
time.sleep(5)
try:
_node_cmd = ['node', 'chrome_launcher.js', _site, _trace_file, _summary_file, _screenshot_file]
subprocess.call(_node_cmd, timeout = 110)
except subprocess.TimeoutExpired:
print("Timeout: ", _site, run_no)
with open (os.path.join(_site_data_folder, 'log.txt'), 'w+') as _log:
_log.write("Timed out: " + _site + ' ' + str(run_no) + '\n')
time.sleep(15)
pickle.dump(_d_ip_dict, open(os.path.join(_site_data_folder, 'dns/dnsBackup.txt'), 'wb'))
time.sleep(2)
stop = timeit.default_timer()
logging.info(100*'-' + '\nTotal time: ' + str(stop -start))
if __name__ == '__main__':
main()
```
#### File: jnejati/WprofX/waterfall_draw.py
```python
from urllib.parse import urldefrag
from bokeh.models import LinearAxis, Range1d, CustomJS, HoverTool, BoxSelectTool
from bokeh.plotting import figure, output_file, show, ColumnDataSource
from bokeh.embed import components
from bokeh.resources import INLINE
from bokeh.util.string import encode_utf8
import json
class DrawWaterfall():
def __init__(self, jsonFile, outputFile, lookup_id, order_lookup):
self.json_file = jsonFile
with open(self.json_file) as data_file:
self.data = json.load(data_file)
# end_time = data[-1][1]['endTime'] + 500
self.end_time = 5000
self.y_range = len(self.data) + 10
self.line_width = 4
output_file(outputFile)
self.yr = Range1d(start=self.y_range, end=0)
self.xr = Range1d(start=0, end=1.05 * self.end_time)
self.lookup_id = lookup_id
self.order_lookup = order_lookup
hover = HoverTool(
tooltips="""
<div style='padding: 3px; width: 500px; word-break: break-all; word-wrap: break-word; text-align: left;'>
<div>
<div>
<span style="font-weight: bold; font-size: 9px;">@desc</span>
</div>
</div>
<div>
<div>
<span style=" font-size: 8px;">@o_url</span>
</div>
</div>
<div>
<div>
<span style="font-size: 9px;">@o_size</span>
</div>
</div>
<div>
<div>
<span style="font-size: 11px;">@o_stime</span>
</div>
</div>
<div>
<div>
<span style="font-size: 11px;">@o_etime</span>
</div>
</div>
<div>
<div>
<span style="font-size: 11px;">@o_time</span>
</div>
</div>
</div>
"""
)
self.p = figure(plot_width=1250, plot_height=2100, tools=[hover, 'save,pan,wheel_zoom,box_zoom,reset,resize'],
y_range=self.yr,
x_range=self.xr, x_axis_location="above")
# p.ygrid.grid_line_color = None
self.p.xaxis.axis_label = 'Time (ms)'
self.p.xaxis.axis_label_text_align = 'left'
self.p.xaxis.axis_label_text_color = "#c8c8c8"
self.p.xaxis.axis_label_text_font_size = '10pt'
self.p.xaxis.axis_line_color = '#c8c8c8'
self.p.xaxis.major_tick_line_color = '#c8c8c8'
self.p.xaxis.major_label_text_color = '#c8c8c8'
self.p.xaxis.major_label_text_align = 'left'
self.p.xaxis.major_label_text_font_size = '10pt'
self.p.xaxis.minor_tick_line_color = '#c8c8c8'
self.p.xaxis.minor_tick_out = 0
self.p.xgrid.grid_line_alpha = 0.5
self.p.ygrid.grid_line_color = None
self.p.yaxis.visible = False
self.javascript_type_list = ['application/x-javascript', 'application/javascript', 'application/ecmascript',
'text/javascript', 'text/ecmascript', 'application/json', 'javascript/text']
self.css_type_list = ['text/css', 'css/text']
self.text_type_list = ['evalhtml', 'text/html', 'text/plain', 'text/xml']
self.colormap = dict(ctext='#2757ae', dtext="#a8c5f7", cjs="#c9780e", djs='#e8ae61', ccss="#13bd0d",
dcss='#8ae887',
cother="#eb5bc0", dother='#eb5bc0', img='#c79efa')
def draw_from_json(self):
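        '''Plots each activity from the parsed trace JSON as a horizontal bar, colour-coded by category'''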
for _index, _event in enumerate(self.data):
if not _event['id'] == 'Deps':
for _obj in _event['objs']:
_nodeId = _obj[0]
_nodeData = _obj[1]
try:
_startTime = round(_nodeData['startTime'], 2)
except:
                        print(_nodeId, _nodeData)
continue
try:
_endTime = round(_nodeData['endTime'], 2)
except:
print(_nodeId, _nodeData)
continue
_duration = round(_endTime - _startTime, 2)
##########################################################################################
# Network
##########################################################################################
if _nodeId.startswith('Network'):
if 'transferSize' in _nodeData:
_transferSize = _nodeData['transferSize']
else:
_transferSize = 0
_url = _nodeData['url']
_mimeType = _nodeData['mimeType']
y_index = (_index + 1)
if _mimeType in self.text_type_list:
color = self.colormap['dtext']
elif _mimeType in self.css_type_list:
color = self.colormap['dcss']
elif _mimeType in self.javascript_type_list:
color = self.colormap['djs']
elif _mimeType.startswith('image'):
color = self.colormap['img']
else:
color = self.colormap['dother']
_mimeType = _nodeId + ': ' + _nodeData['mimeType']
source = ColumnDataSource(
data=dict(
x=[_startTime, _endTime],
y=[y_index, y_index],
desc=[_mimeType, _mimeType],
o_url=[_url, _url],
o_size=[_transferSize, _transferSize],
o_stime=['s: ' + str(_startTime) + ' ms', 's: ' + str(_startTime) + ' ms'],
o_etime=['e: ' + str(_endTime) + ' ms', 'e: ' + str(_endTime) + ' ms'],
o_time=['dur: ' + str(_duration) + ' ms', 'dur: ' + str(_duration) + ' ms']
))
r = self.p.line('x', 'y', source=source,
line_color=color,
line_width=self.line_width, line_cap='round', name='myline')
##########################################################################################
# Loading
##########################################################################################
elif _nodeId.startswith('Loading'):
_desc = _nodeData['name'] + ': ' + _nodeId
_url = ' '
_styleSheetUrl = ' '
if _nodeData['name'] == 'ParseHTML' and 'url' in _nodeData:
if _nodeData['url'] is not None:
_url = _nodeData['url']
y_index = _index + 1
color = self.colormap['ctext']
else:
continue
elif _nodeData['name'] == 'ParseAuthorStyleSheet' and 'styleSheetUrl' in _nodeData:
if _nodeData['styleSheetUrl'] is not None:
_styleSheetUrl = _nodeData['styleSheetUrl']
y_index = _index + 1
color = self.colormap['ccss']
else:
continue
source = ColumnDataSource(
data=dict(
x=[_startTime, _endTime],
y=[y_index, y_index],
desc=[_desc, _desc],
o_url=[_url, _url],
o_size=[_styleSheetUrl, _styleSheetUrl],
o_stime=['s: ' + str(_startTime) + ' ms', 's: ' + str(_startTime) + ' ms'],
o_etime=['e: ' + str(_endTime) + ' ms', 'e: ' + str(_endTime) + ' ms'],
o_time=['dur: ' + str(_duration) + ' ms', 'dur: ' + str(_duration) + ' ms']
))
r = self.p.line('x', 'y', source=source,
line_color=color,
line_width=self.line_width, line_cap='round', name='myline')
##########################################################################################
# Scripting
##########################################################################################
elif _nodeId.startswith('Scripting'):
_url = _nodeData['url']
_desc = _nodeId
color = self.colormap['cjs']
y_index = _index + 1
source = ColumnDataSource(
data=dict(
x=[_startTime, _endTime],
y=[y_index, y_index],
desc=[_desc, _desc],
o_url=[_url, _url],
o_size=['Scripting', 'Scripting'],
o_stime=['s: ' + str(_startTime) + ' ms', 's: ' + str(_startTime) + ' ms'],
o_etime=['e: ' + str(_endTime) + ' ms', 'e: ' + str(_endTime) + ' ms'],
o_time=['dur: ' + str(_duration) + ' ms', 'dur: ' + str(_duration) + ' ms']
))
r = self.p.line('x', 'y', source=source,
line_color=color,
line_width=self.line_width, line_cap='round', name='myline')
##########################################################################################
# Rendering
##########################################################################################
elif _nodeId.startswith('Rendering'):
_desc = _nodeData['name']
color = '#9b82e3'
if _desc == 'UpdateLayerTree':
y_index = (len(self.data) + 1)
elif _desc == 'Layout':
y_index = (len(self.data) + 2)
elif _desc == 'HitTest':
y_index = (len(self.data) + 3)
elif _desc == 'RecalculateStyles':
y_index = (len(self.data) + 4)
source = ColumnDataSource(
data=dict(
x=[_startTime, _endTime],
y=[y_index, y_index],
desc=[_desc + ': ', _desc + ': '],
o_url=['', ''],
o_size=['Rendering', 'Rendering'],
o_stime=['s: ' + str(_startTime) + ' ms', 's: ' + str(_startTime) + ' ms'],
o_etime=['e: ' + str(_endTime) + ' ms', 'e: ' + str(_endTime) + ' ms'],
o_time=['dur: ' + str(_duration) + ' ms', 'dur: ' + str(_duration) + ' ms']
))
r = self.p.line('x', 'y', source=source,
line_color=color,
line_width=self.line_width, line_cap='round', name='myline')
##########################################################################################
# Painting is one thread
##########################################################################################
elif _nodeId.startswith('Paint'):
_desc = _nodeData['name']
color = '#76b169'
y_index = (len(self.data) + 5)
source = ColumnDataSource(
data=dict(
x=[_startTime, _endTime],
y=[y_index, y_index],
desc=[_desc + ': ', _desc + ': '],
o_url=['', ''],
o_size=['Painting', 'Painting'],
o_stime=['s: ' + str(_startTime) + ' ms', 's: ' + str(_startTime) + ' ms'],
o_etime=['e: ' + str(_endTime) + ' ms', 'e: ' + str(_endTime) + ' ms'],
o_time=['dur: ' + str(_duration) + ' ms', 'dur: ' + str(_duration) + ' ms']
))
r = self.p.line('x', 'y', source=source,
line_color=color,
line_width=self.line_width, name='myline')
def draw_critical_path(self, cp):
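        '''Highlights dependency edges whose endpoints are consecutive entries of the critical path cp in red'''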
i = 0
for _dep in self.data[-1]['objs']:
a1_id = _dep['a1']
a2_id = _dep['a2']
if (a1_id in cp) and (a2_id == cp[cp.index(a1_id) + 1]):
#print(a1_id, a2_id, i)
a1_start = self.lookup_id[a1_id]['startTime']
a2_start = self.lookup_id[a2_id]['startTime']
a1_end = self.lookup_id[a1_id]['endTime']
a2_end = self.lookup_id[a2_id]['endTime']
a1_y = self.order_lookup[a1_id] + 1
a2_y = self.order_lookup[a2_id] + 1
if a1_y == a2_y:
if not _dep['time'] == -1:
a1_end = _dep['time']
self.p.line([a1_end, a2_start], [a1_y, a1_y], line_color='red',
line_width=2, line_cap='square')
else:
if not _dep['time'] == -1:
a1_end = _dep['time']
self.p.line([a1_end, a1_end], [a1_y, a2_y], line_color='red',
line_width=2, line_cap='square')
if a1_end < a2_start: #???
self.p.circle([a1_end], [a2_y], line_color='red', size=1)
self.p.line([a1_end, a2_start], [a2_y, a2_y], line_color='red',
line_width=2, line_cap='square')
def showPlot(self):
show(self.p)
def draw_from_dict(self, y_order_url_lookup, data):
for obj in data:
_nodeId = obj[0]
_nodeData = obj[1]
try:
_startTime = round(_nodeData['startTime'], 2)
except:
                print(_nodeId, _nodeData)
continue
try:
_endTime = round(_nodeData['endTime'], 2)
except:
print(_nodeId, _nodeData)
continue
_duration = round(_endTime - _startTime, 2)
##########################################################################################
# Network
##########################################################################################
if _nodeId.startswith('Network'):
if 'transferSize' in _nodeData:
_transferSize = _nodeData['transferSize']
else:
_transferSize = 0
_url = _nodeData['url']
_mimeType = _nodeData['mimeType']
y_index = (y_order_url_lookup[urldefrag(_url)[0]] + 1)
if _mimeType in self.text_type_list:
color = self.colormap['dtext']
elif _mimeType in self.css_type_list:
color = self.colormap['dcss']
elif _mimeType in self.javascript_type_list:
color = self.colormap['djs']
elif _mimeType.startswith('image'):
color = self.colormap['img']
else:
color = self.colormap['dother']
_mimeType = _nodeId + ': ' + _nodeData['mimeType']
source = ColumnDataSource(
data=dict(
x=[_startTime, _endTime],
y=[y_index, y_index],
desc=[_mimeType, _mimeType],
o_url=[_url, _url],
o_size=[_transferSize, _transferSize],
o_stime=['s: ' + str(_startTime) + ' ms', 's: ' + str(_startTime) + ' ms'],
o_etime=['e: ' + str(_endTime) + ' ms', 'e: ' + str(_endTime) + ' ms'],
o_time=['dur: ' + str(_duration) + ' ms', 'dur: ' + str(_duration) + ' ms']
))
r = self.p.line('x', 'y', source=source,
line_color=color,
line_width=self.line_width, line_cap='round', name='myline')
##########################################################################################
# Loading
##########################################################################################
elif _nodeId.startswith('Loading'):
_desc = _nodeData['name']
_url = ' '
_styleSheetUrl = ' '
if _desc == 'ParseHTML' and 'url' in _nodeData:
if _nodeData['url'] is not None:
_url = _nodeData['url']
y_index = (y_order_url_lookup[urldefrag(_url)[0]] + 1)
color = self.colormap['ctext']
else:
continue
elif _desc == 'ParseAuthorStyleSheet' and 'styleSheetUrl' in _nodeData:
if _nodeData['styleSheetUrl'] is not None:
_styleSheetUrl = _nodeData['styleSheetUrl']
y_index = (y_order_url_lookup[urldefrag(_styleSheetUrl)[0]] + 1)
color = self.colormap['ccss']
else:
continue
source = ColumnDataSource(
data=dict(
x=[_startTime, _endTime],
y=[y_index, y_index],
desc=[_desc, _desc],
o_url=[_url, _url],
o_size=[_styleSheetUrl, _styleSheetUrl],
o_stime=['s: ' + str(_startTime) + ' ms', 's: ' + str(_startTime) + ' ms'],
o_etime=['e: ' + str(_endTime) + ' ms', 'e: ' + str(_endTime) + ' ms'],
o_time=['dur: ' + str(_duration) + ' ms', 'dur: ' + str(_duration) + ' ms']
))
r = self.p.line('x', 'y', source=source,
line_color=color,
line_width=self.line_width, line_cap='round', name='myline')
##########################################################################################
# Scripting
##########################################################################################
elif _nodeId.startswith('Scripting'):
_url = _nodeData['url']
_desc = ' '
color = self.colormap['cjs']
y_index = (y_order_url_lookup[urldefrag(_url)[0]] + 1)
source = ColumnDataSource(
data=dict(
x=[_startTime, _endTime],
y=[y_index, y_index],
desc=[_desc, _desc],
o_url=[_url, _url],
o_size=['Scripting', 'Scripting'],
o_stime=['s: ' + str(_startTime) + ' ms', 's: ' + str(_startTime) + ' ms'],
o_etime=['e: ' + str(_endTime) + ' ms', 'e: ' + str(_endTime) + ' ms'],
o_time=['dur: ' + str(_duration) + ' ms', 'dur: ' + str(_duration) + ' ms']
))
r = self.p.line('x', 'y', source=source,
line_color=color,
line_width=self.line_width, line_cap='round', name='myline')
##########################################################################################
# Rendering
##########################################################################################
elif _nodeId.startswith('Rendering'):
_desc = _nodeData['name']
color = '#9b82e3'
if _desc == 'UpdateLayerTree':
y_index = (len(y_order_url_lookup) + 1)
elif _desc == 'Layout':
y_index = (len(y_order_url_lookup) + 2)
elif _desc == 'HitTest':
y_index = (len(y_order_url_lookup) + 3)
elif _desc == 'RecalculateStyles':
y_index = (len(y_order_url_lookup) + 4)
source = ColumnDataSource(
data=dict(
x=[_startTime, _endTime],
y=[y_index, y_index],
desc=[_desc + ': ', _desc + ': '],
o_url=['', ''],
o_size=['Rendering', 'Rendering'],
o_stime=['s: ' + str(_startTime) + ' ms', 's: ' + str(_startTime) + ' ms'],
o_etime=['e: ' + str(_endTime) + ' ms', 'e: ' + str(_endTime) + ' ms'],
o_time=['dur: ' + str(_duration) + ' ms', 'dur: ' + str(_duration) + ' ms']
))
r = self.p.line('x', 'y', source=source,
line_color=color,
line_width=self.line_width, line_cap='round', name='myline')
##########################################################################################
# Painting
##########################################################################################
elif _nodeId.startswith('Paint'):
_desc = _nodeData['name']
color = '#76b169'
y_index = (len(y_order_url_lookup) + 5)
source = ColumnDataSource(
data=dict(
x=[_startTime, _endTime],
y=[y_index, y_index],
desc=[_desc + ': ', _desc + ': '],
o_url=['', ''],
o_size=['Painting', 'Painting'],
o_stime=['s: ' + str(_startTime) + ' ms', 's: ' + str(_startTime) + ' ms'],
o_etime=['e: ' + str(_endTime) + ' ms', 'e: ' + str(_endTime) + ' ms'],
o_time=['dur: ' + str(_duration) + ' ms', 'dur: ' + str(_duration) + ' ms']
))
r = self.p.line('x', 'y', source=source,
line_color=color,
line_width=self.line_width, name='myline')
def draw_dependents(self, dep):
a1_id = dep['a1']
a2_id = dep['a2']
a1_start = self.lookup_id[a1_id]['startTime']
a2_start = self.lookup_id[a2_id]['startTime']
a1_end = self.lookup_id[a1_id]['endTime']
a2_end = self.lookup_id[a2_id]['endTime']
a1_y = self.order_lookup[a1_id] + 1
a2_y = self.order_lookup[a2_id] + 1
if a1_y == a2_y:
if not dep['time'] == -1:
a1_end = dep['time']
self.p.line([a1_end, a2_start ], [a1_y, a1_y], line_color='black',
line_width=1, line_cap='square')
else:
if not dep['time'] == -1:
a1_end = dep['time']
self.p.line([a1_end, a1_end ], [a1_y, a2_y], line_color='black',
line_width=0.5, line_cap='square')
if a1_end < a2_start:
self.p.circle([a1_end], [a2_y], line_color='black', size = 2)
self.p.line([a1_end, a2_start], [a2_y, a2_y], line_color='black',
line_width=0.5, line_cap='square')
def draw_all_dependency(self):
for dep in self.data[-1]['objs']:
self.draw_dependents(dep)
#_plot = DrawWaterfall('./results/zdnet.json', 'line.html')
#_plot.draw_from_json()
#_plot.draw_all_dependency()
#_plot.showPlot()
# draw dep
```
#### File: jnejati/WprofX/webDnsSetup_live.py
```python
import errno
import random
import sys
import subprocess
import os
import shutil
from urllib.parse import urlparse
import time
from OpenSSL import crypto, SSL
from socket import gethostname
from pprint import pprint
from time import gmtime, mktime
import tldextract
def create_self_signed_cert(cert_dir, key_dir, domain_name):
SYSTEM_CERT_DIR = '/usr/local/share/ca-certificates'
DOMAIN_SYS_DIR = os.path.join(SYSTEM_CERT_DIR, domain_name)
CERT_FILE = domain_name + '.crt'
KEY_FILE = domain_name + '.key'
k = crypto.PKey()
k.generate_key(crypto.TYPE_RSA, 1024)
# create a self-signed cert
cert = crypto.X509()
cert.get_subject().C = "US"
cert.get_subject().ST = "New York"
cert.get_subject().L = "<NAME>"
cert.get_subject().O = "Computer Science"
cert.get_subject().OU = "NetSys"
cert.get_subject().CN = domain_name
cert.set_serial_number(int(random.randint(0, 1000000000)))
cert.gmtime_adj_notBefore(0)
cert.gmtime_adj_notAfter(10*365*24*60*60)
cert.set_issuer(cert.get_subject())
cert.set_pubkey(k)
cert.sign(k, 'sha1')
#print(cert_dir, CERT_FILE, crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
    with open(os.path.join(cert_dir, CERT_FILE), "wb") as cert_f:
        cert_f.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
    with open(os.path.join(key_dir, KEY_FILE), "wb") as key_f:
        key_f.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k))
clear_folder(DOMAIN_SYS_DIR)
system_cert_domain = os.path.join(DOMAIN_SYS_DIR, CERT_FILE)
shutil.copy2(os.path.join(cert_dir, CERT_FILE), system_cert_domain)
#print(' '.join(['certutil', '-d', 'sql:/home/jnejati/.pki/nssdb','-A','-t', '"C,,"', '-n', domain_name, '-i', system_cert_domain]))
#subprocess.call(['certutil', '-d', 'sql:/home/jnejati/.pki/nssdb','-D','-t', '"C,,"', '-n', domain_name, '-i', system_cert_domain])
#subprocess.call(['certutil', '-d', 'sql:/home/jnejati/.pki/nssdb','-A','-t', '"C,,"', '-n', domain_name, '-i', system_cert_domain])
os.system('certutil -d sql:/home/jnejati/.pki/nssdb -D -t "C,," -n ' + domain_name + ' -i ' + system_cert_domain)
os.system('certutil -d sql:/home/jnejati/.pki/nssdb -A -t "C,," -n ' + domain_name + ' -i ' + system_cert_domain)
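# Note: the certificate is copied into /usr/local/share/ca-certificates and
# imported into the user's NSS database via certutil, so Chrome trusts the
# self-signed certificates for the replayed HTTPS sites.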
def clear_folder(folder):
if os.path.isdir(folder):
for root, dirs, l_files in os.walk(folder):
for f in l_files:
os.unlink(os.path.join(root, f))
for d in dirs:
shutil.rmtree(os.path.join(root, d))
else:
os.makedirs(folder)
def copytree(src, dst, symlinks=False, ignore=None):
for item in os.listdir(src):
s = os.path.join(src, item)
d = os.path.join(dst, item)
if os.path.isdir(s):
shutil.copytree(s, d, symlinks, ignore)
else:
shutil.copy2(s, d)
def copyanything(src, dst):
try:
copytree(src, dst)
except OSError as exc: # python >2.5
if exc.errno == errno.ENOTDIR:
shutil.copy(src, dst)
else: raise
def setup_webserver(domain, archive_dir, _d_ip_dict):
print('domain ', domain)
os.system('pkill nginx')
time.sleep(1)
_cert_dir = '/home/jnejati/PLTSpeed/confs/certs'
_key_dir = '/home/jnejati/PLTSpeed/confs/keys'
_dest = '/var/www/'
_src = os.path.join(archive_dir, domain)
clear_folder(_dest)
clear_folder(_cert_dir)
clear_folder(_key_dir)
copyanything(_src, _dest)
nginx_file_path = '/etc/nginx/nginx.conf'
nginx_f = open(nginx_file_path, 'w')
out = """user nginx;
worker_processes 1;
worker_rlimit_nofile 30000;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx-new.pid;
events {
worker_connections 1024;
}
http {
server_names_hash_bucket_size 4096;
include /etc/nginx/mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
sendfile on;
keepalive_timeout 65;\n"""
for _domain, sd_ip in _d_ip_dict.items():
for _subdomain_ip in sd_ip:
for _subdomain, _ip in _subdomain_ip.items():
if _subdomain == '@':
_site = _domain
else:
_site = _subdomain + '.' + _domain
create_self_signed_cert(_cert_dir, _key_dir, _site)
out = out + """server {
listen %s:80;
listen %s:443 ssl;
server_name %s;
access_log /var/log/nginx/%s.access.log main;
ssl_certificate /home/jnejati/PLTSpeed/confs/certs/%s.crt;
ssl_certificate_key /home/jnejati/PLTSpeed/confs/keys/%s.key;
location / {
root /var/www/%s;
                index index.html index.htm index.php;
}
index index.html index.htm;
}\n""" % (_ip, _ip, _site, _site, _site, _site, _site)
out = out + '}\n'
#print(out)
nginx_f.write(out)
nginx_f.close()
#subprocess.call(['dpkg-reconfigure', 'ca-certificates'])
subprocess.call(['/usr/sbin/nginx', '-c', '/etc/nginx/nginx.conf'])
def setup_ip_subdomain(domain, archive_dir):
_domains = os.listdir(os.path.join(archive_dir, domain))
_d_ip_dict = {}
for i in range(len(_domains)):
_set_ip_alias = ['ifconfig', 'enp1s0f0:' + str(10 + i), '192.168.1.' + str(10 + i), 'up']
subprocess.call(_set_ip_alias)
_interface_id = 10
for _d in _domains:
if not _d == 'trace' and not _d =='screenshot':
_ext = tldextract.extract(_d)
_subdomain = _ext.subdomain
_domain = _ext.domain
_suffix = _ext.suffix
_interface_id += 1
_ip = '192.168.1.' + str(_interface_id)
if not _subdomain == '':
_d_ip_dict.setdefault(_domain + '.' + _suffix, []).append({_subdomain:_ip})
else:
_d_ip_dict.setdefault(_domain + '.' + _suffix, []).append({'@':_ip})
return _d_ip_dict
def setup_nameserver(_d_ip_dict):
bind_file_path = '/etc/bind/named.conf.local'
bind_f = open(bind_file_path, 'w')
bind_f_text = ""
for _domain, sd_ip in _d_ip_dict.items():
bind_f_text = bind_f_text + """zone "%s" IN {
type master;
file "/var/lib/bind/db.%s";
};\n""" % (_domain, _domain)
out = """$TTL 3H
@ IN SOA @ hostmaster.%s. (
0 ; serial
3H ; refresh
1H ; retry
1W ; expire
3H ) ; minimum
@ IN NS ns1.%s.
ns1 IN A 192.168.1.2
""" % (_domain, _domain)
for _subdomain_ip in sd_ip:
for _subdomain, _ip in _subdomain_ip.items():
#print(_subdomain, _domain, _ip)
out = out + '%s IN A %s\n' % (_subdomain, _ip)
#print(out)
target = open('/var/lib/bind/db.%s' % _domain, 'w')
target.write(out)
target.close()
bind_f.write(bind_f_text)
bind_f.close()
subprocess.call(['/etc/init.d/bind9', 'restart'])
#setup_ip_subdomain('stackoverflow.com', '/home/jnejati/PLTSpeed/record/archive')
``` |
{
"source": "jnejc/Mercury_control",
"score": 3
} |
#### File: Mercury_control/gui/ips.py
```python
import tkinter as tk # Gui package
from tkinter import ttk # Fancier widgets
from tkinter import messagebox
from gui.funct import Strip_T, List_sensors
# Logging
import datetime # A format for dates and time
import csv # For importing and exporting CSV files
import os # For compiling directory paths
import logging
logger = logging.getLogger('log') # Set the logger
# Global variables
LOG_TIME = 20*60*1000 # 20 min
SWITCH_TOLERANCE = 0.001 # Heater switch tolerance
MAIN_PSU = 'GRPZ' # Main board for driving magnet
MAIN_LVL = 'DB5.L1' # Main board for cryo level sensors
MAIN_TEMP = 'MB1.T1' # Main board for temperature sensor
class IPS_frame(tk.Frame):
    '''The control frame for IPS'''
def __init__(self, parent, ports):
tk.Frame.__init__(self, parent, width=300, height=900, padx=5, pady=5)
self.parent = parent
self.ports = ports
self.Widgets()
# Fill in parameters from iPS
self.Load_parameters()
def Widgets(self):
'''Shapes the frame's widgets'''
# Label
self.label_title = ttk.Label(self, text='Mercury iPS')
self.label_title.config(font=('Courier', 16))
self.label_title.pack(side='top', fill='x', padx=10)
# Status framerts
self.frame_status = Status(self, self.ports)
self.frame_status.pack(side='top', fill='x', padx=5, pady=5)
# Set frame
self.frame_set = SetF(self, self.ports)
self.frame_set.pack(side='top', fill='x', padx=5, pady=5)
# Switch frame
self.frame_switch = Switch(self, self.ports)
self.frame_switch.pack(side='top', fill='x', padx=5, pady=5)
# Ramp frame
self.frame_ramp = Ramp(self, self.ports)
self.frame_ramp.pack(side='top', fill='x', padx=5, pady=5)
# Sensor frame
self.frame_sensors = Sensors(self, self.ports)
self.frame_sensors.pack(side='top', fill='x', padx=5, pady=5)
# Select frame
self.frame_select = Select(self, self.ports)
self.frame_select.pack(side='top', fill='x', padx=5 , pady=5)
# Load parameters
self.button_load = ttk.Button(self, text='Load from iPS',
command=self.Load_parameters, width=20)
self.button_load.pack(side='top',pady=10)
def Load_parameters(self):
'''Talks to IPS and refreshes all values in entry boxes'''
logger.info('Loading IPS parameters from:'+
self.frame_select.var_sens.get())
flog = self.ports.Get_Fstatus(self.frame_select.var_sens.get())
fset = self.ports.Get_Fset(self.frame_select.var_sens.get())
fmode = self.ports.Get_Fmode(self.frame_select.var_sens.get())
fsensors = self.ports.Get_Fsensors(self.frame_select.var_sens.get(),
self.frame_select.var_lvl.get(),self.frame_select.var_temp.get())
self.frame_status.Update(flog)
self.frame_set.Update(fset)
self.frame_switch.Update(fmode[0])
self.frame_ramp.Update(fmode[1])
self.frame_sensors.Update(fsensors)
def Monitor_ramp(self):
'''Talks to iPS and refreshes values in ramp and status'''
flog = self.ports.Get_Fstatus(self.frame_select.var_sens.get())
fmode = self.ports.Get_Fmode(self.frame_select.var_sens.get())
self.frame_status.Update(flog)
self.frame_switch.Update(fmode[0])
self.frame_ramp.Update(fmode[1])
class Status(tk.LabelFrame):
'''Status frame and inner objects'''
def __init__(self, parent, ports):
'''Calls init method of LabelFrame and fills up frame'''
tk.LabelFrame.__init__(self, parent, text='Status', padx=10, pady=5)
self.parent = parent
self.ports = ports
self.Widgets()
def Widgets(self):
'''Shapes the frame's widgets'''
# Labels
ttk.Label(self, text='Field').grid(row=0, column=0)
ttk.Label(self, text='Field target').grid(row=0, column=2)
ttk.Label(self, text='Voltage').grid(row=2, column=0)
ttk.Label(self, text='Current').grid(row=2, column=2)
ttk.Label(self, text='Actual field rate').grid(row=4, column=0)
ttk.Label(self, text='Persistent field').grid(row=4, column=2)
# Spacer
ttk.Label(self, text=' ').grid(row=0,column=1)
        self.grid_columnconfigure(1, weight=1) # Allows stretch and centering
# Entries
self.var_field = tk.StringVar(self)
self.entry_field = ttk.Entry(self, textvariable=self.var_field,
justify='center', width=14, state='readonly')
self.entry_field.grid(row=1, column=0)
self.var_fset = tk.StringVar(self)
self.entry_fset = ttk.Entry(self, textvariable=self.var_fset,
justify='center', width=14, state='readonly')
self.entry_fset.grid(row=1, column=2)
self.var_voltage = tk.StringVar(self)
self.entry_voltage = ttk.Entry(self, textvariable=self.var_voltage,
justify='center', width=14, state='readonly')
self.entry_voltage.grid(row=3, column=0)
self.var_current = tk.StringVar(self)
self.entry_current = ttk.Entry(self, textvariable=self.var_current,
justify='center', width=14, state='readonly')
self.entry_current.grid(row=3, column=2)
self.var_frate = tk.StringVar(self)
self.entry_frate = ttk.Entry(self, textvariable=self.var_frate,
justify='center', width=14, state='readonly')
self.entry_frate.grid(row=5, column=0)
self.var_pfield = tk.StringVar(self)
self.entry_pfield = ttk.Entry(self, textvariable=self.var_pfield,
justify='center', width=14, state='readonly')
self.entry_pfield.grid(row=5, column=2)
def Update(self, flog):
'''Updates values in entries from iPS'''
logger.info('Updating IPS status: '+ str(flog))
self.var_field.set(flog[1])
self.var_fset.set(flog[2])
self.var_voltage.set(flog[3])
self.var_current.set(flog[4])
self.var_frate.set(flog[5])
self.var_pfield.set(flog[6])
class SetF(tk.LabelFrame):
'''Set field frame and inner objects'''
def __init__(self, parent, ports):
'''Calls init method of LabelFrame and fills up frame'''
tk.LabelFrame.__init__(self, parent, text='Set Field', padx=10,
pady=5)
self.parent = parent
self.ports = ports
self.Widgets()
def Widgets(self):
'''Shapes the frame's widgets'''
# Labels
ttk.Label(self, text='Target field:').grid(row=0, column=0, sticky='E')
ttk.Label(self, text='Ramp rate:').grid(row=1, column=0, sticky='E')
ttk.Label(self, text='Confirm').grid(row=2, column=0, sticky='E')
ttk.Label(self, text='T').grid(row=0, column=3, sticky='W')
ttk.Label(self, text='T/min').grid(row=1, column=3, sticky='W')
# Spacer
ttk.Label(self, text=' ').grid(row=0, column=1)
        self.grid_columnconfigure(1, weight=1) # Allows stretch and centering
# Entries
self.var_set = tk.StringVar(self)
self.entry_set = ttk.Entry(self, width=7, textvariable=self.var_set,
justify='right')
self.entry_set.grid(row=0, column=2)
self.var_rate = tk.StringVar(self)
self.entry_rate = ttk.Entry(self, width=7, textvariable=self.var_rate,
justify='right')
self.entry_rate.grid(row=1, column=2)
# Button
self.button_set = ttk.Button(self, text='Set field',
command=self.Set, width=10)
self.button_set.grid(row=2, column=2, columnspan=2)
def Set(self):
'''Confirms written values and sends to iPS'''
values = [
self.var_set.get(),
self.var_rate.get()
]
logger.info(('Setting Field:', values))
sens = self.parent.frame_select.var_sens.get()
logger.debug(sens)
self.ports.Set_Fset(sens, values)
def Update(self, fset):
'''Updates previously set values from iPS'''
logger.info('Updating previously set field values from IPS: '+str(fset))
self.var_set.set(Strip_T(fset[0]))
self.var_rate.set(Strip_T(fset[1]))
class Switch(tk.LabelFrame):
'''Switch heater frame and inner objects'''
def __init__(self, parent, ports):
'''Calls init method of LabelFrame and fills up frame'''
tk.LabelFrame.__init__(self, parent, text='Switch heater mode',
padx=10, pady=5)
self.parent = parent
self.ports = ports
self.Widgets()
def Widgets(self):
'''Shapes the frame's widgets'''
# Spacer
        self.grid_columnconfigure(0, weight=1) # Allows stretch and centering
# Radio button switch heater
self.list_switch = ['ON', 'OFF']
self.var_switch = tk.IntVar(self)
self.var_switch.set(-1) # Doesnt show an option on start
self.radio_switch1 = ttk.Radiobutton(self, text='On', value=0,
variable=self.var_switch)
self.radio_switch1.grid(row=1, column=0, sticky='W')
self.radio_switch2 = ttk.Radiobutton(self, text='Off', value=1,
variable=self.var_switch)
self.radio_switch2.grid(row=2, column=0, sticky='W')
# Button
self.button_set = ttk.Button(self, text='Set',
command=self.Set, width=10)
self.button_set.grid(row=8, column=0)
def Set(self):
'''Confirms written values and sends to iPS'''
# Get params
sens = self.parent.frame_select.var_sens.get()
value = self.list_switch[self.var_switch.get()]
# Get PSU and magnet fields for safety check
field_psu = self.ports.ips.__dict__[sens].Read_option(
'FLD', warn=False)
field_pers = self.ports.ips.__dict__[sens].Read_option(
'PFLD', warn=False)
field_psu = float(field_psu[:-1])
field_pers = float(field_pers[:-1])
if abs(field_psu-field_pers) > SWITCH_TOLERANCE:
logger.error('Persistent field differs from current field')
messagebox.showerror('Could not switch heater!',
'The persistent field and current field are not the same!')
else :
# Log
logger.info('Setting switch heater mode to: '+ value)
# Send value to iPS
if not self.ports.ips.__dict__[sens].Set_option('SWHT', value):
logger.error('Failed to set switch heater to '+value)
def Update(self, mode):
'''Updates values from IPS'''
logger.info('Updating switch heater mode: '+ str(mode))
if mode == 'ON': self.var_switch.set(0)
elif mode == 'OFF': self.var_switch.set(1)
else: logger.warning('Unknown switch reply: '+ mode)
# Study this guy....
class Ramp(tk.LabelFrame):
'''Ramp frame and inner objects'''
def __init__(self, parent, ports):
'''Calls init method of LabelFrame and fills up frame'''
tk.LabelFrame.__init__(self, parent, text='Ramp mode', padx=10,
pady=5)
self.parent = parent
self.ports = ports
# List of possible states
self.list_ramp = ['HOLD', 'RTOS', 'RTOZ', 'CLMP']
self.dict_ramp = {
'HOLD': 'Hold',
'RTOS': 'To set',
'RTOZ': 'To zero',
'CLMP': 'Clamp'
}
self.var_ramp = tk.StringVar(self)
self.Widgets()
def Widgets(self):
'''Shapes the frame's widgets'''
# Spacer
ttk.Label(self, text=' ').grid(row=1, column=1)
        self.grid_columnconfigure(1, weight=1) # Allows stretch and centering
# Buttons
self.button_hold = ttk.Button(self, text='Hold',
command=lambda:self.Set('HOLD'), width=12)
self.button_hold.grid(row=1, column=0)
self.button_clamp = ttk.Button(self, text='Clamp',
command=lambda:self.Set('CLMP'), width=12)
self.button_clamp.grid(row=1, column=2)
self.button_to_set = ttk.Button(self, text='To set',
command=lambda:self.Set('RTOS'), width=12)
self.button_to_set.grid(row=2, column=0)
self.button_to_zero = ttk.Button(self, text='To zero',
command=lambda:self.Set('RTOZ'), width=12)
self.button_to_zero.grid(row=2, column=2)
# Show last state
ttk.Label(self, text='Last state:').grid(row=0, column=0, sticky='E')
self.entry_ramp = ttk.Entry(self, textvariable=self.var_ramp,
justify='center', width=10, state='readonly')
self.entry_ramp.grid(row=0, column=2)
def Set(self, mode):
'''Sends the button command to iPS'''
# Get sensor
sens = self.parent.frame_select.var_sens.get()
# Log
logger.info('Setting ramp mode: '+mode)
self.var_ramp.set(self.dict_ramp[mode])
# Send command
if not self.ports.ips.__dict__[sens].Set_option('ACTN', mode):
logger.error('Failed to set switch heater to '+mode)
self.Monitor()
def Monitor(self):
        '''Keeps refreshing the ramp window until Hold is reached'''
self.parent.Monitor_ramp()
        # Repeat until Hold is reached
if self.var_ramp.get() not in ['Hold', 'Clamp']:
self.repeat = self.after(10*1000, self.Monitor)
else: self.repeat = None # Remove reference to repeat event
def Update(self, mode):
'''Updates values from iPS'''
logger.info('Updating ramp mode: '+ str(mode))
self.var_ramp.set(self.dict_ramp[mode])
class Sensors(tk.LabelFrame):
'''Sensor frame and inner objects'''
def __init__(self, parent, ports):
'''Calls init method of LabelFrame and fills up frame'''
tk.LabelFrame.__init__(self, parent, text='Sensors', padx=10, pady=5)
self.parent = parent
self.ports = ports
self.Widgets()
self.file_end = '_Sensor.log'
        # Create directory if non-existent
self.file_directory = os.path.join('log_files','sensors')
try: os.mkdir('log_files')
except: pass
try: os.mkdir(self.file_directory)
except: pass
# Start logging on self
self.logging = self.after(10, self.Log)
def Widgets(self):
'''Shapes the frame's widgets'''
# Labels
ttk.Label(self, text='Helium level').grid(row=0, column=0)
ttk.Label(self, text='Nitrogen level').grid(row=0, column=2)
#ttk.Label(self, text='Magnet temperature').grid(row=3, column=0,
# columnspan=3)
# Spacer
ttk.Label(self, text=' ').grid(row=0, column=1)
        self.grid_columnconfigure(1, weight=1) # Allows stretch and centering
# Entries
self.var_resistance = tk.StringVar(self)
self.entry_resistance = ttk.Entry(self, width=12, state='readonly',
textvariable=self.var_resistance, justify='center')
self.entry_resistance.grid(row=1, column=0)
self.var_freq = tk.StringVar(self)
self.entry_freq = ttk.Entry(self, width=12, state='readonly',
textvariable=self.var_freq, justify='center')
self.entry_freq.grid(row=1, column=2)
# Temperature sensor not used yet
"""
self.var_temp = tk.StringVar(self)
self.entry_temp = ttk.Entry(self, width=12, state='readonly',
textvariable=self.var_temp, justify='center')
self.entry_temp.grid(row=4, column=0, columnspan=3)
"""
# Status bars
self.var_helium = tk.DoubleVar(self)
self.bar_helium = ttk.Progressbar(self, variable=self.var_helium,
length=68)
self.bar_helium.grid(row=2, column=0)
self.var_nitrogen = tk.DoubleVar(self)
self.bar_nitrogen = ttk.Progressbar(self, variable=self.var_nitrogen,
length=68)
self.bar_nitrogen.grid(row=2, column=2)
def Log(self):
'''Function for continous logging of sensor data into file'''
# Fetch fresh values
log = self.ports.Get_Fsens(self.parent.frame_select.var_lvl.get())
# Update self
self.Update(log[1:])
# Define file parameters
file_name = log[0].strftime('%Y%m%d') + self.file_end
file_path = os.path.join(self.file_directory, file_name)
with open(file_path, 'a', newline='') as f:
writer = csv.writer(f, delimiter=';')
# Add line
line = []
line.append(log[0].strftime('%H:%M:%S'))
for i in log[1:]:
try:
line.append(float(i[:-1])) # Strip the % unit
except TypeError:
line.append('') # Write empty string, if None
writer.writerow(line)
# Continue logging
self.logging = self.after(LOG_TIME, self.Log)
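    # --- Illustrative only: reading one day's sensor log back ------------------
    # Each daily file holds ';'-separated rows of HH:MM:SS, helium % and
    # nitrogen %. The date in the file name below is an example.
    #
    #   import csv, os
    #   with open(os.path.join('log_files', 'sensors', '20240101_Sensor.log')) as f:
    #       for row in csv.reader(f, delimiter=';'):
    #           print(row)  # e.g. ['12:00:00', '87.3', '64.1']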
def Update(self, fsensors):
'''Updates values from iPS'''
logger.info('Updating IPS sensor status: '+str(fsensors))
try:
self.var_helium.set(float(fsensors[0][:-1]))
self.var_nitrogen.set(float(fsensors[1][:-1]))
except: pass
# Use percentage values instead
self.var_resistance.set(fsensors[0])
self.var_freq.set(fsensors[1])
try:
self.parent.parent.menu.cryo_app.Update(datetime.datetime.now(), float(fsensors[0][:-1]), float(fsensors[1][:-1]))
except Exception as e:
pass # usual error is that the cryo log does not exist.
# print("Updating cryo log failed with error", e) # for debuging
#Cryo_application.Update(Cryo_application, datetime.datetime.timestamp(datetime.datetime.now()), float(fsensors[0][:-1]), float(fsensors[1][:-1]))
#self.var_resistance.set(fsensors[2])
#self.var_freq.set(fsensors[3])
# Temperature sensor not used yet
#self.var_temp.set(fsensors[4])
class Select(tk.Frame):
'''Sensor frame and inner objects'''
def __init__(self, parent, ports):
'''Calls init method of LabelFrame and fills up frame'''
tk.Frame.__init__(self, parent)
self.parent = parent
self.ports = ports
self.Widgets()
def Widgets(self):
'''Shapes the frame's widgets'''
# Labels
ttk.Label(self, text='PSU board select').grid(row=0, column=0,
sticky='E')
ttk.Label(self, text='Level board').grid(row=1, column=0,
sticky='E')
ttk.Label(self, text='Temperature board').grid(row=2, column=0,
sticky='E')
# Spacer
ttk.Label(self, text=' ').grid(row=0, column=1)
        self.grid_columnconfigure(1, weight=1) # Allows stretch and centering
# PSU board/sens
self.list_sens = List_sensors('PSU', self.ports.ips)
self.var_sens = tk.StringVar(self)
self.var_sens.set(MAIN_PSU) # Default board
self.combo_sens = ttk.Combobox(self, width=7, state='disabled',
values=self.list_sens, textvar=self.var_sens)
self.combo_sens.grid(row=0, column=2)
# Disabled to prevent tinkering, can change to 'readonly'
# Select lvl board
self.list_lvl = List_sensors('LVL', self.ports.ips)
self.var_lvl = tk.StringVar(self)
self.var_lvl.set(MAIN_LVL) # Default board
self.combo_lvl = ttk.Combobox(self, width=7, state='disabled',
values=self.list_lvl, textvar=self.var_lvl)
self.combo_lvl.grid(row=1, column=2)
# Disabled to prevent tinkering, can change to 'readonly'
# Select temperature board
self.list_temp = List_sensors('TEMP', self.ports.ips)
self.var_temp = tk.StringVar(self)
self.var_temp.set(MAIN_TEMP) # Default board
self.combo_temp = ttk.Combobox(self, width=7, state='disabled',
values=self.list_temp, textvar=self.var_temp)
self.combo_temp.grid(row=2, column=2)
# Disabled to prevent tinkering, can change to 'readonly'
```
#### File: Mercury_control/gui/itc.py
```python
import tkinter as tk # Gui package
from tkinter import ttk # Fancier widgets
from tkinter import messagebox
from gui.funct import Strip_T, List_sensors
import logging
logger = logging.getLogger('log') # Set the logger
# Global variables
MAIN_SENSOR = 'MB1.T1' # Sensor used for temperature loop
class ITC_frame(tk.Frame):
    '''The control frame for ITC'''
def __init__(self, parent, ports):
tk.Frame.__init__(self, parent, width=300, height=900, padx=5, pady=5)
self.parent = parent
self.ports = ports
self.Widgets()
# Fill in parameters from iTC
self.Load_parameters()
def Widgets(self):
'''Shapes the frame's widgets'''
# Label
self.label_title = ttk.Label(self, text='Mercury iTC')
self.label_title.config(font=('Courier', 16))
self.label_title.pack(side='top', fill='x', padx=10)
# Status frame
self.frame_status = Status(self, self.ports)
self.frame_status.pack(side='top', fill='x', padx=5, pady=5)
# Set frame
self.frame_set = SetT(self, self.ports)
self.frame_set.pack(side='top', fill='x', padx=5, pady=5)
# Manual frame
self.frame_manual = Manual(self, self.ports)
self.frame_manual.pack(side='top', fill='x', padx=5, pady=5)
# # Loop frame
# self.frame_loop = Loop(self, self.ports)
# self.frame_loop.pack(side='top', fill='x', padx=5, pady=5)
# # Heater limits frame
# self.frame_limits = Limits(self, self.ports)
# self.frame_limits.pack(side='top', fill='x', padx=5, pady=5)
# Select sensor frame
self.frame_sensor = Select(self, self.ports)
self.frame_sensor.pack(side='top', fill='x', padx=5, pady=5)
self.var_sens = self.frame_sensor.var_sens
# Load parameters
self.button_load = ttk.Button(self, text='Load from iTC',
command=self.Load_parameters, width=20)
self.button_load.pack(side='top',pady=5)
def Load_parameters(self):
'''Talks to ITC and refreshes all values in entry boxes'''
logger.info('Loading ITC parameters from'+ self.var_sens.get())
tset = self.ports.Get_Tset(self.var_sens.get())
tmanual = self.ports.Get_Tmanual(self.var_sens.get())
tloop = self.ports.Get_Tloop(self.var_sens.get())
tstatus = self.ports.Get_Tstatus(self.var_sens.get(), tloop[3])
#tlimits = self.ports.Get_Tlimits(self.var_sens.get(), tloop[3])
self.frame_status.Update(tstatus)
self.frame_set.Update(tset)
self.frame_manual.Update(tmanual)
#self.frame_loop.Update(tloop)
#self.frame_limits.Update(tlimits)
class Status(tk.LabelFrame):
'''Status frame and inner objects'''
def __init__(self, parent, ports):
'''Calls init method of LabelFrame and fills up frame'''
tk.LabelFrame.__init__(self, parent, text='Status', padx=10, pady=5)
self.parent = parent
self.ports = ports
self.Widgets()
def Widgets(self):
'''Shapes the frame's widgets'''
# Labels
ttk.Label(self, text='Temperature').grid(row=0, column=0)
ttk.Label(self, text='Setpoint').grid(row=0, column=2)
ttk.Label(self, text='Heater %').grid(row=2, column=0)
ttk.Label(self, text='Flow %').grid(row=2, column=2)
ttk.Label(self, text='Heater Power').grid(row=4, column=0)
ttk.Label(self, text='Needle position').grid(row=4, column=2)
# Spacer
ttk.Label(self, text=' ').grid(row=0, column=1)
        self.grid_columnconfigure(1, weight=1) # Allows stretch and centering
# Entries
self.var_temp = tk.StringVar(self)
self.entry_temp = ttk.Entry(self, textvariable=self.var_temp,
justify='center', width=12, state='readonly')
self.entry_temp.grid(row=1, column=0)
self.var_tset = tk.StringVar(self)
self.entry_tset = ttk.Entry(self, textvariable=self.var_tset,
justify='center', width=12, state='readonly')
self.entry_tset.grid(row=1, column=2)
self.var_power = tk.StringVar(self)
self.entry_power = ttk.Entry(self, textvariable=self.var_power,
justify='center', width=12, state='readonly')
self.entry_power.grid(row=5, column=0)
self.var_needle = tk.StringVar(self)
self.entry_needle = ttk.Entry(self, textvariable=self.var_needle,
justify='center', width=12, state='readonly')
self.entry_needle.grid(row=5, column=2)
# Status bars
self.var_heater = tk.DoubleVar(self)
self.bar_heater = ttk.Progressbar(self, variable=self.var_heater,
length=68)
self.bar_heater.grid(row=3, column=0)
self.var_flow = tk.DoubleVar(self)
self.bar_flow = ttk.Progressbar(self, variable=self.var_flow,
length=68)
self.bar_flow.grid(row=3, column=2)
def Update(self, tstatus):
'''Updates values from iTC
(time, temperature, setpoint, heater, flow, power)'''
logger.info('Updating iTC status: '+ str(tstatus))
self.var_temp.set(tstatus[1])
self.var_tset.set(tstatus[2])
self.var_heater.set(tstatus[3])
self.var_flow.set(tstatus[4])
self.var_power.set(tstatus[5])
self.var_needle.set(tstatus[4])
class SetT(tk.LabelFrame):
'''Set temperature frame and inner objects'''
def __init__(self, parent, ports):
'''Calls init method of LabelFrame and fills up frame'''
tk.LabelFrame.__init__(self, parent, text='Set Temperature', padx=10,
pady=5)
self.parent = parent
self.ports = ports
self.Widgets()
def Widgets(self):
'''Shapes the frame's widgets'''
# Labels
ttk.Label(self, text='Set T:').grid(row=0, column=0, sticky='E')
ttk.Label(self, text='Ramp rate:').grid(row=1, column=0, sticky='E')
ttk.Label(self, text='Enable ramp').grid(row=2, column=0, sticky='E')
ttk.Label(self, text='Confirm').grid(row=3, column=0, sticky='E')
ttk.Label(self, text='K').grid(row=0, column=3, sticky='W')
ttk.Label(self, text='K/min').grid(row=1, column=3, sticky='W')
# Spacer
ttk.Label(self, text=' ').grid(row=0, column=1)
        self.grid_columnconfigure(1, weight=1) # Allows stretch and centering
# Checkbuttons
self.var_ramp = tk.BooleanVar(self)
self.check_ramp = ttk.Checkbutton(self, variable=self.var_ramp)
self.check_ramp.grid(row=2, column=2, columnspan=2)
# Entries
self.var_set = tk.StringVar(self)
self.entry_set = ttk.Entry(self, width=7, textvariable=self.var_set,
justify='right')
self.entry_set.grid(row=0, column=2)
self.var_rate = tk.StringVar(self)
self.entry_rate = ttk.Entry(self, width=7, textvariable=self.var_rate,
justify='right')
self.entry_rate.grid(row=1, column=2)
# Button
self.button_set = ttk.Button(self, text='Set T',
command=self.Set, width=10)
self.button_set.grid(row=3, column=2, columnspan=2)
def Set(self):
'''Confirms written values and sends to iTC'''
values = [
self.var_set.get(),
self.var_rate.get(),
'ON' if self.var_ramp.get() else 'OFF'
]
logger.info('Setting iTC values: ' + str(values))
self.ports.Set_Tset(self.parent.var_sens.get(), values)
def Update(self, tset):
'''Updates previously set values from iTC'''
logger.info('Updating previous set temperature values: '+ str(tset))
self.var_set.set(Strip_T(tset[0]))
self.var_rate.set(Strip_T(tset[1]))
self.var_ramp.set(True if tset[2] == 'ON' else False)
class Loop(tk.LabelFrame):
'''Loop frame and inner objects'''
def __init__(self, parent, ports):
'''Calls init method of LabelFrame and fills up frame'''
tk.LabelFrame.__init__(self, parent, text='Loop control', padx=10,
pady=5)
self.parent = parent
self.ports = ports
self.Widgets()
def Widgets(self):
'''Shapes the frame's widgets'''
# Labels
ttk.Label(self, text='P:').grid(row=0, column=0, sticky='E')
ttk.Label(self, text='I:').grid(row=1, column=0, sticky='E')
ttk.Label(self, text='D:').grid(row=2, column=0, sticky='E')
ttk.Label(self, text='Heater').grid(row=3, column=0, sticky='E')
ttk.Label(self, text='Aux').grid(row=4, column=0, sticky='E')
ttk.Label(self, text='min').grid(row=1, column=3, sticky='W')
ttk.Label(self, text='min').grid(row=2, column=3, sticky='W')
# Spacer
ttk.Label(self, text=' ').grid(row=0, column=1)
        self.grid_columnconfigure(1, weight=1) # Allows stretch and centering
# Entries
self.var_P = tk.StringVar(self)
self.entry_P = ttk.Entry(self, width=7, textvariable=self.var_P,
justify='right')
self.entry_P.grid(row=0, column=2, sticky='E')
self.var_I = tk.StringVar(self)
self.entry_I = ttk.Entry(self, width=7, textvariable=self.var_I,
justify='right')
self.entry_I.grid(row=1, column=2, sticky='E')
self.var_D = tk.StringVar(self)
self.entry_D = ttk.Entry(self, width=7, textvariable=self.var_D,
justify='right')
self.entry_D.grid(row=2, column=2, sticky='E')
# Combo box
self.list_heat = List_sensors('HTR', self.ports.itc)
self.var_heat = tk.StringVar(self)
self.combo_heat = ttk.Combobox(self, width=8, state='readonly',
values=self.list_heat, textvar=self.var_heat)
self.combo_heat.grid(row=3, column=2, columnspan=2, sticky='W')
self.list_aux = List_sensors('AUX', self.ports.itc)
self.var_aux = tk.StringVar(self)
self.combo_aux = ttk.Combobox(self, width=8, state='readonly',
values=self.list_aux, textvar=self.var_aux)
self.combo_aux.grid(row=4, column=2, columnspan=2, sticky='W')
# Button
self.button_set = ttk.Button(self, text='Set',
command=self.Set, width=10)
self.button_set.grid(row=5, column=0, columnspan=4)
def Set(self):
'''Confirms written values and sends to iTC'''
logger.info('Updating loop parameters')
def Update(self, tloop):
'''Updates values from iTC'''
logger.info('Updating loop control values: '+ str(tloop))
self.var_P.set(tloop[0])
self.var_I.set(tloop[1])
self.var_D.set(tloop[2])
self.var_heat.set(tloop[3])
self.var_aux.set(tloop[4])
class Manual(tk.LabelFrame):
'''Manual heater and flow control commands'''
def __init__(self, parent, ports):
'''Calls init method of LabelFrame and fills up frame'''
tk.LabelFrame.__init__(self, parent, text='Manual control',
padx=10, pady=5)
self.parent = parent
self.ports = ports
self.Widgets()
def Widgets(self):
'''Shapes the frame's widgets'''
# Labels
ttk.Label(self, text='Heater %').grid(row=0, column=0)
ttk.Label(self, text='Flow %').grid(row=0, column=2)
# Spacer
ttk.Label(self, text=' ').grid(row=0,column=1)
        self.grid_columnconfigure(1, weight=1) # Allows stretch and centering
# Entries
self.var_heater = tk.StringVar(self)
self.entry_heater = ttk.Entry(self, textvariable=self.var_heater,
justify='center', width=12)
self.entry_heater.grid(row=1, column=0)
self.var_flow = tk.StringVar(self)
self.entry_flow = ttk.Entry(self, textvariable=self.var_flow,
justify='center', width=12)
self.entry_flow.grid(row=1, column=2)
# Buttons
self.button_heater_man = ttk.Button(self, text='Set heater',
command=self.Set_heater, width=12)
self.button_heater_man.grid(row=2, column=0)
self.button_flow_man = ttk.Button(self, text='Set flow',
command=self.Set_flow, width=12)
self.button_flow_man.grid(row=2, column=2)
self.button_heater_auto = ttk.Button(self, text='Auto heater',
command=self.Auto_heater, width=12)
self.button_heater_auto.grid(row=3, column=0)
self.button_flow_auto = ttk.Button(self, text='Auto flow',
command=self.Auto_flow, width=12)
self.button_flow_auto.grid(row=3, column=2)
# Check buttons
self.var_heater_check = tk.BooleanVar(self)
self.check_heater = ttk.Checkbutton(self, state='disabled',
variable=self.var_heater_check)
self.check_heater.grid(row=4, column=0)
self.var_flow_check = tk.BooleanVar(self)
self.check_flow = ttk.Checkbutton(self, state='disabled',
variable=self.var_flow_check)
self.check_flow.grid(row=4, column=2)
def Set_heater(self):
'''Confirms written values and sends to iTC'''
# Get params
sens = self.parent.var_sens.get()
value = self.var_heater.get()
# Log
logger.info('Setting manual heater to '+value)
# Change to manual (PID controll OFF)
if self.ports.itc.__dict__[sens].Set_option('ENAB', 'OFF'):
self.var_heater_check.set(False)
# Set new value
if not self.ports.itc.__dict__[sens].Set_option('HSET', value):
logger.error('Failed to set heater to '+value)
else: logger.error('Failed to disable PID control')
def Set_flow(self):
'''Confirms written values and sends to iTC'''
# Get params
sens = self.parent.var_sens.get()
value = self.var_flow.get()
# Log
logger.info('Setting manual flow to: '+value)
# Change to manual (flow control OFF)
if self.ports.itc.__dict__[sens].Set_option('FAUT', 'OFF'):
self.var_flow_check.set(False)
# Set new value
if not self.ports.itc.__dict__[sens].Set_option('FLSET', value):
logger.error('Failed to set flow to '+value)
else: logger.error('Failed to disable auto flow')
def Auto_heater(self):
'''Enables automatic heater control'''
# get current sensor
sens = self.parent.var_sens.get()
# Log
logger.info('Setting heater control to automatic')
        # Send to iTC, edit checkbox when successful
if self.ports.itc.__dict__[sens].Set_option('ENAB', 'ON'):
self.var_heater_check.set(True)
else: logger.error('Failed to enable auto heater control!')
def Auto_flow(self):
'''Enables automatic flow control'''
# get current sensor
sens = self.parent.var_sens.get()
# Log
logger.info('Setting flow control to automatic')
        # Send to iTC, edit checkbox when successful
if self.ports.itc.__dict__[sens].Set_option('FAUT', 'ON'):
self.var_flow_check.set(True)
else: logger.error('Failed to enable auto flow control!')
def Update(self, tmanual):
'''Updates values from iTC
(heater, flow, heater_enable, flow_enable)'''
        logger.info('Updating manual control values: ' + str(tmanual))
self.var_heater.set(tmanual[0])
self.var_flow.set(tmanual[1])
self.var_heater_check.set(True if tmanual[2] == 'ON' else False)
self.var_flow_check.set(True if tmanual[3] == 'ON' else False)
class Limits(tk.LabelFrame):
'''Limits frame and inner objects'''
def __init__(self, parent, ports):
'''Calls init method of LabelFrame and fills up frame'''
tk.LabelFrame.__init__(self, parent, text='Heating limits',
padx=10, pady=5)
self.parent = parent
self.ports = ports
self.Widgets()
def Widgets(self):
'''Shapes the frame's widgets'''
# Labels
ttk.Label(self, text='Heater limit:').grid(row=0, column=0, sticky='E')
ttk.Label(self, text='Max temp limit:').grid(row=1, column=0,
sticky='E')
ttk.Label(self, text='Min temp limit:').grid(row=2, column=0,
sticky='E')
ttk.Label(self, text='V').grid(row=0, column=3, sticky='W')
ttk.Label(self, text='K').grid(row=1, column=3, sticky='W')
ttk.Label(self, text='K').grid(row=2, column=3, sticky='W')
# Spacer
ttk.Label(self, text=' ').grid(row=0, column=1)
        self.grid_columnconfigure(1, weight=1) # Allows stretch and centering
# Entries
self.var_heat = tk.StringVar(self)
self.entry_heat = ttk.Entry(self, width=7, textvariable=self.var_heat,
justify='right')
self.entry_heat.grid(row=0, column=2)
self.var_tmax = tk.StringVar(self)
self.entry_tmax = ttk.Entry(self, width=7, textvariable=self.var_tmax,
justify='right')
self.entry_tmax.grid(row=1, column=2)
self.var_tmin = tk.StringVar(self)
self.entry_tmin = ttk.Entry(self, width=7, textvariable=self.var_tmin,
justify='right')
self.entry_tmin.grid(row=2, column=2)
# Button
self.button_set = ttk.Button(self, text='Set',
command=self.Set, width=10)
self.button_set.grid(row=4, column=0, columnspan=4)
def Set(self):
'''Confirms written values and sends to iTC'''
logger.info('Setting heater limits')
def Update(self, tlimits):
'''Updates heating limits values from iTC'''
logger.info('Updating heating limits values: '+ str(tlimits))
self.var_heat.set(tlimits[0])
self.var_tmax.set(Strip_T(tlimits[1]))
self.var_tmin.set(Strip_T(tlimits[2]))
class Select(tk.Frame):
'''Sensor frame and inner objects'''
def __init__(self, parent, ports):
'''Calls init method of LabelFrame and fills up frame'''
tk.Frame.__init__(self, parent)
self.parent = parent
self.ports = ports
self.Widgets()
def Widgets(self):
'''Shapes the frame's widgets'''
# Labels
ttk.Label(self, text='Sensor select').grid(row=0,
column=0, sticky='E')
# Spacer
ttk.Label(self, text=' ').grid(row=0, column=1)
        self.grid_columnconfigure(1, weight=1) # Allows stretch and centering
# Select sensor frame
self.list_sens = List_sensors('TEMP', self.ports.itc)
self.var_sens = tk.StringVar(self)
#self.parent.var_sens = self.var_sens # give var_sens to itc frame
self.var_sens.set(MAIN_SENSOR) # Default board
self.combo_sens = ttk.Combobox(self, state='disabled',
values=self.list_sens, textvar=self.var_sens, width=7)
# Disabled to prevent tinkering, can change to 'readonly'
self.combo_sens.grid(row=0, column=2)
``` |
{
"source": "JNero/2017_Mathematical_Contest",
"score": 3
} |
#### File: JNero/2017_Mathematical_Contest/Dijkstra.py
```python
G = {1:{1:0, 2:1, 3:12},
2:{2:0, 3:9, 4:3},
3:{3:0, 5:5},
4:{3:4, 4:0, 5:13, 6:15},
5:{5:0, 6:4},
6:{6:0}}
# Each iteration settles the unvisited vertex closest to the source and then
# relaxes the distances of its neighbours; repeating this yields the shortest
# path from the source to every other vertex (a greedy algorithm).
def Dijkstra(G,v0,INF=999):
""" 使用 Dijkstra 算法计算指定点 v0 到图 G 中任意点的最短路径的距离
INF 为设定的无限远距离值
此方法不能解决负权值边的图
"""
book = set()
minv = v0
    # Initial distances from the source vertex to every other vertex
dis = dict((k,INF) for k in G.keys())
dis[v0] = 0
while len(book)<len(G):
        book.add(minv)  # Settle the current closest vertex
        for w in G[minv]:  # Relax the edges leaving the current vertex
            if dis[minv] + G[minv][w] < dis[w]:  # A shorter path via minv was found
                dis[w] = dis[minv] + G[minv][w]  # Update the known shortest distance
        new = INF  # Pick the closest still-unvisited vertex as the next one to settle
for v in dis.keys():
if v in book: continue
if dis[v] < new:
new = dis[v]
minv = v
return dis
if __name__ == '__main__':
dis = Dijkstra(G, v0=1)
print(dis.values())
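    # Sanity check (hand-computed for the example graph G above, not asserted
    # programmatically): the shortest distances from vertex 1 are
    # {1: 0, 2: 1, 3: 8, 4: 4, 5: 13, 6: 17}, so the call above should print
    # dict_values([0, 1, 8, 4, 13, 17]).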
``` |
{
"source": "Jnesselr/PysherPlus",
"score": 3
} |
#### File: PysherPlus/examples/basic.py
```python
import sys
from pysherplus.pusher import Pusher
import time
# Add a logging handler so that we can see the raw communication data
import logging
root = logging.getLogger()
root.setLevel(logging.INFO)
ch = logging.StreamHandler(sys.stdout)
root.addHandler(ch)
def channel_callback(event, data):
print(f"Channel Callback: ({event}) {data}")
if __name__ == '__main__':
if len(sys.argv) != 2:
print(f"Usage: python {sys.argv[0]} <app_key>")
sys.exit(1)
app_key = sys.argv[1]
pusher = Pusher(app_key)
pusher['test_channel']['my_event'].register(channel_callback)
pusher.connect()
while True:
time.sleep(1)
```
#### File: PysherPlus/pysherplus/authentication.py
```python
import abc
import hashlib
import hmac
import json
from dataclasses import dataclass, field
from typing import Optional, Any, Union
import requests
@dataclass(frozen=True)
class AuthResult(object):
token: str
user_data: Optional[Any] = field(default=None)
class PysherAuthentication(abc.ABC):
@abc.abstractmethod
def auth_token(self, socket_id: str, channel_name: str) -> Optional[AuthResult]:
pass
class KnownSecretAuthentication(PysherAuthentication):
def __init__(self,
key: str,
secret: Union[str, bytes],
user_data: Optional[Any] = None):
if isinstance(secret, str):
secret = secret.encode('utf-8')
self._key = key
self._secret = secret
self._user_data = user_data
def auth_token(self, socket_id: str, channel_name: str) -> Optional[AuthResult]:
        is_presence_channel = channel_name.startswith('presence-')
        # Pusher's auth scheme signs "socket_id:channel_name" for private channels
        # and additionally appends the JSON-encoded channel data for presence channels.
        if is_presence_channel:
            subject = f"{socket_id}:{channel_name}:{json.dumps(self._user_data)}"
        else:
            subject = f"{socket_id}:{channel_name}"
h = hmac.new(self._secret, subject.encode('utf-8'), hashlib.sha256)
token = f"{self._key}:{h.hexdigest()}"
return AuthResult(
token=token,
user_data=self._user_data if is_presence_channel else None
)
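# Minimal usage sketch for KnownSecretAuthentication. The key, secret, socket id
# and channel name below are made-up placeholder values, not part of the library:
#
#   auth = KnownSecretAuthentication(key='app_key', secret='app_secret')
#   result = auth.auth_token('1234.5678', 'private-orders')
#   # result.token is 'app_key:' followed by the hex HMAC-SHA256 of
#   # '1234.5678:private-orders' computed with 'app_secret'.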
class URLAuthentication(PysherAuthentication):
def __init__(self,
url: str,
session: Optional[requests.Session] = None):
self._session = session or requests.Session()
self._url = url
def auth_token(self, socket_id: str, channel_name: str) -> Optional[AuthResult]:
response = self._session.post(
self._url,
json={
'socket_id': socket_id,
'channel_name': channel_name
}
)
if not response.ok:
return None
response_json = response.json()
return AuthResult(
token=response_json['auth'],
user_data=response_json['channel_data'] if 'channel_data' in response_json else None
)
``` |
{
"source": "Jnesselr/scclient",
"score": 3
} |
#### File: scclient/scclient/event_listener.py
```python
from typing import Set
class Listener(object):
def __init__(self, callbacks: Set):
self._callbacks = callbacks
def add(self, callback):
self._callbacks.add(callback)
def remove(self, callback):
self._callbacks.remove(callback)
def __call__(self, callback):
self.add(callback)
class EventListener(object):
def __init__(self):
self._callbacks = set()
self._listener = Listener(self._callbacks)
@property
def listener(self):
return self._listener
def emit(self, *args):
for callback in self._callbacks:
callback(*args)
def __call__(self, *args):
self.emit(*args)
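# Minimal usage sketch (the variable names are illustrative only):
#
#   on_message = EventListener()
#   on_message.listener(lambda data: print('got', data))  # register a callback
#   on_message.emit({'hello': 'world'})                    # invokes every registered callback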
```
#### File: scclient/scclient/socket_client.py
```python
import collections
import collections.abc
import json
import time
from threading import Lock, Thread
from websocket import WebSocketApp
from scclient.channel import Channel
from scclient.event_listener import EventListener
class ChannelsRepository(collections.abc.Mapping):
def __init__(self, new_channel_function):
self._channels = {}
self._new_channel_function = new_channel_function
self._lock = Lock()
def __getitem__(self, key):
with self._lock:
if key not in self._channels:
self._channels[key] = self._new_channel_function(key)
return self._channels[key]
def __contains__(self, key):
return self._channels.__contains__(key)
def __len__(self):
return len(self._channels)
def __iter__(self):
return self._channels.__iter__()
class CallbackWrapper(object):
def __init__(self, name, callback):
self.name = name
self.callback = callback
def __call__(self, message_object):
error = message_object["error"] if "error" in message_object else None
data = message_object["data"] if "data" in message_object else None
self.callback(self.name, error, data)
class SocketClient(object):
def __init__(self, url,
reconnect_enabled=False,
reconnect_delay=2):
self._ws = WebSocketApp(url,
on_open=self._internal_on_open,
on_close=self._internal_on_close,
on_message=self._internal_on_message)
self._ws_thread = None
self._ws_connected = False
self._cid = 0
self._cid_lock = Lock()
self._id = None
self._auth_token = None
self._event_based_callbacks = collections.defaultdict(set)
self._id_based_callbacks = collections.defaultdict(list)
self._on_connect_event = EventListener()
self._on_disconnect_event = EventListener()
self._on_subscribe_event = EventListener()
self._on_unsubscribe_event = EventListener()
self._reconnect_enabled = bool(reconnect_enabled)
self._reconnect_delay = float(reconnect_delay)
self._repository = ChannelsRepository(self._new_channel)
def _new_channel(self, name):
return Channel(name=name,
client=self,
on_subscribe=lambda: self._on_subscribe_event(self, name),
on_unsubscribe=lambda: self._on_unsubscribe_event(self, name))
@property
def id(self):
return self._id
@property
def connected(self):
return self._ws_connected
@property
def reconnect_enabled(self):
return self._reconnect_enabled
@reconnect_enabled.setter
def reconnect_enabled(self, enabled):
self._reconnect_enabled = bool(enabled)
@property
def reconnect_delay(self):
return self._reconnect_delay
@property
def channels(self):
return self._repository
@property
def on_connect(self):
return self._on_connect_event.listener
@property
def on_disconnect(self):
return self._on_disconnect_event.listener
@property
def on_subscribe(self):
return self._on_subscribe_event.listener
@property
def on_unsubscribe(self):
return self._on_unsubscribe_event.listener
def connect(self):
if self._ws_thread is not None and self._ws_thread.is_alive():
return
self._ws_thread = Thread(target=self._ws_thread_run, daemon=True)
self._ws_thread.start()
def disconnect(self):
self.reconnect_enabled = False
# This causes the _ws_thread to stop on its own
self._ws.close()
def emit(self, event, data, callback=None):
payload = {
"event": event,
"data": data,
}
if callback is not None:
cid = self._get_next_cid()
payload["cid"] = cid
self._id_based_callbacks[cid].append(CallbackWrapper(event, callback))
self._ws.send(json.dumps(payload, sort_keys=True))
def on(self, event, callback):
self._event_based_callbacks[event].add(callback)
def publish(self, channel, data, callback=None):
cid = self._get_next_cid()
payload = {
"event": "#publish",
"data": {
"channel": channel,
"data": data,
},
"cid": cid
}
if callback is not None:
self._id_based_callbacks[cid].append(CallbackWrapper(channel, callback))
self._ws.send(json.dumps(payload, sort_keys=True))
def subscribe(self, channel_name, callback):
self._repository[channel_name].subscribe(callback)
return self._repository[channel_name]
def unsubscribe(self, channel_name, callback):
self._repository[channel_name].unsubscribe(callback)
def _get_next_cid(self):
with self._cid_lock:
self._cid += 1
return self._cid
def _ws_thread_run(self):
while True:
self._ws.run_forever()
if self.reconnect_enabled:
time.sleep(self.reconnect_delay)
else:
break
def _internal_on_open(self):
with self._cid_lock:
self._cid = 0
cid = self._get_next_cid()
handshake_event_name = "#handshake"
handshake_object = {
"event": handshake_event_name,
"data": {
"authToken": self._auth_token,
},
"cid": cid,
}
callback_wrapper = CallbackWrapper(handshake_event_name, self._internal_handshake_response)
self._id_based_callbacks[cid].append(callback_wrapper)
self._ws.send(json.dumps(handshake_object, sort_keys=True))
def _internal_handshake_response(self, event_name, error, response):
self._id = response["id"]
self._ws_connected = True
self._on_connect_event(self)
def _internal_on_close(self):
self._id = None
self._ws_connected = False
self._on_disconnect_event(self)
def _internal_on_message(self, message):
if message == "#1": # ping
self._ws.send("#2") # pong
return
message_object = json.loads(message)
if "rid" in message_object and message_object["rid"] in self._id_based_callbacks:
for callback_wrapper in self._id_based_callbacks[message_object["rid"]]:
callback_wrapper(message_object)
if "event" not in message_object:
return
event_name = message_object["event"]
message_data = message_object["data"] if "data" in message_object else None
if event_name in self._event_based_callbacks:
for callback in self._event_based_callbacks[event_name]:
callback(event_name, message_data)
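# Rough usage sketch; the URL and channel name are hypothetical and
# channel_callback stands for a callback defined elsewhere:
#
#   client = SocketClient('ws://localhost:8000/socketcluster/', reconnect_enabled=True)
#   client.on_connect(lambda c: print('connected with id', c.id))
#   client.subscribe('my-channel', channel_callback)
#   client.connect()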
``` |
{
"source": "jness/magic_inspiration",
"score": 3
} |
#### File: magic_inspiration/app/tools.py
```python
from mongo import (
find, search, random, find_one, insert_one, update_one, delete_one, distinct
)
def get_ideas():
"""
Gets all ideas from mongo
"""
return find('ideas')
def get_sets():
"""
Gets all distinct sets from mongo
"""
return distinct('cards', 'set_name')
def get_colors():
"""
Get all distinct color_identity from mongo
"""
return sorted(
set(
[''.join(i['color_identity']) for i in find('cards')]
)
)
def get_arguments(request):
"""
Return request arguments as dict
"""
return {key:value for key, value in request.args.items() if value}
``` |
{
"source": "jness/raventool",
"score": 3
} |
#### File: raventool/raventool/main.py
```python
import json
import requests
# base config
rpc_user = 'ravenUser'
rpc_pass = '<PASSWORD>'
url = 'http://%s:%[email protected]:8766' % (rpc_user, rpc_pass)
def get_transaction(txid):
"""
Get transaction by txid
"""
body = {
"id": 0,
"method": "getrawtransaction",
"params": {'txid': txid, 'verbose': True},
"jsonrpc": "2.0"
}
# make post request
res = requests.post(url, data=json.dumps(body))
res.raise_for_status()
return res.json()
def get_wallet_info():
"""
Get wallet info
"""
body = {
"id": 0,
"method": "getwalletinfo",
"jsonrpc": "2.0"
}
# make post request
res = requests.post(url, data=json.dumps(body))
res.raise_for_status()
return res.json()
def list_my_assets():
"""
List my assets
"""
body = {
"id": 0,
"method": "listmyassets",
"jsonrpc": "2.0"
}
# make post request
res = requests.post(url, data=json.dumps(body))
res.raise_for_status()
return res.json()
def list_accounts():
"""
List accounts
"""
body = {
"id": 0,
"method": "listaccounts",
"jsonrpc": "2.0"
}
# make post request
res = requests.post(url, data=json.dumps(body))
res.raise_for_status()
return res.json()
def list_received_by_address():
"""
List accounts
"""
body = {
"id": 0,
"method": "listreceivedbyaddress",
"params": [6, True, True],
"jsonrpc": "2.0"
}
# make post request
res = requests.post(url, data=json.dumps(body))
res.raise_for_status()
return res.json()
def sendtoaddress(address, amount):
"""
Send to address
"""
body = {
"id": 0,
"method": "sendtoaddress",
"params": [address, amount],
"jsonrpc": "2.0"
}
# make post request
res = requests.post(url, data=json.dumps(body))
res.raise_for_status()
return res.json()
if __name__ == '__main__':
pass
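    # Example calls, shown only as a sketch: they require a local Raven Core
    # node with RPC enabled and real credentials in place of the values above.
    #   print(get_wallet_info())
    #   print(list_my_assets())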
``` |
{
"source": "JNetForce/ECON136",
"score": 3
} |
#### File: ECON136/bsm_student/op_util.py
```python
import math
import time
from datetime import date
#
# This is how you calc the standard normal dist in Py for dval
def csnd(dval):
return (1.0 + math.erf(dval/math.sqrt(2.0)))/2.0
#
#
# Calculating days to expiry:
def days2exp(exyear, exmonth, exday):
tnow = date.today()
expiry = date(exyear, exmonth, exday)
days2expiry = abs(expiry - tnow)
return (int(days2expiry.days))
#
#
# Calculating the BSM call option price.
def copo(stock,strike,dayvol,days,rfir):
d1 = math.log(stock/strike)+((rfir/365)+(dayvol**2)/2)*days
durvol = dayvol*math.sqrt(days)
delta = csnd(d1/durvol)
cumd2 = csnd((d1/durvol) - durvol)
discount = math.exp(-rfir*days/365)
callpr = (stock*delta)-(strike*discount*cumd2)
return [callpr,delta,durvol]
#
# Calculating the BSM put option price.
def popo(stock,strike,dayvol,days,rfir):
d1 = math.log(stock/strike)+((rfir/365)+(dayvol**2)/2)*days
durvol = dayvol*math.sqrt(days)
delta = csnd(-d1/durvol)
cumd2 = csnd(-(d1/durvol - durvol))
discount = math.exp(-rfir*days/365)
putpr = -(stock*delta)+(strike*discount*cumd2)
return [putpr,delta,durvol]
#
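# Usage sketch: price a 30-day at-the-money call with 1% daily volatility and a
# 2% annual risk-free rate (inputs are illustrative; no output value is claimed):
#   call_price, delta, durvol = copo(stock=100.0, strike=100.0, dayvol=0.01, days=30, rfir=0.02)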
``` |
{
"source": "jnetocurti/patrimony-management-web-scraping",
"score": 3
} |
#### File: project/utils/processors.py
```python
import re
def replace_invalid_chars(text):
return re.sub('(\r\n?|\n)+|(-9999999999)', '', text).strip()
def format_as_number(text):
return re.sub(',', '.', re.sub(r'\.', '', replace_invalid_chars(text)))
def replace_non_numeric_chars(text):
return re.sub(r'[^\d]', '', replace_invalid_chars(text))
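# Quick illustrative checks, assuming Brazilian-style number formatting in the input:
#   format_as_number('1.234,56') -> '1234.56'
#   replace_non_numeric_chars('R$ 12,3') -> '123'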
``` |
{
"source": "jnetod/marshmallow-jsonapi",
"score": 2
} |
#### File: marshmallow-jsonapi/marshmallow_jsonapi/schema.py
```python
import itertools
import marshmallow as ma
from marshmallow.exceptions import ValidationError
from marshmallow.utils import is_collection
from .fields import BaseRelationship, DocumentMeta, ResourceMeta
from .fields import _RESOURCE_META_LOAD_FROM, _DOCUMENT_META_LOAD_FROM
from .exceptions import IncorrectTypeError
from .utils import resolve_params, _MARSHMALLOW_VERSION_INFO, get_dump_key
TYPE = "type"
ID = "id"
class SchemaOpts(ma.SchemaOpts):
def __init__(self, meta, *args, **kwargs):
super().__init__(meta, *args, **kwargs)
self.type_ = getattr(meta, "type_", None)
self.inflect = getattr(meta, "inflect", None)
self.self_url = getattr(meta, "self_url", None)
self.self_url_kwargs = getattr(meta, "self_url_kwargs", None)
self.self_url_many = getattr(meta, "self_url_many", None)
class Schema(ma.Schema):
"""Schema class that formats data according to JSON API 1.0.
Must define the ``type_`` `class Meta` option.
Example: ::
from marshmallow_jsonapi import Schema, fields
def dasherize(text):
return text.replace('_', '-')
class PostSchema(Schema):
id = fields.Str(dump_only=True) # Required
title = fields.Str()
author = fields.HyperlinkRelated(
'/authors/{author_id}',
url_kwargs={'author_id': '<author.id>'},
)
comments = fields.HyperlinkRelated(
'/posts/{post_id}/comments',
url_kwargs={'post_id': '<id>'},
# Include resource linkage
many=True, include_resource_linkage=True,
type_='comments'
)
class Meta:
type_ = 'posts' # Required
inflect = dasherize
"""
class Meta:
"""Options object for `Schema`. Takes the same options as `marshmallow.Schema.Meta` with
the addition of:
* ``type_`` - required, the JSON API resource type as a string.
* ``inflect`` - optional, an inflection function to modify attribute names.
* ``self_url`` - optional, URL to use to `self` in links
* ``self_url_kwargs`` - optional, replacement fields for `self_url`.
String arguments enclosed in ``< >`` will be interpreted as attributes
to pull from the schema data.
* ``self_url_many`` - optional, URL to use to `self` in top-level ``links``
when a collection of resources is returned.
"""
pass
def __init__(self, *args, **kwargs):
self.include_data = kwargs.pop("include_data", ())
super().__init__(*args, **kwargs)
if self.include_data:
self.check_relations(self.include_data)
if not self.opts.type_:
raise ValueError("Must specify type_ class Meta option")
if "id" not in self.fields:
raise ValueError("Must have an `id` field")
if self.opts.self_url_kwargs and not self.opts.self_url:
raise ValueError(
"Must specify `self_url` Meta option when "
"`self_url_kwargs` is specified"
)
self.included_data = {}
self.document_meta = {}
OPTIONS_CLASS = SchemaOpts
def check_relations(self, relations):
"""Recursive function which checks if a relation is valid."""
for rel in relations:
if not rel:
continue
fields = rel.split(".", 1)
local_field = fields[0]
if local_field not in self.fields:
raise ValueError(f'Unknown field "{local_field}"')
field = self.fields[local_field]
if not isinstance(field, BaseRelationship):
raise ValueError(
'Can only include relationships. "{}" is a "{}"'.format(
field.name, field.__class__.__name__
)
)
field.include_data = True
if len(fields) > 1:
field.schema.check_relations(fields[1:])
@ma.post_dump(pass_many=True)
def format_json_api_response(self, data, many, **kwargs):
"""Post-dump hook that formats serialized data as a top-level JSON API object.
See: http://jsonapi.org/format/#document-top-level
"""
ret = self.format_items(data, many)
ret = self.wrap_response(ret, many)
ret = self.render_included_data(ret)
ret = self.render_meta_document(ret)
return ret
def render_included_data(self, data):
if not self.included_data:
return data
data["included"] = list(self.included_data.values())
return data
def render_meta_document(self, data):
if not self.document_meta:
return data
data["meta"] = self.document_meta
return data
def unwrap_item(self, item):
if "type" not in item:
raise ma.ValidationError(
[
{
"detail": "`data` object must include `type` key.",
"source": {"pointer": "/data"},
}
]
)
if item["type"] != self.opts.type_:
raise IncorrectTypeError(actual=item["type"], expected=self.opts.type_)
payload = self.dict_class()
if "id" in item:
payload["id"] = item["id"]
if "meta" in item:
payload[_RESOURCE_META_LOAD_FROM] = item["meta"]
if self.document_meta:
payload[_DOCUMENT_META_LOAD_FROM] = self.document_meta
for key, value in item.get("attributes", {}).items():
payload[key] = value
for key, value in item.get("relationships", {}).items():
# Fold included data related to this relationship into the item, so
# that we can deserialize the whole objects instead of just IDs.
if self.included_data:
included_data = []
inner_data = value.get("data", [])
# Data may be ``None`` (for empty relationships), but we only
# need to process it when it's present.
if inner_data:
if not is_collection(inner_data):
included_data = next(
self._extract_from_included(inner_data), None
)
else:
for data in inner_data:
included_data.extend(self._extract_from_included(data))
if included_data:
value["data"] = included_data
payload[key] = value
return payload
@ma.pre_load(pass_many=True)
def unwrap_request(self, data, many, **kwargs):
if "data" not in data:
raise ma.ValidationError(
[
{
"detail": "Object must include `data` key.",
"source": {"pointer": "/"},
}
]
)
data = data["data"]
if many:
if not is_collection(data):
raise ma.ValidationError(
[
{
"detail": "`data` expected to be a collection.",
"source": {"pointer": "/data"},
}
]
)
return [self.unwrap_item(each) for each in data]
return self.unwrap_item(data)
def on_bind_field(self, field_name, field_obj):
"""Schema hook override. When binding fields, set ``data_key`` (on marshmallow 3) or
load_from (on marshmallow 2) to the inflected form of field_name.
"""
if _MARSHMALLOW_VERSION_INFO[0] < 3:
if not field_obj.load_from:
field_obj.load_from = self.inflect(field_name)
else:
if not field_obj.data_key:
field_obj.data_key = self.inflect(field_name)
return None
def _do_load(self, data, many=None, **kwargs):
"""Override `marshmallow.Schema._do_load` for custom JSON API handling.
Specifically, we do this to format errors as JSON API Error objects,
and to support loading of included data.
"""
many = self.many if many is None else bool(many)
# Store this on the instance so we have access to the included data
# when processing relationships (``included`` is outside of the
# ``data``).
self.included_data = data.get("included", {})
self.document_meta = data.get("meta", {})
try:
result = super()._do_load(data, many=many, **kwargs)
except ValidationError as err: # strict mode
error_messages = err.messages
if "_schema" in error_messages:
error_messages = error_messages["_schema"]
formatted_messages = self.format_errors(error_messages, many=many)
err.messages = formatted_messages
raise err
else:
# On marshmallow 2, _do_load returns a tuple (load_data, errors)
if _MARSHMALLOW_VERSION_INFO[0] < 3:
data, error_messages = result
if "_schema" in error_messages:
error_messages = error_messages["_schema"]
formatted_messages = self.format_errors(error_messages, many=many)
return data, formatted_messages
return result
def _extract_from_included(self, data):
"""Extract included data matching the items in ``data``.
For each item in ``data``, extract the full data from the included
data.
"""
return (
item
for item in self.included_data
if item["type"] == data["type"] and str(item["id"]) == str(data["id"])
)
def inflect(self, text):
"""Inflect ``text`` if the ``inflect`` class Meta option is defined, otherwise
do nothing.
"""
return self.opts.inflect(text) if self.opts.inflect else text
### Overridable hooks ###
def format_errors(self, errors, many):
"""Format validation errors as JSON Error objects."""
if not errors:
return {}
if isinstance(errors, (list, tuple)):
return {"errors": errors}
formatted_errors = []
if many:
for index, i_errors in errors.items():
formatted_errors.extend(self._get_formatted_errors(i_errors, index))
else:
formatted_errors.extend(self._get_formatted_errors(errors))
return {"errors": formatted_errors}
def _get_formatted_errors(self, errors, index=None):
return itertools.chain(
*(
[
self.format_error(field_name, message, index=index)
for message in field_errors
]
for field_name, field_errors in itertools.chain(
*(self._process_nested_errors(k, v) for k, v in errors.items())
)
)
)
def _process_nested_errors(self, name, data):
if not isinstance(data, dict):
return [(name, data)]
return itertools.chain(
*(self._process_nested_errors(f"{name}/{k}", v) for k, v in data.items())
)
def format_error(self, field_name, message, index=None):
"""Override-able hook to format a single error message as an Error object.
See: http://jsonapi.org/format/#error-objects
"""
pointer = ["/data"]
if index is not None:
pointer.append(str(index))
relationship = isinstance(
self.declared_fields.get(field_name), BaseRelationship
)
if relationship:
pointer.append("relationships")
elif field_name != "id":
# JSONAPI identifier is a special field that exists above the attribute object.
pointer.append("attributes")
pointer.append(self.inflect(field_name))
if relationship:
pointer.append("data")
return {"detail": message, "source": {"pointer": "/".join(pointer)}}
def format_item(self, item):
"""Format a single datum as a Resource object.
See: http://jsonapi.org/format/#document-resource-objects
"""
# http://jsonapi.org/format/#document-top-level
# Primary data MUST be either... a single resource object, a single resource
# identifier object, or null, for requests that target single resources
if not item:
return None
ret = self.dict_class()
ret[TYPE] = self.opts.type_
# Get the schema attributes so we can confirm `dump-to` values exist
attributes = {
(get_dump_key(self.fields[field]) or field): field for field in self.fields
}
for field_name, value in item.items():
attribute = attributes[field_name]
if attribute == ID:
ret[ID] = value
elif isinstance(self.fields[attribute], DocumentMeta):
if not self.document_meta:
self.document_meta = self.dict_class()
self.document_meta.update(value)
elif isinstance(self.fields[attribute], ResourceMeta):
if "meta" not in ret:
ret["meta"] = self.dict_class()
ret["meta"].update(value)
elif isinstance(self.fields[attribute], BaseRelationship):
if value:
if "relationships" not in ret:
ret["relationships"] = self.dict_class()
ret["relationships"][self.inflect(field_name)] = value
else:
if "attributes" not in ret:
ret["attributes"] = self.dict_class()
ret["attributes"][self.inflect(field_name)] = value
links = self.get_resource_links(item)
if links:
ret["links"] = links
return ret
def format_items(self, data, many):
"""Format data as a Resource object or list of Resource objects.
See: http://jsonapi.org/format/#document-resource-objects
"""
if many:
return [self.format_item(item) for item in data]
else:
return self.format_item(data)
def get_top_level_links(self, data, many):
"""Hook for adding links to the root of the response data."""
self_link = None
if many:
if self.opts.self_url_many:
self_link = self.generate_url(self.opts.self_url_many)
else:
if self.opts.self_url:
self_link = data.get("links", {}).get("self", None)
return {"self": self_link}
def get_resource_links(self, item):
"""Hook for adding links to a resource object."""
if self.opts.self_url:
ret = self.dict_class()
kwargs = resolve_params(item, self.opts.self_url_kwargs or {})
ret["self"] = self.generate_url(self.opts.self_url, **kwargs)
return ret
return None
def wrap_response(self, data, many):
"""Wrap data and links according to the JSON API """
ret = {"data": data}
# self_url_many is still valid when there isn't any data, but self_url
# may only be included if there is data in the ret
if many or data:
top_level_links = self.get_top_level_links(data, many)
if top_level_links["self"]:
ret["links"] = top_level_links
return ret
def generate_url(self, link, **kwargs):
"""Generate URL with any kwargs interpolated."""
return link.format_map(kwargs) if link else None
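# Illustrative shape of a single-resource dump produced by the hooks above, for a
# schema like the PostSchema in the class docstring (field values are made up):
#   {"data": {"type": "posts",
#             "id": "1",
#             "attributes": {"title": "Hello"},
#             "relationships": {"author": {...}, "comments": {...}}}}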
``` |
{
"source": "jnetosoares/PyMove",
"score": 2
} |
#### File: pymove/tests/test_preprossessing_stay_point_detection.py
```python
from numpy import nan
from pandas import DataFrame, Timestamp
from pandas.testing import assert_frame_equal
from pymove import MoveDataFrame, stay_point_detection
from pymove.utils.constants import DATETIME, LATITUDE, LONGITUDE, TRAJ_ID
list_data = [
[39.984094, 116.319236, '2008-10-23 05:53:05', 1],
[39.984198, 116.319322, '2008-10-23 05:53:06', 1],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
[39.984224, 116.319402, '2008-10-23 05:53:15', 2],
]
list_data_test = [
[39.984093, 116.319237, '2008-10-23 05:53:05', 1],
[39.984200, 116.319321, '2008-10-23 05:53:06', 1],
[39.984222, 116.319405, '2008-10-23 05:53:11', 1],
[39.984211, 116.319389, '2008-10-23 05:53:16', 1],
[39.984219, 116.319420, '2008-10-23 05:53:21', 1],
]
def _prepare_default_df():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
cols = ['lat', 'lon', 'datetime', 'id']
return move_df, cols
def test_create_update_datetime_in_format_cyclical():
move_df, cols = _prepare_default_df()
stay_point_detection.create_update_datetime_in_format_cyclical(move_df)
print(move_df)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
0.979084,
0.203456,
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
0.979084,
0.203456,
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
0.979084,
0.203456,
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:15'),
2,
0.979084,
0.203456,
],
],
columns=cols + ['hour_sin', 'hour_cos'],
index=[0, 1, 2, 3],
)
assert_frame_equal(move_df, expected)
def test_create_or_update_move_stop_by_dist_time():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
cols = [
'segment_stop',
'id',
'lat',
'lon',
'datetime',
'dist_to_prev',
'time_to_prev',
'speed_to_prev',
'stop',
]
stay_point_detection.create_or_update_move_stop_by_dist_time(
move_df, dist_radius=3.5, time_radius=0.5, inplace=True
)
expected = DataFrame(
data=[
[
1,
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
nan,
nan,
False,
],
[
2,
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
nan,
nan,
nan,
False,
],
[
3,
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
nan,
nan,
nan,
True,
],
[
3,
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:15'),
0.0,
4.0,
0.0,
True,
],
],
columns=cols,
index=[0, 1, 2, 3],
)
print(move_df)
assert_frame_equal(move_df, expected)
def test_create_update_move_and_stop_by_radius():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
cols = [
'id',
'lat',
'lon',
'datetime',
'dist_to_prev',
'dist_to_next',
'dist_prev_to_next',
'situation',
]
stay_point_detection.create_update_move_and_stop_by_radius(
move_df, radius=4.0
)
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
13.690153134343689,
nan,
'nan',
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
nan,
nan,
'move',
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
nan,
0.0,
nan,
'nan',
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:15'),
0.0,
nan,
nan,
'stop',
],
],
columns=cols,
index=[0, 1, 2, 3],
)
assert_frame_equal(move_df, expected)
```
#### File: pymove/utils/log.py
```python
from IPython import get_ipython
from IPython.display import display
from ipywidgets import HTML, IntProgress, VBox
from tqdm import tqdm
def log_progress(sequence, every=None, size=None, desc='Items'):
"""
Make and display a progress bar.
Parameters
----------
sequence : list.
        Represents the sequence of elements to iterate over.
every : int, optional, default None.
Represents the steps in which the bar will be updated
size : int, optional, default None.
        Represents the size/number of elements in the sequence.
desc : String, optional, default 'Items'.
Represents the description of the operation.
"""
is_iterator = False
if size is None:
try:
size = len(sequence)
except TypeError:
is_iterator = True
if size is not None:
if every is None:
if size <= 200:
every = 1
else:
every = int(size / 200)
else:
if every is None:
raise AssertionError('Sequence is iterator, set every')
if is_iterator:
progress = IntProgress(min=0, max=1, value=1)
progress.bar_style = 'info'
else:
progress = IntProgress(min=0, max=size, value=0)
label = HTML()
box = VBox(children=[label, progress])
display(box)
index = 0
try:
for index, record in enumerate(sequence, 1):
if index == 1 or index % every == 0:
if is_iterator:
label.value = '%s: %s / ?' % (desc, index)
else:
progress.value = index
label.value = u'%s: %s / %s' % (desc, index, size)
yield record
except Exception:
progress.bar_style = 'danger'
raise
else:
progress.bar_style = 'success'
progress.value = index
label.value = '%s: %s' % (desc, str(index or '?'))
try:
if get_ipython().__class__.__name__ == 'ZMQInteractiveShell':
progress_bar = log_progress
else:
raise NameError
except NameError:
progress_bar = tqdm
```
#### File: pymove/utils/mem.py
```python
from __future__ import print_function
import json
import os
import re
import time
from collections import deque
from itertools import chain
from sys import getsizeof, stderr
import numpy as np
import pandas as pd
import psutil
try:
import pwd
except (ModuleNotFoundError, ImportError):
from pymove.utils import _winmock as pwd
try:
from reprlib import repr
except ImportError:
pass
def proc_info():
"""
    This function retrieves information about each Jupyter notebook process
    running on the machine.
Returns
-------
dataframe
        A dataframe with the following information about
each jupyter notebook process:
- user : username
- pid : process identifier
- memory_GB : memory usage
- kernel_ID : kernel id
Examples
--------
    >>> mem.proc_info()
user pid memory_GB kernel_ID
0 999999 11797 0.239374 74efe612-927f-4c1f-88a6-bb5fd32bc65c
1 999999 11818 0.172012 11c38dd6-8a65-4c45-90cf-0da5db65fa99
"""
UID = 1
regex = re.compile(r'.+kernel-(.+)\.json')
pids = [pid for pid in os.listdir('/proc') if pid.isdigit()]
# memory info from psutil.Process
df_mem = []
for pid in pids:
try:
ret = open(os.path.join('/proc', pid, 'cmdline'), 'rb').read()
ret_str = ret.decode('utf-8')
except IOError: # proc has already terminated
continue
# jupyter notebook processes
if (
len(ret_str) > 0
and ('jupyter' in ret_str or 'ipython' in ret_str)
and 'kernel' in ret_str
):
# kernel
kernel_ID = re.sub(regex, r'\1', ret_str)[0:-1]
# memory
process = psutil.Process(int(pid))
mem = process.memory_info()[0] / float(1e9)
# user name for pid
for ln in open('/proc/{0}/status'.format(int(pid))):
if ln.startswith('Uid:'):
uid = int(ln.split()[UID])
uname = pwd.getpwuid(uid).pw_name
# user, pid, memory, kernel_ID
df_mem.append([uname, pid, mem, kernel_ID])
df_mem = pd.DataFrame(df_mem)
df_mem.columns = ['user', 'pid', 'memory_GB', 'kernel_ID']
return df_mem
def session_info(sessions_str):
"""
    This function retrieves the path of each notebook running
    in the current session.
Parameters
----------
sessions_str : str
Session
Returns
-------
dataframe
        A dataframe with the following information about
each jupyter notebook process:
- kernel_ID : kernel id
- notebook_path: path to the notebook
"""
sessions = json.loads(sessions_str)
df_nb = []
kernels = []
for sess in sessions:
kernel_ID = sess['kernel']['id']
if kernel_ID not in kernels:
notebook_path = sess['notebook']['path']
df_nb.append([kernel_ID, notebook_path])
kernels.append(kernel_ID)
df_nb = pd.DataFrame(df_nb)
df_nb.columns = ['kernel_ID', 'notebook_path']
return df_nb
def stats(sessions_str):
"""
    This function retrieves the path and information of each notebook
    running in the current session.
Parameters
----------
sessions_str : str
Session
Returns
-------
dataframe
        A dataframe with the following information about
each jupyter notebook process:
- user : username
- pid : process identifier
- memory_GB : memory usage
- kernel_ID : kernel id
- notebook_path: path to the notebook
"""
df_mem = proc_info()
df_nb = session_info(sessions_str)
# joining tables
df = pd.merge(df_nb, df_mem, on=['kernel_ID'], how='right')
df = df.sort_values('memory_GB', ascending=False)
del df_mem
del df_nb
return df.reset_index(drop=True)
def reduce_mem_usage_automatic(df):
"""
Reduces the memory usage of the given dataframe.
Parameter
---------
df : dataframe
The input data to which the operation will be performed.
"""
start_mem = df.memory_usage().sum() / 1024 ** 2
print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))
for col in df.columns:
col_type = df[col].dtype
        if str(col_type).startswith('int'):
c_min = df[col].min()
c_max = df[col].max()
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif (
c_min > np.iinfo(np.uint8).min
and c_max < np.iinfo(np.uint8).max
):
df[col] = df[col].astype(np.uint8)
elif (
c_min > np.iinfo(np.int16).min
and c_max < np.iinfo(np.int16).max
):
df[col] = df[col].astype(np.int16)
elif (
c_min > np.iinfo(np.uint16).min
and c_max < np.iinfo(np.uint16).max
):
df[col] = df[col].astype(np.uint16)
elif (
c_min > np.iinfo(np.int32).min
and c_max < np.iinfo(np.int32).max
):
df[col] = df[col].astype(np.int32)
elif (
c_min > np.iinfo(np.uint32).min
and c_max < np.iinfo(np.uint32).max
):
df[col] = df[col].astype(np.uint32)
elif (
c_min > np.iinfo(np.int64).min
and c_max < np.iinfo(np.int64).max
):
df[col] = df[col].astype(np.int64)
elif (
c_min > np.iinfo(np.uint64).min
and c_max < np.iinfo(np.uint64).max
):
df[col] = df[col].astype(np.uint64)
        elif str(col_type).startswith('float'):
c_min = df[col].min()
c_max = df[col].max()
if (
c_min > np.finfo(np.float16).min
and c_max < np.finfo(np.float16).max
):
df[col] = df[col].astype(np.float16)
elif (
c_min > np.finfo(np.float32).min
and c_max < np.finfo(np.float32).max
):
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
end_mem = df.memory_usage().sum() / 1024 ** 2
print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
print(
'Decreased by {:.1f} %'.format(100 * (start_mem - end_mem) / start_mem)
)
def total_size(o, handlers=None, verbose=False):
"""
    Calculates the approximate memory footprint of a given object and all of
its contents. Automatically finds the contents of the following builtin
containers and their subclasses: tuple, list, deque, dict, set and
frozenset.
Parameters
----------
o : object
The object to calculate his memory footprint.
handlers : dict, optional(empty by default)
To search other containers, add handlers to iterate over their contents
handlers = {SomeContainerClass: iter,
OtherContainerClass: OtherContainerClass.get_elements}
verbose : boolean, optional(False by default)
If set to True, the following information will be printed for
each content of the object:
- the size of the object in bytes.
        - its type
- the object values
Returns
-------
float
The memory used by the given object
"""
if handlers is None:
handlers = {}
def dict_handler(d):
return chain.from_iterable(d.items())
all_handlers = {
tuple: iter,
list: iter,
deque: iter,
dict: dict_handler,
set: iter,
frozenset: iter,
}
# user handlers take precedence
all_handlers.update(handlers)
# track which object id"srs have already been seen
seen = set()
# estimate sizeof object without __sizeof__
default_size = getsizeof(0)
def sizeof(o):
# do not double count the same object
if id(o) in seen:
return 0
seen.add(id(o))
s = getsizeof(o, default_size)
if verbose:
print(s, type(o), repr(o), file=stderr)
for typ, handler in all_handlers.items():
if isinstance(o, typ):
s += sum(map(sizeof, handler(o)))
break
return s
return sizeof(o)
def begin_operation(name):
"""
Gets the stats for the current operation.
Parameters
----------
name: String
name of the operation
Returns
-------
dict
dictionary with the operation stats
"""
process = psutil.Process(os.getpid())
init = process.memory_info()[0]
start = time.time()
return {'process': process, 'init': init, 'start': start, 'name': name}
def end_operation(operation):
"""
Gets the time and memory usage of the operation.
Parameters
----------
operation: dict
dictionary with the begining stats of the operation
Returns
-------
dict
dictionary with the operation execution stats
"""
finish = operation['process'].memory_info()[0]
last_operation_name = operation['name']
last_operation_time_duration = time.time() - operation['start']
last_operation_mem_usage = finish - operation['init']
return {
'name': last_operation_name,
'time in seconds': last_operation_time_duration,
'memory': sizeof_fmt(last_operation_mem_usage),
}
def sizeof_fmt(mem_usage, suffix='B'):
"""
    Formats a memory size given in bytes as a human-readable string.
Parameters
----------
mem_usage : int
memory usage in bytes
suffix: string, optional, default 'B'
suffix of the unit
Returns
-------
str
A string of the memory usage in a more readable format
"""
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']:
if abs(mem_usage) < 1024.0:
return '{:3.1f} {}{}'.format(mem_usage, unit, suffix)
mem_usage /= 1024.0
return '{:.1f} {}{}'.format(mem_usage, 'Yi', suffix)
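# For example, sizeof_fmt(1024 ** 2) returns '1.0 MiB' and sizeof_fmt(1500) returns '1.5 KiB'.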
def print_top_mem_vars(variables=None, n=10):
"""
Shows the sizes of the active variables
Parameters
----------
variables: locals() or globals(), default locals()
Whether to shows local or global variables
n: int
number of variables to print
"""
if variables is None:
variables = locals()
vars_ = ((name, getsizeof(value)) for name, value in variables.items())
for name, size in sorted(vars_, key=lambda x: -x[1])[:n]:
print('{:>30}: {:>8}'.format(name, sizeof_fmt(size)))
```
#### File: pymove/utils/trajectories.py
```python
from __future__ import division
import folium
import numpy as np
import pandas as pd
from pymove.utils.constants import (
DATETIME,
LATITUDE,
LONGITUDE,
TILES,
TRAJ_ID,
TYPE_DASK,
TYPE_PANDAS,
)
def read_csv(
filename,
sep=',',
encoding='utf-8',
header='infer',
names=None,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
type_=TYPE_PANDAS,
n_partitions=1,
):
"""
Reads a .csv file and structures the data into the desired structure
supported by PyMove.
Parameters
----------
filename : String.
        Represents the path to the input .csv file.
sep : String, optional, default ','.
Delimiter to use.
encoding : String, optional, default 'utf-8'.
Encoding to use for UTF when reading/writing
header: int, list of int, default ‘infer’
        Row number(s) to use as the column names, and the start of the data.
Default behavior is to infer the column names: if no names are passed
the behavior is identical to header=0 and column names are inferred from
the first line of the file, if column names are passed explicitly then
the behavior is identical to header=None
names: array-like, optional
List of column names to use. If the file contains a header row,
then you should explicitly pass header=0 to override the column names.
Duplicates in this list are not allowed.
latitude : String, optional, default 'lat'.
Represents the column name of feature latitude.
longitude : String, optional, default 'lon'.
Represents the column name of feature longitude.
datetime : String, optional, default 'datetime'.
Represents the column name of feature datetime.
traj_id : String, optional, default 'id'.
Represents the column name of feature id trajectory.
type_ : String, optional, default 'pandas'.
Represents the type of the MoveDataFrame
n_partitions : int, optional, default 1.
Represents number of partitions for DaskMoveDataFrame
Returns
-------
pymove.core.MoveDataFrameAbstract subclass.
Trajectory data.
"""
df = pd.read_csv(
filename,
sep=sep,
encoding=encoding,
header=header,
names=names,
parse_dates=[datetime],
)
from pymove import PandasMoveDataFrame as pm
from pymove import DaskMoveDataFrame as dm
if type_ == TYPE_PANDAS:
return pm(df, latitude, longitude, datetime, traj_id)
if type_ == TYPE_DASK:
return dm(df, latitude, longitude, datetime, traj_id, n_partitions)
def format_labels(current_id, current_lat, current_lon, current_datetime):
"""
    Format the given column labels to the standard PyMove labels for
    id, lat, lon and datetime.
Parameters
----------
current_id : String.
Represents the column name of feature id.
current_lat : String.
Represents the column name of feature latitude.
current_lon : String.
Represents the column name of feature longitude.
current_datetime : String.
Represents the column name of feature datetime.
Returns
-------
dict
Represents a dict with mapping current columns of data
to format of PyMove column.
"""
return {
current_id: TRAJ_ID,
current_lon: LONGITUDE,
current_lat: LATITUDE,
current_datetime: DATETIME
}
def shift(arr, num, fill_value=np.nan):
"""
Shifts the elements of the given array by the number of periods specified.
Parameters
----------
arr : array.
The array to be shifted.
num : int.
Number of periods to shift. Can be positive or negative.
        If positive, the elements will be pulled down, and pulled up otherwise.
fill_value : int, optional, default np.nan.
The scalar value used for newly introduced missing values.
Returns
-------
array
        A new array with the same shape and type as the initial given array,
but with the indexes shifted.
Notes
-----
Similar to pandas shift, but faster.
References
--------
https://stackoverflow.com/questions/30399534/shift-elements-in-a-numpy-array
"""
result = np.empty_like(arr)
if num > 0:
result[:num] = fill_value
result[num:] = arr[:-num]
elif num < 0:
result[num:] = fill_value
result[:num] = arr[-num:]
else:
result = arr
return result
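# Example with floats (np.nan is used as the fill value, so an integer array
# would not accept it):
#   shift(np.array([1., 2., 3., 4.]), 1)   -> array([nan, 1., 2., 3.])
#   shift(np.array([1., 2., 3., 4.]), -1)  -> array([2., 3., 4., nan])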
def fill_list_with_new_values(original_list, new_list_values):
"""
Copies elements from one list to another. The elements will be positioned in
the same position in the new list as they were in their original list.
Parameters
----------
original_list : list.
The list to which the elements will be copied.
new_list_values : list.
The list from which elements will be copied.
"""
n = len(new_list_values)
original_list[:n] = new_list_values
def save_bbox(bbox_tuple, file, tiles=TILES[0], color='red'):
"""
Save bbox as file .html using Folium.
Parameters
----------
bbox_tuple : tuple.
        Represents a bounding box, that is, a tuple of 4 values with the
        min and max limits of latitude and longitude.
file : String.
Represents filename.
tiles : String, optional, default 'OpenStreetMap'.
        Represents the tiles' type.
Example: 'openstreetmap', 'cartodbpositron',
'stamentoner', 'stamenterrain',
'mapquestopen', 'MapQuest Open Aerial',
'Mapbox Control Room' and 'Mapbox Bright'.
color : String, optional, default 'red'.
Represents color of lines on map.
Examples
--------
>>> from pymove.trajectories import save_bbox
>>> bbox = (22.147577, 113.54884299999999, 41.132062, 121.156224)
>>> save_bbox(bbox, 'bbox.html')
"""
m = folium.Map(tiles=tiles)
m.fit_bounds(
[[bbox_tuple[0], bbox_tuple[1]], [bbox_tuple[2], bbox_tuple[3]]]
)
points_ = [
(bbox_tuple[0], bbox_tuple[1]),
(bbox_tuple[0], bbox_tuple[3]),
(bbox_tuple[2], bbox_tuple[3]),
(bbox_tuple[2], bbox_tuple[1]),
(bbox_tuple[0], bbox_tuple[1]),
]
folium.PolyLine(points_, weight=3, color=color).add_to(m)
m.save(file)
``` |
{
"source": "jnettels/DHNx",
"score": 3
} |
#### File: DHNx/dhnx/graph.py
```python
import pandas as pd
import networkx as nx
def thermal_network_to_nx_graph(thermal_network):
r"""
Parameters
----------
thermal_network : dhnx.network.ThermalNetwork
Returns
-------
nx_graph : nx:MultiDigraph
networkx graph of thermal_network
"""
nx_graph = nx.DiGraph() # TODO: Check if this line can be removed.
edges = thermal_network.components['pipes'].copy()
edge_attr = list(edges.columns)
edge_attr.remove('from_node')
edge_attr.remove('to_node')
nx_graph = nx.from_pandas_edgelist(
edges,
'from_node',
'to_node',
edge_attr=edge_attr,
create_using=nx_graph
)
nodes = {
list_name: thermal_network.components[list_name].copy() for list_name in [
'consumers', # TODO: Do not hard code these here
'producers',
'forks'
]
}
for k, v in nodes.items():
v.index = [k + '-' + str(id) for id in v.index]
nodes = pd.concat(nodes.values(), sort=True)
node_attrs = {node_id: dict(data) for node_id, data in nodes.iterrows()}
nx.set_node_attributes(nx_graph, node_attrs)
return nx_graph
def nx_graph_to_thermal_network(nx_graph):
r"""
Creates ThermalNetwork from nx.MultiDigraph
Parameters
----------
nx_graph : nx.MultiDigraph
Returns
-------
thermal_network : ThermalNetwork
"""
raise NotImplementedError('This feature is not implemented yet.')
def write_edge_data_to_graph(series, graph_in, var_name=None):
r"""
Writes data describing the edges to the graph. Data has to
be a pd.Series labeled with the (from, to). If the series has
a name, the data will be stored in the graph under that name.
If not, `var_name` has to be provided.
Parameters
----------
series
graph_in
var_name
Returns
-------
"""
graph = graph_in.copy()
assert isinstance(series, pd.Series), \
"Have to pass a pandas Series."
if var_name:
pass
elif series.name:
var_name = series.name
else:
raise ValueError(r"Have to either pass Series with name or provide var_name.")
for index, value in series.iteritems():
graph.edges[index][var_name] = value
return graph
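# A hedged usage sketch (not part of the original module); the edge labels
# follow the '<component>-<id>' node naming used above and are hypothetical:
#
#     s = pd.Series({('forks-0', 'consumers-1'): 0.12}, name='heat_loss')
#     g_annotated = write_edge_data_to_graph(s, g)
#     g_annotated.edges['forks-0', 'consumers-1']['heat_loss']  # -> 0.12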
```
#### File: dhnx/optimization_modules/oemof_heatpipe.py
```python
from pyomo.core.base.block import SimpleBlock
from pyomo.environ import (Set, NonNegativeReals, Var, Constraint)
import warnings
from oemof.solph.network import Transformer
from oemof.solph.plumbing import sequence
from oemof.solph import Investment
from collections import namedtuple
class Label(namedtuple('solph_label', ['tag1', 'tag2', 'tag3', 'tag4'])):
__slots__ = ()
def __str__(self):
"""The string is used within solph as an ID, so it hast to be unique"""
return '_'.join(map(str, self._asdict().values()))
class HeatPipeline(Transformer):
r"""A HeatPipeline represent a Pipeline in a district heating system.
This is done by a Transformer with a constant energy loss independent of
actual power, but dependent on the nominal power and the length parameter.
The HeatPipeline is a single-input-single-output transformer. Additionally,
conversion factors for in- and output flow can be applied.
Parameters
----------
length : float
Length of HeatPipeline.
heat_loss_factor : float
Heat loss per length unit as fraction of the nominal power. Can also be
defined by a series.
See also :py:class:`~oemof.solph.network.Transformer`.
Note
----
This component is experimental. Use it with care.
The following sets, variables, constraints and objective parts are created
* :py:class:`~oemof.solph.custom.HeatPipelineBlock` (if no
Investment object present)
* :py:class:`~oemof.solph.custom.HeatPipelineInvestBlock` (if
Investment object present)
Examples
--------
example
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.heat_loss_factor = sequence(kwargs.get('heat_loss_factor', 0))
self.heat_loss_factor_fix = sequence(kwargs.get(
'heat_loss_factor_fix', 0))
self._invest_group = False
self._nonconvex_group = False
self._demand_group = False
if len(self.inputs) > 1 or len(self.outputs) > 2:
if len(self.outputs) == 2:
self._demand_group = True
else:
raise ValueError("Heatpipe must not have more than"
" one input and two outputs!")
for f in self.inputs.values():
if f.nonconvex is not None:
raise ValueError(
"Inputflow must not be of type NonConvexFlow!")
for f in self.outputs.values():
if f.nonconvex is not None:
self._nonconvex_group = True
self._check_flows_invest()
if (self._nonconvex_group is True) and (self._invest_group is True):
raise ValueError(
"Either an investment OR a switchable heatloss can be set"
" (NonConvexFlow)."
" Remove the NonConvexFlow or drop "
"the Investment attribute.")
if self._invest_group is True:
self._set_flows()
o = list(self.outputs.keys())[0]
if (self.heat_loss_factor_fix[0] > 0) \
and (self.outputs[o].investment.nonconvex is False):
warnings.warn(
"Be careful! In case of a convex Investment "
"(Investment.nonconvex is False), the "
"'heat_loss_factor_fix' is considered, even though the "
"investment might be zero! => A simple sink could be "
"the results. Hopefully, you know what you are doing.")
else:
self._set_nominal_value()
def _check_flows_invest(self):
for flow in self.inputs.values():
if isinstance(flow.investment, Investment):
raise ValueError(
"The investment must be defined at the Outputflow!")
for flow in self.outputs.values():
if isinstance(flow.investment, Investment):
self._invest_group = True
def _set_flows(self):
# sets the input flow to investment
for flow in self.inputs.values():
flow.investment = Investment()
# set nominal values of in- and outflow equal in case of invest_group = False
def _set_nominal_value(self):
i = list(self.inputs.keys())[0]
o = list(self.outputs.keys())[0]
if self.outputs[o].nominal_value is not None:
self.inputs[i].nominal_value = \
self.outputs[o].nominal_value
elif self.inputs[i].nominal_value is not None:
self.outputs[o].nominal_value = \
self.inputs[i].nominal_value
def constraint_group(self):
if self._invest_group is True:
return HeatPipelineInvestBlock
else:
return HeatPipelineBlock
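# A hedged instantiation sketch (not part of the original module). It assumes
# an oemof.solph energy system with two heat buses; `Bus` and `Flow` come from
# oemof.solph, and the keyword arguments follow the constructor above. The
# labels and numbers are made up for illustration only:
#
#     from oemof import solph
#     b_in = solph.Bus(label='heat-bus-in')
#     b_out = solph.Bus(label='heat-bus-out')
#     pipe = HeatPipeline(
#         label=Label('pipe', 'heat', 'dhn', 'fork-1'),
#         inputs={b_in: solph.Flow()},
#         outputs={b_out: solph.Flow(nominal_value=50)},
#         heat_loss_factor=0.0002,
#     )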
class HeatPipelineBlock(SimpleBlock):
r"""Block representing a pipeline of a district heating system.
:class:`~oemof.solph.custom.HeatPipeline`
**The following constraints are created:**
.. _HeatPipelineBlock-equations:
.. math::
&
(1) \dot{Q}_{out}(t) = \dot{Q}_{in}(t) \cdot
\frac{\eta_{out}}{\eta_{in}} - \dot{Q}_{loss}(t)\\
&
(2) \dot{Q}_{loss}(t) = f_{loss}(t) \cdot l \cdot \dot{Q}_{nominal}
&
The symbols used are defined as follows
(with Variables (V) and Parameters (P)):
.. csv-table::
:header: "symbol", "attribute", "type", "explanation"
:widths: 1, 1, 1, 1
":math:`\dot{Q}_{out}(t)`", ":py:obj:`flow[n, o, t]`", "V", "Heat
output"
":math:`\dot{Q}_{in}(t)`", ":py:obj:`flow[i, n, t]`", "V", "Heat input"
":math:`\dot{Q}_{loss}(t)`", ":py:obj:`heat_loss[n, t]`", "P", "Heat
loss of heat pipeline"
":math:`\dot{Q}_{nominal}`", ":py:obj:`flows[n, o].nominal_value`", "
P", "Nominal capacity of heating pipeline"
":math:`\eta_{out}`", ":py:obj:`conversion_factors[o][t]`", "P", "
Conversion factor of output flow (Heat Output)"
":math:`\eta_{in}`", ":py:obj:`conversion_factors[i][t]`", "P", "
Conversion factor of input flow (Heat Input)"
":math:`f_{loss}(t)`", ":py:obj:`heat_loss_factor`", "P", "Specific
heat loss factor for pipeline"
":math:`l`", ":py:obj:`length`", "P", "Length of heating pipeline"
"""
CONSTRAINT_GROUP = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _create(self, group=None):
""" Creates the linear constraint for the class:`Heatpipe`
block.
Parameters
----------
group : list
"""
if group is None:
return None
m = self.parent_block()
self.HEATPIPES = Set(initialize=[n for n in group])
self.CONVEX_HEATPIPES = Set(initialize=[
n for n in group if n.outputs[list(n.outputs.keys())[0]].nonconvex is None])
self.NONCONVEX_HEATPIPES = Set(initialize=[
n for n in group if n.outputs[list(n.outputs.keys())[0]].nonconvex is not None])
# Defining Variables
self.heat_loss = Var(self.HEATPIPES, m.TIMESTEPS,
within=NonNegativeReals)
def _heat_loss_rule_fix(block, n, t):
"""Rule definition for the heat loss depending on the nominal
            capacity for a fixed heat loss.
"""
o = list(n.outputs.keys())[0]
expr = 0
expr += - block.heat_loss[n, t]
expr +=\
n.heat_loss_factor[t] * m.flows[n, o].nominal_value
expr += n.heat_loss_factor_fix[t]
return expr == 0
self.heat_loss_equation_fix = Constraint(
self.CONVEX_HEATPIPES, m.TIMESTEPS, rule=_heat_loss_rule_fix)
def _heat_loss_rule_on_off(block, n, t):
"""Rule definition for the heat loss depending on the nominal
capacity. Here, the losses can be "switched off".
"""
o = list(n.outputs.keys())[0]
expr = 0
expr += - block.heat_loss[n, t]
expr += \
(n.heat_loss_factor[t] * m.flows[n, o].nominal_value + n.heat_loss_factor_fix[t]) *\
m.NonConvexFlow.status[n, o, t]
return expr == 0
self.heat_loss_equation_on_off = Constraint(
self.NONCONVEX_HEATPIPES, m.TIMESTEPS, rule=_heat_loss_rule_on_off)
def _relation_rule(block, n, t):
"""Link input and output flow and subtract heat loss."""
i = list(n.inputs.keys())[0]
o = list(n.outputs.keys())[0]
expr = 0
expr += - m.flow[n, o, t]
expr += m.flow[i, n, t] * n.conversion_factors[
o][t] / n.conversion_factors[i][t]
expr += - block.heat_loss[n, t]
return expr == 0
self.relation = Constraint(self.HEATPIPES, m.TIMESTEPS,
rule=_relation_rule)
class HeatPipelineInvestBlock(SimpleBlock):
r"""Block representing a pipeline of a district heating system.
:class:`~oemof.solph.custom.HeatPipeline`
**The following constraints are created:**
.. _HeatPipelineInvestBlock-equations:
.. math::
&
(1) \dot{Q}_{out}(t) = \dot{Q}_{in}(t) \cdot
\frac{\eta_{out}}{\eta_{in}} - \dot{Q}_{loss}(t)\\
&
(2) \dot{Q}_{loss}(t) = f_{loss}(t) \cdot l \cdot \dot{Q}_{nominal}
&
The symbols used are defined as follows
(with Variables (V) and Parameters (P)):
.. csv-table::
:header: "symbol", "attribute", "type", "explanation"
:widths: 1, 1, 1, 1
":math:`\dot{Q}_{out}(t)`", ":py:obj:`flow[n, o, t]`", "V", "Heat
output"
":math:`\dot{Q}_{in}(t)`", ":py:obj:`flow[i, n, t]`", "V", "Heat input"
":math:`\dot{Q}_{loss}(t)`", ":py:obj:`heat_loss[n, t]`", "V", "Heat
loss of heat pipeline"
":math:`\dot{Q}_{nominal}`", ":py:obj:`flows[n, o].nominal_value`", "
V", "Nominal capacity of heating pipeline"
":math:`\eta_{out}`", ":py:obj:`conversion_factors[o][t]`", "P", "
Conversion factor of output flow (heat output)"
":math:`\eta_{in}`", ":py:obj:`conversion_factors[i][t]`", "P", "
Conversion factor of input flow (heat input)"
":math:`f_{loss}(t)`", ":py:obj:`heat_loss_factor`", "P", "Specific
heat loss factor for pipeline"
":math:`l`", ":py:obj:`length`", "P", "Length of heating pipeline"
"""
CONSTRAINT_GROUP = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _create(self, group=None):
""" Creates the linear constraint for the class:`HeatPipeline`
block.
Parameters
----------
group : list
"""
if group is None:
return None
m = self.parent_block()
# Defining Sets
self.INVESTHEATPIPES = Set(initialize=[n for n in group])
self.CONVEX_INVESTHEATPIPES = Set(initialize=[
n for n in group if n.outputs[list(n.outputs.keys())[0]].investment.nonconvex is False])
self.NONCONVEX_INVESTHEATPIPES = Set(initialize=[
n for n in group if n.outputs[list(n.outputs.keys())[0]].investment.nonconvex is True])
self.INVESTHEATPIPES_NO_DEMAND = Set(
initialize=[n for n in group if len(n.outputs.keys()) == 1])
self.INVESTHEATPIPES_WITH_DEMAND = Set(
initialize=[n for n in group if len(n.outputs.keys()) == 2])
# Defining Variables
self.heat_loss = Var(self.INVESTHEATPIPES, m.TIMESTEPS,
within=NonNegativeReals)
def _heat_loss_rule_convex(block, n, t):
"""Rule definition for constraint to connect the installed capacity
and the heat loss
"""
expr = 0
expr += - block.heat_loss[n, t]
expr += n.heat_loss_factor[t] * m.InvestmentFlow.invest[n, list(n.outputs.keys())[0]]
expr += n.heat_loss_factor_fix[t]
return expr == 0
self.heat_loss_equation_convex = Constraint(
self.CONVEX_INVESTHEATPIPES, m.TIMESTEPS, rule=_heat_loss_rule_convex)
def _heat_loss_rule_nonconvex(block, n, t):
"""Rule definition for constraint to connect the installed capacity
and the heat loss
"""
expr = 0
expr += - block.heat_loss[n, t]
expr += n.heat_loss_factor[t] * m.InvestmentFlow.invest[n, list(n.outputs.keys())[0]]
expr += n.heat_loss_factor_fix[t] * \
m.InvestmentFlow.invest_status[n, list(n.outputs.keys())[0]]
return expr == 0
self.heat_loss_equation_nonconvex = Constraint(
self.NONCONVEX_INVESTHEATPIPES, m.TIMESTEPS, rule=_heat_loss_rule_nonconvex)
def _relation_rule_no_demand(block, n, t):
"""Link input and output flow and subtract heat loss."""
i = list(n.inputs.keys())[0]
o = list(n.outputs.keys())[0]
expr = 0
expr += - m.flow[n, o, t]
expr += m.flow[i, n, t] * n.conversion_factors[
o][t] / n.conversion_factors[i][t]
expr += - block.heat_loss[n, t]
return expr == 0
self.relation_no_demand = Constraint(
self.INVESTHEATPIPES_NO_DEMAND, m.TIMESTEPS, rule=_relation_rule_no_demand)
def _relation_rule_with_demand(block, n, t):
"""Link input and output flow and subtract heat loss."""
i = list(n.inputs.keys())[0]
o = list(n.outputs.keys())[0]
d = list(n.outputs.keys())[1]
expr = 0
expr += - m.flow[n, o, t]
expr += m.flow[i, n, t] * n.conversion_factors[
o][t] / n.conversion_factors[i][t]
expr += - block.heat_loss[n, t]
expr += - m.flow[n, d, t]
return expr == 0
self.relation_with_demand = Constraint(
self.INVESTHEATPIPES_WITH_DEMAND, m.TIMESTEPS, rule=_relation_rule_with_demand)
def _inflow_outflow_invest_coupling_rule(block, n):
"""Rule definition of constraint connecting the inflow
`InvestmentFlow.invest of pipe with invested outflow `invest`
by nominal_storage_capacity__inflow_ratio
"""
i = list(n.inputs.keys())[0]
o = list(n.outputs.keys())[0]
expr = (m.InvestmentFlow.invest[i, n] == m.InvestmentFlow.invest[n, o])
return expr
self.inflow_outflow_invest_coupling = Constraint(
self.INVESTHEATPIPES, rule=_inflow_outflow_invest_coupling_rule)
```
#### File: DHNx/tests/test_units.py
```python
import os
import numpy as np
import dhnx
basedir = os.path.dirname(__file__)
dir_import = os.path.join(basedir, '_files/looped_network_import')
thermal_network = dhnx.network.ThermalNetwork(dir_import)
def test_add():
thermal_network.add('Producer', 5, lat=1, lon=1)
assert thermal_network.components['producers'].loc[5].to_list() == [1., 1., np.nan]
def test_remove():
thermal_network.remove('Consumer', 1)
assert 4 not in thermal_network.components['consumers'].index
``` |
{
"source": "jnettels/reegis",
"score": 3
} |
#### File: reegis/reegis/demand_heat.py
```python
import os
import logging
# External libraries
import pandas as pd
from workalendar.europe import Germany
# oemof libraries
import demandlib.bdew as bdew
# internal modules
from reegis import config as cfg
from reegis import bmwi
from reegis import geometries
from reegis import energy_balance
from reegis import coastdat
from reegis import inhabitants
def heat_demand(year):
"""
Fetch heat demand per sector from the federal states energy balances.
If the share between domestic and retail does not exist the share from
the german balance is used. If this value also not exists a default
share of 0.5 is used.
Parameters
----------
year
Returns
-------
pandas.DataFrame
Examples
--------
>>> hd=heat_demand(2014)
>>> hd.loc[('MV', 'domestic'), 'district heating']
5151.5
"""
eb = energy_balance.get_usage_balance(year)
eb.sort_index(inplace=True)
# get fraction of domestic and retail from the german energy balance
share = energy_balance.get_domestic_retail_share(year)
# Use 0.5 for both sectors if no value is given
share.fillna(0.5, inplace=True)
# Divide domestic and retail by the value of the german energy balance if
# the sum of domestic and retail does not equal the value given in the
# local energy balance.
check_value = True
for state in eb.index.get_level_values(0).unique():
for col in eb.columns:
check = (
eb.loc[(state, "domestic"), col]
+ eb.loc[(state, "retail"), col]
- eb.loc[(state, "domestic and retail"), col]
).round()
if check < 0:
for sector in ["domestic", "retail"]:
try:
eb.loc[(state, sector), col] = (
eb.loc[(state, "domestic and retail"), col]
* share.loc[col, sector]
)
except KeyError:
eb.loc[(state, sector), col] = (
eb.loc[(state, "domestic and retail"), col] * 0.5
)
check = (
eb.loc[(state, "domestic"), col]
+ eb.loc[(state, "retail"), col]
- eb.loc[(state, "domestic and retail"), col]
).round()
if check < 0:
logging.error(
"In {0} the {1} sector results {2}".format(
state, col, check
)
)
check_value = False
if check_value:
logging.debug("Divides 'domestic and retail' without errors.")
# Reduce energy balance to the needed columns and group by fuel groups.
eb = eb.loc[
(slice(None), ["industrial", "domestic", "retail"]),
]
eb = eb.groupby(by=cfg.get_dict("FUEL_GROUPS_HEAT_DEMAND"), axis=1).sum()
# Remove empty columns
for col in eb.columns:
if not (
eb.loc[(slice(None), "domestic"), col].sum() > 0
or eb.loc[(slice(None), "retail"), col].sum() > 0
or eb.loc[(slice(None), "industrial"), col].sum() > 0
):
del eb[col]
# The use of electricity belongs to the electricity sector. It is possible
# to connect it to the heating sector for future scenarios.
del eb["electricity"]
del eb["total"] # if electricity is removed total is not correct anymore.
# get fraction of mechanical energy use and subtract it from the balance to
# get the use of heat only.
share_mech = share_of_mechanical_energy_bmwi(year)
for c in share_mech.columns:
for i in share_mech.index:
eb.loc[(slice(None), c), i] -= (
eb.loc[(slice(None), c), i] * share_mech.loc[i, c]
)
eb.sort_index(inplace=True)
return eb
def share_of_mechanical_energy_bmwi(year):
"""
Get share of mechanical energy from the overall energy use per sector.
Parameters
----------
year : int
Returns
-------
pandas.DataFrame
Examples
--------
>>> share_of_mechanical_energy_bmwi(2014).loc['oil', 'retail']
0.078
"""
mech = pd.DataFrame()
fs = bmwi.read_bmwi_sheet_7("a")
fs.sort_index(inplace=True)
sector = "Industrie"
total = float(fs.loc[(sector, "gesamt"), year])
mech[sector] = (
fs.loc[(sector, "mechanische Energie"), year].div(total).round(3)
)
fs = bmwi.read_bmwi_sheet_7("b")
fs.sort_index(inplace=True)
for sector in fs.index.get_level_values(0).unique():
total = float(fs.loc[(sector, "gesamt"), year])
mech[sector] = (
fs.loc[(sector, "mechanische Energie"), year]
.div(total)
.astype(float)
.round(3)
)
mech.drop(" - <NAME>", inplace=True)
mech.drop("mechanische Energie", inplace=True)
ren_col = {
"Industrie": "industrial",
"Gewerbe, Handel, Dienstleistungen ": "retail",
"private Haushalte": "domestic",
}
ren_index = {
" - <NAME>": "oil",
" - <NAME>": "natural gas",
}
mech.index.name = ""
mech.rename(columns=ren_col, inplace=True)
mech.rename(index=ren_index, inplace=True)
mech.fillna(0, inplace=True)
return mech
def get_heat_profile_from_demandlib(
temperature, annual_demand, sector, year, build_class=1
):
"""
Create an hourly load profile from the annual demand using the demandlib.
Parameters
----------
temperature : pandas.Series
annual_demand : float
sector : str
year : int
build_class : int
Returns
-------
pandas.DataFrame
Examples
--------
>>> temperature=pd.Series(list(range(50)), index=pd.date_range(
... '2014-05-03 12:00', periods=50, freq='h'))
>>> temperature=10 + temperature * 0.1
>>> hp=get_heat_profile_from_demandlib(
... temperature, 5345, 'retail', 2014)
>>> round(hp.sum())
5302.0
"""
cal = Germany()
holidays = dict(cal.holidays(year))
if "efh" in sector:
shlp_type = "EFH"
elif "mfh" in sector:
shlp_type = "MFH"
elif "domestic" in sector:
shlp_type = "MFH"
elif "retail" in sector:
shlp_type = "ghd"
build_class = 0
elif "industrial" in sector:
shlp_type = "ghd"
build_class = 0
else:
raise AttributeError('"{0}" is an unknown sector.'.format(sector))
return bdew.HeatBuilding(
temperature.index,
holidays=holidays,
temperature=temperature,
shlp_type=shlp_type,
wind_class=0,
building_class=build_class,
annual_heat_demand=annual_demand,
name=sector,
ww_incl=True,
).get_bdew_profile()
def get_heat_profiles_by_federal_state(
year, to_csv=None, state=None, weather_year=None
):
"""
Get heat profiles by state, sector and fuel. Use the pandas `groupby`
method to group the results.
The unit of the resulting data is TJ.
Parameters
----------
year : int
Year of the demand data set.
to_csv : str
Path to the csv file.
state : list or None
List of abbreviations of federal states. If None a table with all
federal states will be returned. Valid values are: BB, BE, BW, BY, HB,
HE, HH, MV, NI, NW, RP, SH, SL, SN, ST, TH
weather_year : int or None
Can be used if the year of the weather data differs from the year of
the demand data. If None the year parameter will be used. Use with
care, because the demand data may include implicit weather effects.
Returns
-------
pd.DataFrame
Examples
--------
>>> fn=os.path.join(os.path.expanduser('~'),
... 'heat_profile.reegis_doctest.csv')
>>> hp=get_heat_profiles_by_federal_state(2014, state=['BE', 'BB'],
... to_csv=fn)
>>> hp.groupby(level=[0, 1], axis=1).sum().sum().round(1)
BB domestic 66822.4
industrial 69668.0
retail 23299.5
BE domestic 67382.1
industrial 6162.8
retail 39364.9
dtype: float64
>>> round(hp.groupby(level=[0, 2], axis=1).sum().sum().loc['BB'], 1)
district heating 17646.9
gas 3916.3
hard coal 21378.4
lignite 5630.5
natural gas 63840.8
oil 16257.4
other 1112.1
re 30007.4
dtype: float64
>>> hp_MWh=hp.div(0.0036)
>>> round(hp_MWh.groupby(level=[2], axis=1).sum().sum().loc['lignite'], 1)
1671427.4
>>> round(hp.sum().sum(), 1)
272699.7
"""
if weather_year is None:
weather_year = year
building_class = {}
for (k, v) in cfg.get_dict("building_class").items():
for s in v.split(", "):
building_class[s] = int(k)
demand_state = heat_demand(year).sort_index()
temperatures = coastdat.federal_state_average_weather(
weather_year, "temp_air"
)
temperatures = temperatures.tz_convert("Europe/Berlin")
my_columns = pd.MultiIndex(levels=[[], [], []], codes=[[], [], []])
heat_profiles = pd.DataFrame(columns=my_columns)
if state is None:
states = demand_state.index.get_level_values(0).unique()
else:
states = state
# for region in demand_state.index.get_level_values(0).unique():
for region in states:
logging.info("Creating heat profile for {}".format(region))
tmp = demand_state.loc[region].groupby(level=0).sum()
temperature = temperatures[region] - 273
for fuel in tmp.columns:
logging.debug(
"{0} - {1} ({2})".format(region, fuel, building_class[region])
)
for sector in tmp.index:
heat_profiles[
(region, sector, fuel)
] = get_heat_profile_from_demandlib(
temperature,
tmp.loc[sector, fuel],
sector,
year,
building_class[region],
)
heat_profiles.sort_index(1, inplace=True)
if to_csv is not None:
heat_profiles.to_csv(to_csv)
return heat_profiles
def get_heat_profiles_by_region(
regions, year, name="region", from_csv=None, to_csv=None, weather_year=None
):
"""
Get heat profiles for any region divided by sector and fuel. Use the
pandas `groupby` method to group the results.
The unit of the resulting data is TJ.
Parameters
----------
year : int
Year of the demand data set.
regions : geopandas.geoDataFrame
        A table with region geometries and their id as index.
name : str
Name of the regions set.
from_csv : str
Path to the file of the demand state profiles.
to_csv : str
Path with filename of the output file.
weather_year : int or None
Can be used if the year of the weather data differs from the year of
the demand data. If None the year parameter will be used. Use with
care, because the demand data may include implicit weather effects.
Returns
-------
pd.DataFrame
Examples
--------
>>> from reegis import geometries
>>> fn=os.path.join(os.path.expanduser('~'),
... 'heat_profile.reegis_doctest.csv')
>>> regions=geometries.load(
... cfg.get('paths', 'geometry'),
... cfg.get('geometry', 'de21_polygons'))
>>> hpl=get_heat_profiles_by_region(regions, 2014, from_csv=fn)
>>> round(hpl.sum().sum(), 1)
272699.7
>>> os.remove(fn)
"""
if weather_year is None:
weather_year = year
# Get demand by federal state
if from_csv is None:
from_csv = os.path.join(
cfg.get("paths", "demand"),
cfg.get("demand", "heat_profile_state_var").format(
year=year, weather_year=weather_year
),
)
if not os.path.isfile(from_csv):
get_heat_profiles_by_federal_state(
year, to_csv=from_csv, weather_year=weather_year
)
demand_state = pd.read_csv(from_csv, index_col=[0], header=[0, 1, 2])
# Create empty MulitIndex DataFrame to take the results
four_level_columns = pd.MultiIndex(
levels=[[], [], [], []], codes=[[], [], [], []]
)
demand_region = pd.DataFrame(
index=demand_state.index, columns=four_level_columns
)
# Get inhabitants for federal states and the given regions
ew = inhabitants.get_share_of_federal_states_by_region(year, regions, name)
# Use the inhabitants to recalculate the demand from federal states to
# the given regions.
for i in ew.items():
state = i[0][1]
region = i[0][0]
share = i[1]
if state in demand_state.columns.get_level_values(0).unique():
for sector in (
demand_state[state].columns.get_level_values(0).unique()
):
for fuel in demand_state[state, sector].columns:
demand_region[region, fuel, sector, state] = (
demand_state[state, sector, fuel] * share
)
demand_region.sort_index(1, inplace=True)
demand_region = demand_region.groupby(level=[0, 1, 2], axis=1).sum()
if to_csv is not None:
demand_region.to_csv(to_csv)
return demand_region
if __name__ == "__main__":
pass
```
#### File: reegis/reegis/opsd.py
```python
import os
import logging
import datetime
# Internal modules
from reegis import config as cfg
from reegis import geometries as geo
# External libraries
import numpy as np
import pandas as pd
import pyproj
import requests
from shapely.wkt import loads as wkt_loads
import warnings
warnings.filterwarnings("ignore", category=pd.errors.DtypeWarning)
def convert_utm_code_opsd(df):
# *** Convert utm if present ***
utm_zones = list()
df_utm = None
# Get all utm zones.
if "utm_zone" in df:
df_utm = df.loc[(df.lon.isnull()) & (df.utm_zone.notnull())]
utm_zones = df_utm.utm_zone.unique()
# Loop over utm zones and convert utm coordinates to latitude/longitude.
for zone in utm_zones:
my_utm = pyproj.Proj(
"+proj=utm +zone={0},+north,+ellps=WGS84,".format(str(int(zone)))
+ "+datum=WGS84,+units=m,+no_defs"
)
utm_df = df_utm.loc[
df_utm.utm_zone == int(zone), ("utm_east", "utm_north")
]
coord = my_utm(
utm_df.utm_east.values, utm_df.utm_north.values, inverse=True
)
df.loc[(df.lon.isnull()) & (df.utm_zone == int(zone)), "lat"] = coord[
1
]
df.loc[(df.lon.isnull()) & (df.utm_zone == int(zone)), "lon"] = coord[
0
]
return df
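# A hedged usage sketch (not part of the original module) with hypothetical
# values; rows that have UTM coordinates but no lon/lat get converted:
#
#     df = pd.DataFrame({
#         'lat': [np.nan], 'lon': [np.nan], 'utm_zone': [32.0],
#         'utm_east': [500000.0], 'utm_north': [5800000.0],
#     })
#     df = convert_utm_code_opsd(df)
#     # df.lat / df.lon now hold the corresponding WGS84 coordinates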
def guess_coordinates_by_postcode_opsd(df):
# *** Use postcode ***
if "postcode" in df:
df_pstc = df.loc[(df.lon.isnull() & df.postcode.notnull())]
pstc = None
if len(df_pstc) > 0:
pstc = pd.read_csv(
os.path.join(
cfg.get("paths", "geometry"),
cfg.get("geometry", "postcode_polygon"),
),
index_col="zip_code",
)
for idx, val in df_pstc.iterrows():
try:
                    # If the postcode is not a number the integer conversion
                    # will raise a ValueError. Some postcodes look like this:
                    # '123XX'. It would be possible to add the major regions
                    # to the postcode map in order to search for the first
                    # two/three digits.
postcode = int(val.postcode)
if postcode in pstc.index:
df.loc[df.id == val.id, "lon"] = wkt_loads(
pstc.loc[postcode].values[0]
).centroid.x
df.loc[df.id == val.id, "lat"] = wkt_loads(
pstc.loc[postcode].values[0]
).centroid.y
# Replace the last number with a zero and try again.
elif round(postcode / 10) * 10 in pstc.index:
postcode = round(postcode / 10) * 10
df.loc[df.id == val.id, "lon"] = wkt_loads(
pstc.loc[postcode].values[0]
).centroid.x
df.loc[df.id == val.id, "lat"] = wkt_loads(
pstc.loc[postcode].values[0]
).centroid.y
else:
logging.debug("Cannot find postcode {0}.".format(postcode))
except ValueError:
logging.debug("Cannot find postcode {0}.".format(val.postcode))
return df
def guess_coordinates_by_spatial_names_opsd(
df, fs_column, cap_col, total_cap, stat
):
# *** Use municipal_code and federal_state to define coordinates ***
if fs_column in df:
if "municipality_code" in df:
if df.municipality_code.dtype == str:
df.loc[df.municipality_code == "AWZ", fs_column] = "AWZ_NS"
if "postcode" in df:
df.loc[df.postcode == "000XX", fs_column] = "AWZ"
states = df.loc[df.lon.isnull()].groupby(fs_column).sum()[cap_col]
logging.debug(
"Fraction of undefined capacity by federal state "
+ "(percentage):"
)
for (state, capacity) in states.iteritems():
logging.debug(
"{0}: {1:.4f}".format(state, capacity / total_cap * 100)
)
stat.loc[state, "undefined_capacity"] = capacity
# A simple table with the centroid of each federal state.
f2c = pd.read_csv(
os.path.join(
cfg.get("paths", "geometry"),
cfg.get("geometry", "federalstates_centroid"),
),
index_col="name",
)
# Use the centroid of each federal state if the federal state is given.
# This is not very precise and should not be used for a high fraction
# of plants.
f2c = f2c.applymap(wkt_loads).centroid
for l in df.loc[(df.lon.isnull() & df[fs_column].notnull())].index:
if df.loc[l, fs_column] in f2c.index:
df.loc[l, "lon"] = f2c[df.loc[l, fs_column]].x
df.loc[l, "lat"] = f2c[df.loc[l, fs_column]].y
return df
def log_undefined_capacity(df, cap_col, total_cap, msg):
logging.debug(msg)
if len(df.loc[df.lon.isnull()]) == 0:
undefined_cap = 0
else:
undefined_cap = df.loc[df.lon.isnull()][cap_col].sum()
logging.info(
"{0} percent of capacity is undefined.".format(
undefined_cap / total_cap * 100
)
)
return undefined_cap
def complete_opsd_geometries(
df, category, time=None, fs_column="federal_state"
):
"""
Try different methods to fill missing coordinates.
"""
version_name = cfg.get("opsd", "version_name")
opsd_path = cfg.get("paths_pattern", "opsd").format(version=version_name)
message_path = os.path.join(opsd_path, "messages")
os.makedirs(message_path, exist_ok=True)
cap_col = "capacity"
if "id" not in df:
df["id"] = df.index
no_id = True
else:
no_id = False
if time is None:
time = datetime.datetime.now()
# Get index of incomplete rows.
incomplete = df.lon.isnull()
statistics = pd.DataFrame()
# Calculate total capacity
total_capacity = df[cap_col].sum()
statistics.loc["original", "undefined_capacity"] = log_undefined_capacity(
df,
cap_col,
total_capacity,
"IDs without coordinates found. Trying to fill the gaps.",
)
df = convert_utm_code_opsd(df)
statistics.loc["utm", "undefined_capacity"] = log_undefined_capacity(
df,
cap_col,
total_capacity,
"Reduced undefined plants by utm conversion.",
)
df = guess_coordinates_by_postcode_opsd(df)
statistics.loc["postcode", "undefined_capacity"] = log_undefined_capacity(
df, cap_col, total_capacity, "Reduced undefined plants by postcode."
)
df = guess_coordinates_by_spatial_names_opsd(
df, fs_column, cap_col, total_capacity, statistics
)
statistics.loc["name", "undefined_capacity"] = log_undefined_capacity(
df,
cap_col,
total_capacity,
"Reduced undefined plants by federal_state centroid.",
)
# Store table of undefined sets to csv-file
if incomplete.any():
df.loc[incomplete].to_csv(
os.path.join(
message_path,
"{0}_incomplete_geometries_before.csv".format(category),
)
)
incomplete = df.lon.isnull()
if incomplete.any():
df.loc[incomplete].to_csv(
os.path.join(
message_path,
"{0}_incomplete_geometries_after.csv".format(category),
)
)
df.loc[incomplete].groupby(
["energy_source_level_2", "technology"]
).sum().to_csv(
os.path.join(
message_path,
"{0}_incomplete_geometries_after_grouped.csv".format(category),
)
)
logging.debug("Gaps stored to: {0}".format(cfg.get("paths", "messages")))
statistics["total_capacity"] = total_capacity
statistics.to_csv(
os.path.join(message_path, "statistics_{0}_pp.csv".format(category))
)
# Log information
geo_check = not df.lon.isnull().any()
if not geo_check:
logging.warning("Plants with unknown geometry.")
logging.info("Geometry check: {0}".format(str(geo_check)))
logging.info(
"Geometry supplemented: {0}".format(
str(datetime.datetime.now() - time)
)
)
if no_id:
del df["id"]
return df
def remove_cols(df, cols):
"""Safely remove columns from dict."""
for key in cols:
try:
del df[key]
except KeyError:
pass
return df
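# A minimal usage sketch (not part of the original module):
#
#     df = pd.DataFrame({'a': [1], 'b': [2]})
#     df = remove_cols(df, ['b', 'not_there'])  # drops 'b', ignores 'not_there'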
def load_original_opsd_file(category, overwrite):
"""
Read file if exists or download it from source.
"""
version_name = cfg.get("opsd", "version_name")
opsd_path = cfg.get("paths_pattern", "opsd").format(version=version_name)
os.makedirs(opsd_path, exist_ok=True)
if category not in ["renewable", "conventional"]:
raise ValueError("Category '{0}' is not valid.".format(category))
v = {
"conventional": cfg.get("opsd", "version_conventional"),
"renewable": cfg.get("opsd", "version_renewable"),
}
orig_csv_file = os.path.join(
opsd_path,
cfg.get("opsd", "original_file_pattern").format(cat=category),
)
url_section = "opsd_url_pattern"
# Download non existing files. If you think that there are newer files you
# have to set overwrite=True to overwrite existing with downloaded files.
if not os.path.isfile(orig_csv_file) or overwrite:
logging.warning("File not found. Try to download it from server.")
logging.warning("Check URL if download does not work.")
# Download Data
url_data = cfg.get(url_section, "{0}_data".format(category))
req = requests.get(url_data.format(version=v[category]))
with open(orig_csv_file, "wb") as fout:
fout.write(req.content)
logging.warning(
"Downloaded from {0} and copied to '{1}'.".format(
url_data.format(version=v[category]), orig_csv_file
)
)
# Download Readme
url_readme = cfg.get(url_section, "{0}_readme".format(category))
req = requests.get(url_readme.format(version=v[category]))
with open(
os.path.join(
opsd_path,
cfg.get("opsd", "readme_file_pattern").format(cat=category),
),
"wb",
) as fout:
fout.write(req.content)
# Download json
url_json = cfg.get(url_section, "{0}_json".format(category))
req = requests.get(url_json.format(version=v[category]))
with open(
os.path.join(
opsd_path,
cfg.get("opsd", "json_file_pattern").format(cat=category),
),
"wb",
) as fout:
fout.write(req.content)
if category == "renewable":
df = pd.read_csv(orig_csv_file)
else:
df = pd.read_csv(orig_csv_file, index_col=[0])
return df
def prepare_dates(df, date_cols, month):
# Commission year from float or string
if df[date_cols[0]].dtype == np.float64:
df["com_year"] = df[date_cols[0]].fillna(1800).astype(np.int64)
else:
df["com_year"] = pd.to_datetime(
df[date_cols[0]].fillna("1800-01-01")
).dt.year
# Decommission year from float or string
if df[date_cols[1]].dtype == np.float64:
df["decom_year"] = df[date_cols[1]].fillna(2050).astype(np.int64)
else:
df["decom_year"] = pd.to_datetime(
df[date_cols[1]].fillna("2050-12-31")
).dt.year
if month:
df["com_month"] = pd.to_datetime(
df[date_cols[0]].fillna("1800-01-01")
).dt.month
df["decom_month"] = pd.to_datetime(
df[date_cols[1]].fillna("2050-12-31")
).dt.month
else:
df["com_month"] = 6
df["decom_month"] = 6
def prepare_opsd_file(category, prepared_file_name, overwrite):
# Load original opsd file
df = load_original_opsd_file(category, overwrite)
remove_list = None
date_cols = None
month = False
# Load original file and set differences between conventional and
# renewable power plants.
if category == "renewable":
# capacity_column='electrical_capacity'
remove_list = [
"tso",
"dso",
"dso_id",
"eeg_id",
"bnetza_id",
"federal_state",
"postcode",
"municipality_code",
"municipality",
"address",
"address_number",
"utm_zone",
"utm_east",
"utm_north",
"data_source",
]
date_cols = ("commissioning_date", "decommissioning_date")
month = True
elif category == "conventional":
# capacity_column='capacity_net_bnetza'
date_cols = ("commissioned", "shutdown")
df = df.rename(
columns={
"electrical_capacity": "capacity",
"capacity_net_bnetza": "capacity",
"efficiency_estimate": "efficiency",
}
)
if len(df.loc[df.lon.isnull()]) > 0:
df = complete_opsd_geometries(df, category, fs_column="federal_state")
else:
logging.info("Skipped 'complete_opsd_geometries' function.")
# Remove power plants with no capacity:
number = len(df[df["capacity"].isnull()])
df = df[df["capacity"].notnull()]
if number > 0:
msg = "{0} power plants have been removed, because the capacity was 0."
logging.warning(msg.format(number))
# To save disc and RAM capacity unused column are removed.
if remove_list is not None:
df = remove_cols(df, remove_list)
prepare_dates(df, date_cols, month)
df.to_csv(prepared_file_name)
return df
def load_opsd_file(category, overwrite, prepared=True):
version_name = cfg.get("opsd", "version_name")
opsd_path = cfg.get("paths_pattern", "opsd").format(version=version_name)
os.makedirs(opsd_path, exist_ok=True)
if prepared:
prepared_file_name = os.path.join(
opsd_path,
cfg.get("opsd", "cleaned_csv_file_pattern").format(cat=category),
)
if not os.path.isfile(prepared_file_name) or overwrite:
df = prepare_opsd_file(category, prepared_file_name, overwrite)
else:
df = pd.read_csv(prepared_file_name, index_col=[0])
else:
df = load_original_opsd_file(category, overwrite)
return df
def opsd_power_plants(overwrite=False):
"""
Prepare OPSD power plants and store table to hdf file with the categories
'renewable' and 'conventional'.
Examples
--------
>>> filename=opsd_power_plants()
>>> re=pd.read_hdf(filename, 'renewable') # doctest: +SKIP
>>> cv=pd.read_hdf(filename, 'conventional') # doctest: +SKIP
"""
strcols = {
"conventional": [
"name_bnetza",
"block_bnetza",
"name_uba",
"company",
"street",
"postcode",
"city",
"state",
"country_code",
"fuel",
"technology",
"chp",
"commissioned_original",
"status",
"type",
"eic_code_plant",
"eic_code_block",
"efficiency_source",
"energy_source_level_1",
"energy_source_level_2",
"energy_source_level_3",
"eeg",
"network_node",
"voltage",
"network_operator",
"merge_comment",
"geometry",
],
"renewable": [
"commissioning_date",
"decommissioning_date",
"energy_source_level_1",
"energy_source_level_2",
"energy_source_level_3",
"technology",
"voltage_level",
"comment",
"geometry",
],
}
version_name = cfg.get("opsd", "version_name")
opsd_path = cfg.get("paths_pattern", "opsd").format(version=version_name)
os.makedirs(opsd_path, exist_ok=True)
opsd_file_name = os.path.join(opsd_path, cfg.get("opsd", "opsd_prepared"))
if os.path.isfile(opsd_file_name) and not overwrite:
hdf = None
else:
hdf = pd.HDFStore(opsd_file_name, mode="w")
# If the power plant file does not exist, download and prepare it.
for category in ["conventional", "renewable"]:
# Define file and path pattern for power plant file.
cleaned_file_name = os.path.join(
opsd_path,
cfg.get("opsd", "cleaned_csv_file_pattern").format(cat=category),
)
exist = hdf is None
if not exist:
logging.info("Preparing {0} opsd power plants".format(category))
df = load_opsd_file(category, overwrite, prepared=True)
pp = geo.create_geo_df(df, lon_column="lon", lat_column="lat")
pp = geo.remove_invalid_geometries(pp)
df = pd.DataFrame(pp)
df[strcols[category]] = df[strcols[category]].astype(str)
hdf.put(category, df)
logging.info(
"Opsd {0} power plants stored to {1}".format(
category, opsd_file_name
)
)
if os.path.isfile(cleaned_file_name):
os.remove(cleaned_file_name)
if hdf is not None:
hdf.close()
return opsd_file_name
if __name__ == "__main__":
pass
``` |
{
"source": "jneu/calibration-pipeline-testing-tool",
"score": 2
} |
#### File: caltest/test_caldetector1/test_dark_current.py
```python
import pytest
import numpy as np
from jwst.dark_current import DarkCurrentStep
from jwst import datamodels
from astropy.io import fits
import matplotlib.pyplot as plt
import os
from ..utils import translate_dq, extract_subarray
@pytest.fixture(scope='module')
def fits_output(fits_input):
fname = fits_input[0].header['filename'].replace('.fits',
'_darkcurrentstep.fits')
yield fits.open(fname)
# delete the output FITS file after this module is finished
os.remove(fname)
@pytest.fixture(scope='module')
def fits_dark(fits_output):
ref_path = fits_output['PRIMARY'].header['R_DARK']
ref_path = ref_path.replace('crds://', '/grp/crds/cache/references/jwst/')
return fits.open(ref_path)
def test_dark_current_step(fits_input):
"""Make sure the DQInitStep runs without error."""
fname = fits_input[0].header['filename'].replace('.fits',
'_darkcurrentstep.fits')
DarkCurrentStep.call(datamodels.open(fits_input), output_file=fname,
save_results=True)
def test_dark_subtraction(fits_input, fits_dark, fits_output):
nframes = fits_output[0].header['NFRAMES']
groupgap = fits_output[0].header['GROUPGAP']
nints, ngroups, nx, ny = fits_output['SCI'].shape
nframes_tot = (nframes + groupgap) * ngroups
if nframes_tot > fits_dark['SCI'].data.shape[0]:
# data should remain unchanged if there are more frames in the
# science data than the reference file
assert np.all(fits_input['SCI'].data == fits_output['SCI'].data)
else:
dark_correct = np.zeros((nframes, ngroups, nx, ny))
data = fits_dark['SCI'].data[:nframes_tot, :, :]
for i in range(nframes):
dark_correct[i] = data[i::(nframes + groupgap), :, :]
dark_correct = np.average(dark_correct, axis=0)
dark_correct[np.isnan(dark_correct)] = 0
result = fits_input['SCI'].data - dark_correct
assert np.allclose(result, fits_output['SCI'].data)
def test_dark_current_quality(fits_input, fits_output):
"""
Check the slope of the median ramp for the detector. The count rate of the
dark subtracted ramp should be small (< 0.1?)
:param fits_input: astropy.io.fits.HDUList
The FITS HDUList input
:param fits_output: astropy.io.fits.HDUList
The FITS HDUList output
"""
med_in = np.median(fits_input['SCI'].data[0, :, :, :], axis=(1, 2))
med_out = np.median(fits_output['SCI'].data[0, :, :, :,], axis=(1,2))
groups = np.arange(med_in.shape[0])
slope_in, _ = np.polyfit(groups, med_in, 1)
slope_out, _ = np.polyfit(groups, med_out, 1)
print(
"Slope of median ramp before dark subtraction: {} counts/group".format(
slope_in))
print(
"Slope of median ramp after dark subtraction: {} counts/group".format(
slope_out))
plt.clf()
plt.plot(med_in, label='input')
plt.plot(med_out, label='output')
base = fits_input[0].header['FILENAME'].split('.')[0]
plot_fname = 'test_dark_current_quality_'+base+'.png'
plt.xlabel('Group Number')
plt.ylabel('Counts')
plt.savefig(plot_fname)
assert abs(slope_out) < 0.1
def test_pixeldq_propagation(fits_input, fits_output, fits_dark):
# translate dq flags to standard bits
pixeldq = translate_dq(fits_dark)
# extract subarray
if fits_dark[0].header['SUBARRAY'] == 'GENERIC':
pixeldq = extract_subarray(pixeldq, fits_input)
assert np.all(fits_output['PIXELDQ'].data == np.bitwise_or(fits_input['PIXELDQ'].data, pixeldq))
```
#### File: caltest/test_caldetector1/test_lastframe.py
```python
import numpy as np
from jwst.lastframe import LastFrameStep
from jwst import datamodels
import pytest
from astropy.io import fits
import os
@pytest.fixture(scope='module')
def fits_output(fits_input):
    fname = fits_input[0].header['filename'].replace('.fits', '_lastframestep.fits')
yield fits.open(fname)
# delete the output FITS file after this module is finished
os.remove(fname)
def test_lastframe_step(fits_input):
"""Make sure the LastFrameStep runs without error."""
    fname = fits_input[0].header['filename'].replace('.fits', '_lastframestep.fits')
LastFrameStep.call(datamodels.open(fits_input), output_file=fname,
save_results=True)
def test_lastframe_flagged(fits_input, fits_output):
"""
check that GROUPDQ lastframe is flagged as DO_NOT_USE
unless there is only 1 group
"""
if fits_output['SCI'].data.shape[1] > 1:
assert np.all(fits_output['GROUPDQ'].data[:, -1, :, :] & (1 << 0))
else:
assert np.all(fits_input['GROUPDQ'].data[:, -1, :, :]
== fits_output['GROUPDQ'].data[:, -1, :, :])
```
#### File: caltest/test_caldetector1/test_persistence.py
```python
import pytest
import os
import numpy as np
from astropy.io import fits
from jwst import datamodels
from jwst.persistence import PersistenceStep
#from jwst.datamodels import TrapsFilledModel
from jwst.datamodels import dqflags
#@pytest.fixture(scope="module")
#def input_hdul(request, config):
# if config.has_option("persistence", "input_file"):
# curdir = os.getcwd()
# config_dir = os.path.dirname(request.config.getoption("--config_file"))
# os.chdir(config_dir)
# hdul = fits.open(config.get("persistence", "input_file"))
# os.chdir(curdir)
# return hdul
# else:
# pytest.skip("needs persistence input_file")
@pytest.fixture(scope="module")
def out_hdul(fits_input):
fname = '_persist.'.join(fits_input[0].header['filename'].split('.'))
yield fits.open(fname)
#os.remove(fname)
@pytest.fixture(scope="module")
def trapsfilled_hdul(trapsfilled):
yield fits.open(trapsfilled)
@pytest.fixture(scope='module')
def traps_hdul(fits_input):
fname = '_trapsfilled.'.join(fits_input[0].header['filename'].split('.'))
yield fits.open(fname)
#os.remove(fname)
@pytest.fixture(scope='module')
def pers_hdul(fits_input):
fname = '_output_pers.'.join(fits_input[0].header['filename'].split('.'))
try:
hdul = fits.open(fname)
except:
print("output_pers file not present")
hdul = None
yield hdul
#os.remove(fname)
@pytest.fixture(scope="module")
def persat_hdul(out_hdul):
CRDS = '/grp/crds/cache/references/jwst/'
    ref_file = out_hdul[0].header['R_PERSAT']
if 'crds://' in ref_file:
ref_file = ref_file.replace('crds://',CRDS)
return fits.open(ref_file)
@pytest.fixture(scope="module")
def trpden_hdul(output_hdul):
CRDS = '/grp/crds/cache/references/jwst/'
ref_file = output_hdul[0].header['R_TRPDEN']
if 'crds://' in ref_file:
ref_file = ref_file.replace('crds://',CRDS)
return fits.open(ref_file)
@pytest.fixture(scope="module")
def trppar_hdul(output_hdul):
CRDS = '/grp/crds/cache/references/jwst/'
ref_file = output_hdul[0].header['R_TRPPAR']
if 'crds://' in ref_file:
ref_file = ref_file.replace('crds://',CRDS)
return fits.open(ref_file)
def test_run_persist_step(fits_input,trapsfilled):
outfile = fits_input[0].header['FILENAME'].replace('.fits','_persist.fits')
if trapsfilled.lower() in ["none",""]:
PersistenceStep.call(fits_input,save_persistence=True,\
output_file=outfile,save_results=True)
else:
PersistenceStep.call(fits_input,save_persistence=True,\
output_file=outfile,save_results=True,\
input_trapsfilled=trapsfilled)
def test_persistence_trapsfilled_shape(fits_input,traps_hdul,trapsfilled):
    '''Check that the OUTPUT trapsfilled file was created
    with the expected shape.'''
x,y = fits_input['SCI'].data.shape[-2:]
print("Science data shape (x,y) = ({},{})".format(x,y))
assert traps_hdul['SCI'].data.shape == (3,y,x)
def test_persistence_output_pers_shape(fits_input,pers_hdul,trapsfilled):
'''Check that the optional output file
"_output_pers.fits" was created if
the save_persistence option in the persistence
step was set to True. (Assume this test will
only be called in instances when save_persistence
    is True.)'''
opshape = pers_hdul['SCI'].data.shape
print("Output_pers data shape: {}".format(opshape))
assert opshape == fits_input['SCI'].data.shape
def test_persistence_subtracted_signal(fits_input, out_hdul, pers_hdul, trapsfilled):
'''Check that the signal values contained in the
output_pers file are indeed subtracted from the original
input file.'''
assert np.allclose(out_hdul[1].data,fits_input[1].data - pers_hdul[1].data)
def test_persistence_dq_flagged_pix(out_hdul,pers_hdul,trapsfilled,flagthresh=40):
'''Pixels that have more persistence signal than flag_pers_cutoff
should be flagged in the DQ array of the output file. The default
value of flag_pers_cutoff is 40 DN'''
# Check only integration #1
pdata = pers_hdul['SCI'].data[0,:,:,:]
# Keep only the maximum persistence value
# for each pixel
if ((flagthresh is not None) and (flagthresh > 0)):
collapsed = np.max(pdata,axis=0)
flagged = collapsed > flagthresh
dq_data = out_hdul['PIXELDQ'].data
print(("{} pixels have persistence values above the threshold "
"of {}.".format(np.sum(flagged),flagthresh)))
assert np.all(dq_data[flagged] & dqflags.pixel['DO_NOT_USE'] > 0)
else:
print("Flagthresh is {}".format(flagthresh))
assert True == True
#def test_calculated_persistence(fits_input,pers_hdul,persat_hdul,trapsfilled):
# '''Using Regan's paper (JWST-STScI-005689), manually
# calculate the expected amount of persistence in the input
# file, and compare to the pipeline's calculations
#
# Not sure how to do this without simply copying the
# code in the jwst cal pipeline step.
# '''
#data = fits_input['SCI'].data[0,:,:,:]
#f21 = data[1,:,:] = data[0,:,:]
#fw_frac = f21 / persat_hdul['SCI']
#trapc - total number of traps captured
#trape - num of traps that fit exponential decay (?)
#tau - time constant of capture
#trapi - num traps instantaneously captured
#S - rate of change in the depletion region in units of fraction of full
# well per unit time
#T - integration time
#trapc = s*(T*(trape + trapi) + trape*tau*(exp(-T/tau) - 1))
```
#### File: caltest/test_caldetector1/test_refpix.py
```python
import pytest
import numpy as np
from jwst.refpix import RefPixStep
from jwst import datamodels
from astropy.io import fits
from scipy.stats import sigmaclip
import matplotlib.pyplot as plt
import os
from ..utils import translate_dq, extract_subarray
@pytest.fixture(scope='module')
def fits_output(fits_input):
fname = fits_input[0].header['filename'].replace('.fits',
'_refpixstep.fits')
yield fits.open(fname)
# delete the output FITS file after this module is finished
os.remove(fname)
def test_refpix_step(fits_input):
"""Make sure the DQInitStep runs without error."""
fname = fits_input[0].header['filename'].replace('.fits',
'_refpixstep.fits')
RefPixStep.call(datamodels.open(fits_input), output_file=fname,
save_results=True)
def test_refpix_correction(fits_input, fits_output, use_side_ref_pixels=True,
odd_even_columns=True, side_smoothing_length=11,
side_gain=1.0):
"""
Reference pixel correction implementation by <NAME>.
Parameters
----------
fits_input: astropy.io.fits.HDUList
Input data for RefPixStep
fits_output: astropy.io.fits.HDUList
Output data after RefPixStep is run.
use_side_ref_pixels: bool, optional
Whether the RefPixStep was run with `use_side_ref_pixels`
(default is True, same as `jwst.refpix.RefPixStep`)
odd_even_columns: bool
Whether the RefPixStep was run with `odd_even_columns`
(default is True, same as `jwst.refpix.RefPixStep`)
side_smoothing_length: int
`side_smoothing_length` used by `RefPixStep`
(default is 11, same as `jwst.refpix.RefPixStep`)
side_gain: float
`side_gain` used by `RefPixStep`
(default is 11, same as `jwst.refpix.RefPixStep`)
"""
delta_amp = 512
if odd_even_columns==False:
xs=[np.arange(delta_amp, dtype = 'uint32')]
else:
xs=[np.arange(delta_amp//2, dtype='uint32')*2, np.arange(delta_amp//2, dtype = 'uint32')*2 + 1]
data_in = fits_input
data_out = fits_output
subarray = data_in[0].header['SUBARRAY']
if subarray == 'FULL':
sci_in = data_in[1].data
sci_out = data_out[1].data
gdq_in = data_in[3].data
pdq_in = data_in[2].data
sci_shape = sci_in.shape
niter = sci_shape[0]
ngroup = sci_shape[1]
if data_in[0].header['INSTRUME'] != 'NIRISS':
pytest.skip('This test has only been implemented for NIRISS')
# change to detector coordinate
# TODO make coordinate changes for other instruments
fsci_in = np.swapaxes(sci_in, 2, 3)[:, :, ::-1, ::-1]
fsci_out = np.swapaxes(sci_out, 2, 3)[:, :, ::-1, ::-1]
fgdq_in = np.swapaxes(gdq_in, 2, 3)[:, :, ::-1, ::-1]
fpdq_in = np.swapaxes(pdq_in, 0, 1)[::-1, ::-1]
fpdq_rep = np.array([fpdq_in, ] * ngroup)
fsci_shape = fsci_in.shape
fexp_sci_out = np.zeros(fsci_shape, dtype='float32')
if odd_even_columns == True:
top_means = np.zeros([niter, ngroup, 4, 2], dtype='float32')
bottom_means = np.zeros([niter, ngroup, 4, 2], dtype='float32')
means = np.zeros([niter, ngroup, 4, 2], dtype='float32')
else:
top_means = np.zeros([niter, ngroup, 4, 1], dtype='float32')
bottom_means = np.zeros([niter, ngroup, 4, 1], dtype='float32')
means = np.zeros([niter, ngroup, 4, 1], dtype='float32')
for it in range(niter):
subg_fsci_in = fsci_in[it, :, :, :]
subm_fsci_in = subg_fsci_in.copy()
for ig in range(ngroup):
for ia in range(4):
zerox = ia * delta_amp
for io in range(len(xs)):
sub_pdq_top = fpdq_rep[ig, 2044:2048, zerox + xs[io]]
sub_gdq_top = fgdq_in[it, ig, 2044:2048, zerox + xs[io]]
sub_sci_top = subg_fsci_in[ig, 2044:2048,
zerox + xs[io]]
sub_pdq_bottom = fpdq_rep[ig, 0:4, zerox + xs[io]]
                    sub_gdq_bottom = fgdq_in[it, ig, 0:4, zerox + xs[io]]
sub_sci_bottom = subg_fsci_in[ig, 0:4, zerox + xs[io]]
valid_top = np.where(
(sub_pdq_top != 1) & (sub_gdq_top != 1))
valid_bottom = np.where(
(sub_pdq_bottom != 1) & (sub_gdq_bottom != 1))
top_means[it, ig, ia, io] = np.mean(
sigmaclip(sub_sci_top[valid_top], low=3.0,
high=3.0).clipped)
bottom_means[it, ig, ia, io] = np.mean(
sigmaclip(sub_sci_bottom[valid_bottom], low=3.0,
high=3.0).clipped)
means[it, ig, ia, io] = (top_means[it, ig, ia, io] +
bottom_means[
it, ig, ia, io]) / 2.
subm_fsci_in[ig, :, zerox + xs[io]] = subg_fsci_in[ig,
:,
zerox + xs[io]] - \
means[
it, ig, ia, io]
if use_side_ref_pixels == True:
sub_pdq_left = fpdq_rep[ig, :, 0:4]
sub_sci_left = subm_fsci_in[ig, :, 0:4]
sub_pdq_right = fpdq_rep[ig, :, 2044:2048]
sub_sci_right = subm_fsci_in[ig, :, 2044:2048]
left_means = median_refpix(sub_sci_left,
side_smoothing_length,
sub_pdq_left)
right_means = median_refpix(sub_sci_right,
side_smoothing_length,
sub_pdq_right)
lr_means = 0.5 * (left_means + right_means) * side_gain
mrep = np.array([lr_means, ] * 2048)
mrep = np.swapaxes(mrep, 0, 1)
subm_fsci_in[ig, :, :] = subm_fsci_in[ig, :, :] - mrep
fexp_sci_out[it, :, :, :] = subm_fsci_in
exp_sci_out = np.swapaxes(fexp_sci_out, 2, 3)[:, :, ::-1, ::-1]
dif = sci_out - exp_sci_out
mins = np.min(dif)
maxs = np.max(dif)
good = np.where(sci_out != 0.)
if len(good[0]) > 0:
fmins = np.min(dif[good] / sci_out[good])
fmaxs = np.max(dif[good] / sci_out[good])
print('mins maxs frac_min frac_max')
print('{} {} {} {}'.format(mins, maxs, fmins, fmaxs))
assert np.allclose(sci_out, exp_sci_out)
def median_refpix(array, smoothing_length, pixel_dq):
    # This code computes the median reference pixel value used by the
    # "use_side_ref_pixels = True" option of the reference pixel correction.
# array must be 2048x4
# first pad array with reflect
parray = np.pad(array,
((smoothing_length // 2, smoothing_length // 2), (0, 0)),
'reflect')
ppdq = np.pad(pixel_dq,
((smoothing_length // 2, smoothing_length // 2), (0, 0)),
'constant', constant_values=0)
xmin = smoothing_length
xmax = 2048 + smoothing_length - 1
med_arr = np.zeros(2048)
for i in range(2048):
sub_array = parray[
i + smoothing_length // 2 - smoothing_length // 2:i + smoothing_length // 2 + smoothing_length // 2 + 1,
:]
sub_pdq = ppdq[
i + smoothing_length // 2 - smoothing_length // 2:i + smoothing_length // 2 + smoothing_length // 2 + 1,
:]
good = np.where(sub_pdq != 1)
med_arr[i] = np.median(sub_array[good])
    return med_arr
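# A hedged usage sketch (not part of the original module) with hypothetical
# arrays; the input must be the 2048x4 side reference pixel strip:
#
#     side_pixels = np.random.normal(size=(2048, 4))
#     side_dq = np.zeros((2048, 4), dtype=int)
#     running_median = median_refpix(side_pixels, 11, side_dq)
#     # running_median has shape (2048,)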
```
#### File: caltest/test_caldetector1/test_superbias.py
```python
from ..utils import translate_dq, extract_subarray
import os
import numpy as np
import pytest
from astropy.io import fits
from jwst.superbias import SuperBiasStep
from jwst import datamodels
import numpy as np
from scipy.stats import normaltest
from astropy.stats import sigma_clipped_stats
import matplotlib.pyplot as plt
@pytest.fixture(scope='module')
def fits_output(fits_input):
fname = fits_input[0].header['filename'].replace('.fits',
'_superbiasstep.fits')
yield fits.open(fname)
os.remove(fname)
@pytest.fixture(scope='module')
def fits_superbias(fits_output):
ref_path = fits_output['PRIMARY'].header['R_SUPERB']
ref_path = ref_path.replace('crds://', '/grp/crds/cache/references/jwst/')
return fits.open(ref_path)
def test_superbias_step(fits_input):
"""Make sure the DQInitStep runs without error."""
fname = fits_input[0].header['filename'].replace('.fits',
'_superbiasstep.fits')
SuperBiasStep.call(datamodels.open(fits_input), output_file=fname,
save_results=True)
def test_superbias_subtraction(fits_input, fits_output, fits_superbias):
if fits_input[0].header['SUBARRAY'] == fits_superbias[0].header['SUBARRAY']:
bias = fits_superbias['SCI'].data
else:
bias = extract_subarray(fits_superbias['SCI'].data, fits_input)
bias_to_subtract = np.copy(bias)
bias_to_subtract[np.isnan(bias_to_subtract)] = 0
assert np.allclose(fits_output['SCI'].data, (fits_input['SCI'].data - bias_to_subtract))
def test_superbias_residuals(fits_output, fits_input):
mean, median, std = sigma_clipped_stats(fits_output['SCI'].data[0,0,:,:],
fits_output['PIXELDQ'].data.astype(bool),
iters=None)
print("Sigma clipped stats")
print("mean = {}".format(mean))
print("median = {}".format(median))
print("standard deviation = {}".format(std))
# normaltest(fits_output['SCI'].data)
# make plot
base = fits_input[0].header['FILENAME'].split('.')[0]
plot_fname = 'test_superbias_residuals_'+base+'.png'
plt.clf()
plt.hist(fits_output['SCI'].data[0,0,:,:].flatten(),
range=(median - 5 * std, median + 5 * std),
bins=100)
plt.xlabel('First Frame Counts')
plt.ylabel('Number of Pixels')
plt.savefig(plot_fname)
def test_pixeldq_propagation(fits_input, fits_output, fits_superbias):
# translate dq flags to standard bits
pixeldq = translate_dq(fits_superbias)
# extract subarray
if fits_superbias[0].header['SUBARRAY'] == 'GENERIC':
pixeldq = extract_subarray(pixeldq, fits_input)
assert np.all(fits_output['PIXELDQ'].data == np.bitwise_or(fits_input['PIXELDQ'].data, pixeldq))
``` |
{
"source": "jneuendorf/command_control",
"score": 3
} |
#### File: lib/interfaces/restartable.py
```python
from .startable import Startable
from .stoppable import Stoppable
class Restartable(Startable, Stoppable):
inverse_actions = {
"start": "stop"
}
def restart(self, configuration):
raise NotImplementedError(
"restart() needs to be implemented for module {}"
.format(self)
)
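# A hypothetical subclass sketch (not part of the original module); it assumes
# Startable/Stoppable expect start()/stop() with the same signature as restart():
#
#     class MyService(Restartable):
#         def start(self, configuration):
#             print("starting with", configuration)
#         def stop(self, configuration):
#             print("stopping with", configuration)
#         def restart(self, configuration):
#             self.stop(configuration)
#             self.start(configuration)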
```
#### File: modules/abstract_project/abstract_project.py
```python
import os
from ... import _globals
from ... import lib
from ... import settings
from .configurations import configurations
class AbstractProject(lib.Module, lib.Loadable):
"""
This class is special as its name is not important.
Instead it will be instantiated if the module name (from the command line)
matches a key in settings.projects.
"""
def __init__(self, name):
super().__init__(configurations())
self.name = name
if name in settings.projects:
project_data = settings.projects[name]
# load and unload keys are defined
if "load" in project_data:
if "unload" in project_data:
self.load_commands = project_data["load"]
self.unload_commands = project_data["unload"]
self.auto_invert_actions = False
_globals._current_project_settings = {
key: val
for key, val in project_data.items()
if key not in {"load", "unload"}
}
else:
raise ValueError(
"Project data must either be a list or "
"a directory containing 'load' and 'unload'."
)
# list of commands => unload_commands are implied
else:
self.load_commands = project_data
self.unload_commands = reversed(project_data)
self.auto_invert_actions = True
# no change to _globals._current_project_settings
else:
raise ValueError(
"There is no project with name '{}'."
.format(name)
)
# @Override
def _print_on_do(self, action):
if action not in ["load", "unload"]:
super()._print_on_do(action)
else:
print(
"{}ing {}........"
.format(action, self.name)
)
def load(self, configuration):
"""
When loading a project multiple `cd` actions are possible.
The intermediate directories that may be visited are not
visible to the user.
Therefore, only the last `cd` will be printed so the calling script
can change the directory using `source cd`.
"""
last_cd_command_idx = None
parsed_commands = []
for i, command in enumerate(self.load_commands):
parsed_command = lib.parse_args(command.split())
parsed_commands.append(parsed_command)
actions, modules = parsed_command
if "cd" in actions:
last_cd_command_idx = i
for i, parsed_command in enumerate(parsed_commands):
actions, modules = parsed_command
if i != last_cd_command_idx:
# execute actions normally
if actions != "cd":
for action in actions:
for module in modules:
module.do(action)
# special action: cd -> modules == path
else:
self.cd(configuration, modules)
# last of the "cd" actions -> print for wrapper bash script's eval
else:
if not _globals._is_sourced:
print(
"\n# WARNING:\n"
"# Tried changing the directory "
"while loading a project.\n"
"# This script should be called like "
"`source ctl.sh load {}`.\n".format(self.name)
)
self.cd(configuration, modules)
# same as load but without special last `cd` command
# commands are executed in reversed order
def unload(self, configuration):
parsed_commands = []
for command in self.unload_commands:
parsed_commands.append(lib.parse_args(command.split()))
for parsed_command in parsed_commands:
actions, modules = parsed_command
# execute actions normally
if actions != "cd":
for action in actions:
for module in modules:
if self.auto_invert_actions:
module.do(module.inverse_action(action))
else:
module.do(action)
# special action: cd -> modules == path
else:
self.cd(configuration, modules)
self.cd(configuration, settings.PROJECT_UNLOAD_DIRECTORY)
def cd(self, configuration, path):
if _globals._dry_run or _globals._is_sourced:
print("cd {}".format(path))
path = os.path.expanduser(path)
os.chdir(path)
``` |
{
"source": "jneuendorf/dkb_pdf2csv",
"score": 3
} |
#### File: data_analysis/classifiers/base.py
```python
from abc import ABC, abstractmethod
class BaseClassifier(ABC):
EXPORTABLES = ()
tag_queryset = None
tag_meta_vars = None
def __init__(self, tag_queryset):
self.tag_queryset = tag_queryset
self.tag_meta_vars = self.get_tag_meta_vars(tag_queryset)
def run(self, data_point):
"""Injects instances attributes into the 'classify' method
in order to provide a easier and functional interface for that method.
"""
return self.classify(
data_point,
self.tag_queryset,
self.tag_meta_vars,
)
@abstractmethod
def classify(self, data_point, tag_queryset, tag_meta_vars) -> list:
"""Classifies the given data point as zero, one or multiple
of the given existing tag_queryset.
Parameters:
- data_point: data_vis.models.DataPoint
- tag_queryset: Iterable[data_vis.models.Tag]
- tag_meta_vars: Dict[str, Dict[str, Any]]
            Maps tag.identifier to tag.meta interpreted as Python code
        Returns: List[data_vis.models.Tag]
"""
pass
def get_tag_meta_vars(self, tag_queryset):
tag_meta_vars = {}
exportable_vars = self.EXPORTABLES
errors = {}
for tag in tag_queryset:
locals = {}
try:
exec(tag.meta, globals(), locals)
except SyntaxError as e:
errors[tag.identifier] = e
continue
tag_meta_vars[tag.identifier] = {
exportable_var: locals.get(exportable_var)
for exportable_var in exportable_vars
}
if errors:
raise ValueError(errors)
return tag_meta_vars
```
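A minimal sketch of a concrete subclass (the `KeywordClassifier` and `FakeTag` names are hypothetical, not part of the project): `EXPORTABLES` lists the variables that `get_tag_meta_vars` extracts from each tag's `meta` code via `exec`, and `run` hands them to `classify`.

```python
class KeywordClassifier(BaseClassifier):
    EXPORTABLES = ('keywords',)

    def classify(self, data_point, tag_queryset, tag_meta_vars):
        text = str(data_point)
        return [
            tag for tag in tag_queryset
            if any(kw in text for kw in (tag_meta_vars[tag.identifier]['keywords'] or []))
        ]


class FakeTag:
    """Stand-in for data_vis.models.Tag with the two attributes used above."""
    def __init__(self, identifier, meta):
        self.identifier = identifier
        self.meta = meta


rent = FakeTag('rent', "keywords = ['rent', 'landlord']")
classifier = KeywordClassifier([rent])
print(classifier.run('monthly rent 2020-01'))  # -> [rent] (the tag whose keyword matched)
```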
#### File: data_analysis/pattern_finders/base.py
```python
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Iterable, Set
from django.db.models import QuerySet
from data_vis.models import DataPoint
__all__ = ('Types', 'BasePatternFinder')
Subset = Iterable[DataPoint]
Subsets = Iterable[Subset]
class objectview:
"""See https://goodcode.io/articles/python-dict-object/"""
def __init__(self, **kwargs):
self.__dict__ = kwargs
Types = objectview(
Subset=Subset,
Subsets=Subsets,
QuerySet=QuerySet,
)
class BasePatternFinder(ABC):
@dataclass
class Options:
"""https://stackoverflow.com/questions/51575931/"""
min_length: int = 2
"""Specifies the minimum length that a pattern must have."""
precision: str = 'days'
"""Specifies the granularity of time
that points will be matched against.
Any attribute name of relativedelta, e.g. 'days'.
"""
def __init__(self, **options):
self.options = self.Options(**options)
def find(self, data_points_queryset: QuerySet) -> Set[Subset]:
"""Finds patterns in subsets of points."""
return {
subset
for subset in self.select_subsets(data_points_queryset)
if self.should_use_subset(subset)
}
@abstractmethod
def select_subsets(self, data_points_queryset: QuerySet) -> Subsets:
"""Selects certain subsets of points to be potential patterns.
The subsets are pattern candidates.
"""
...
def should_use_subset(self, subset: Subset) -> bool:
"""Indicates if the given subset is actually a pattern.
It acts as a filter condition for all candidates.
"""
return bool(subset)
```
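A minimal sketch of the template-method flow (the `PairFinder` subclass is hypothetical, and a plain list stands in for the `DataPoint` queryset): `find()` keeps every candidate yielded by `select_subsets()` that passes `should_use_subset()`.

```python
class PairFinder(BasePatternFinder):
    def select_subsets(self, data_points_queryset):
        points = list(data_points_queryset)
        # every pair of consecutive points is a pattern candidate
        return [tuple(points[i:i + 2]) for i in range(len(points) - 1)]


finder = PairFinder(min_length=2, precision='days')
print(finder.find([1, 2, 3]))  # -> {(1, 2), (2, 3)}
```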
#### File: data_vis/models/pdf_file.py
```python
from django.db import models
from .validators import validate_is_pdf
class PdfFile(models.Model):
series = models.ForeignKey(
'Series',
on_delete=models.CASCADE,
related_name='pdfs',
blank=True,
null=True,
)
file = models.FileField(upload_to='pdfs/%Y/', validators=[validate_is_pdf])
is_imported = models.BooleanField(default=False)
def __str__(self):
return f'<{self.__class__.__name__} {self.file.name}>'
```
#### File: data_vis/models/validators.py
```python
import os
from django.core.exceptions import ValidationError
import magic
def validate_file_type(file, valid_mime_types, valid_file_extensions):
file_mime_type = magic.from_buffer(file.read(1024), mime=True)
if file_mime_type not in valid_mime_types:
raise ValidationError('Unsupported file type.')
ext = os.path.splitext(file.name)[1]
if ext.lower() not in valid_file_extensions:
raise ValidationError('Unacceptable file extension.')
def validate_is_pdf(file):
return validate_file_type(
file,
valid_mime_types=['application/pdf'],
valid_file_extensions=['.pdf'],
)
```
#### File: views/api/analytics.py
```python
import json
from typing import List
from django.http import JsonResponse
# from django.views.decorators.csrf import csrf_exempt
from data_analysis.pattern_finders.frequency import FrequencyPatternFinder
from .data import limited_series
from .serializers import JsonEncoder
# @csrf_exempt
def finders(request):
if 'start' not in request.GET or 'end' not in request.GET:
raise ValueError('Missing GET param "start" or "end"')
body_unicode = request.body.decode('utf-8')
body = json.loads(body_unicode)
# print(body)
if body:
# FIELD_NAMES = ('id', 'x', 'dy')
FIELD_NAMES = ('id',)
finders: List[FrequencyPatternFinder] = [
FrequencyPatternFinder(**kwargs)
for kwargs in body
]
patterns_by_series = {}
for series, data_points_queryset in limited_series(request):
# print('??', data_points_queryset)
for finder in finders:
patterns = finder.find(data_points_queryset)
# print('??', patterns)
patterns_by_series[series.name] = [
[
point.as_dict(field_names=FIELD_NAMES)
for point in pattern
]
for pattern in patterns
]
return JsonResponse(
patterns_by_series,
encoder=JsonEncoder,
# safe=False,
)
else:
raise ValueError('No request body given')
```
#### File: views/api/serializers.py
```python
from django.core.serializers.json import DjangoJSONEncoder
class JsonEncoder(DjangoJSONEncoder):
"""Additional types: set, tuple"""
def default(self, obj):
if isinstance(obj, (set, tuple)):
return list(obj)
# if hasattr(obj, 'to_json') and callable(obj.to_json):
# return obj.to_json()
return super().default(obj)
```
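A short usage sketch (assuming Django is importable; `JsonEncoder` is the class above): the overridden `default` lets sets pass straight through as JSON arrays, where a plain `json.JSONEncoder` would raise `TypeError`.

```python
import json

print(json.dumps({'ids': {1, 2, 3}}, cls=JsonEncoder))
# {"ids": [1, 2, 3]}
```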
#### File: views/api/tags.py
```python
from django.http import HttpRequest, JsonResponse
from ...models import Tag
from .serializers import JsonEncoder
def tags(request: HttpRequest):
return JsonResponse(
list(Tag.objects.values('identifier', 'is_abstract')),
encoder=JsonEncoder,
safe=False,
)
```
#### File: dkb_pdf2csv/pdf_importer/row.py
```python
from typing import List, Dict, Any
from .types import Element
class Row:
cells: List[Element]
options: Dict[str, Any]
def __init__(self, cells, **options):
self.cells = cells
self.options = options
def get_texts(self) -> List[str]:
return [cell.get_text().strip() for cell in self.cells]
def get_text(self, sep: str = '') -> str:
return sep.join(self.get_texts())
def as_dict(self, header_row: "HeaderRow"):
"""
Since a row can miss a cell in the middle,
we cannot just use the indices to map to columns.
Therefore, we try to match either the left x (x0)
or the right x (x1) coordinate.
"""
header_row_labels_by_x = header_row.get_labels_by_x()
row_dict = {
# NOTE: This is useful for debugging functions that otherwise don't
# have access to the row and its cells.
'__self__': self,
}
for i, cell in enumerate(self.cells):
cell_text = cell.get_text().strip()
matched_x = None
if cell.x0 in header_row_labels_by_x:
matched_x = cell.x0
elif cell.x1 in header_row_labels_by_x:
matched_x = cell.x1
else:
# TODO: Check if 'tolerance' in self.options
x0s_in_tolerance = [
x for x in header_row_labels_by_x.keys()
if abs(x - cell.x0) <= self.options['tolerance']
]
x1s_in_tolerance = [
x for x in header_row_labels_by_x.keys()
if abs(x - cell.x1) <= self.options['tolerance']
]
# If there is only one distinct value within the tolerance,
# we take it (as we prefer left alignment just like above).
if len(x0s_in_tolerance) == 1:
matched_x = x0s_in_tolerance[0]
# Try right alignment.
elif len(x1s_in_tolerance) == 1:
matched_x = x1s_in_tolerance[0]
else:
raise ValueError(
f'Could not associate cell {str(cell)} at '
                        f'position {i} with a column.'
)
if matched_x is not None:
row_dict[header_row_labels_by_x[matched_x]] = cell_text
return row_dict
def __len__(self):
return len(self.cells)
def __eq__(self, row):
return isinstance(row, Row) and self.get_texts() == row.get_texts()
def __str__(self):
texts = self.get_texts()
return f'<{self.__class__.__name__} {str([text for text in texts])}>'
# @property
# def y_top(self):
# return self.cells[0].y1
class HeaderRow(Row):
def __init__(self, pages, **options):
"""Look for number of columns of the table.
Assuming that the header row has the greatest number of elements
with the same top-y value.
We also assume the table has the same structure across all pages
and starts on the 1st page.
"""
header_cells = []
for row in pages[0]:
if len(row) > len(header_cells):
header_cells = row.cells
super().__init__(header_cells, **options)
def get_texts(self):
return (
self.options['custom_labels']
if self.options['custom_labels'] is not None
else super().get_texts()
)
def get_labels_by_x(self):
header_row_labels = self.get_texts()
return {
# left x coordinates
**{
cell.x0: header_row_labels[i]
for i, cell in enumerate(self.cells)
},
# right x coordinates
**{
cell.x1: header_row_labels[i]
for i, cell in enumerate(self.cells)
},
}
def __eq__(self, row):
"""Compares also the cell texts in case self.options['custom_label']
is set. If there are custom labels the header row read from the PDF
still contains the original labels which the HeaderRow instance knows
because the original cells are still associated.
"""
return super().__eq__(row) or Row(self.cells) == row
```
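A minimal sketch of the x-coordinate matching in `Row.as_dict` (the `FakeCell` class is a hypothetical stand-in for a pdfminer text element): the first cell matches the 'Date' column via its left edge, the second matches 'Amount' via its right edge, so no tolerance lookup is needed.

```python
from dataclasses import dataclass


@dataclass
class FakeCell:
    x0: float
    x1: float
    text: str

    def get_text(self):
        return self.text


header = HeaderRow(
    pages=[[Row([FakeCell(10, 40, 'Date'), FakeCell(100, 160, 'Amount')])]],
    custom_labels=None,
)
row = Row([FakeCell(10, 35, '01.02.2020'), FakeCell(110, 160, '-9,99')])
print(row.as_dict(header))
# {'__self__': <Row ...>, 'Date': '01.02.2020', 'Amount': '-9,99'}
```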
#### File: dkb_pdf2csv/pdf_importer/transform_emit.py
```python
import csv
from typing import List
from .row import Row, HeaderRow
def transform_emit(pdf: str, header_row: HeaderRow, rows: List[Row],
rows_by_page: List[List[Row]],
callback_args: list):
# No transform
with open(f'{pdf}.csv', 'w') as f:
writer = csv.DictWriter(f, fieldnames=header_row.get_texts())
writer.writeheader()
writer.writerows([
row.as_dict(header_row)
for row in rows
])
``` |
{
"source": "jneuendorf/pokemon-image-dataset",
"score": 2
} |
#### File: pokemon_image_dataset/data_sources/base.py
```python
from abc import ABC, abstractmethod
from collections import Iterable, Collection, Callable
from pathlib import Path
from typing import TypeVar, Generic, Union
from pokemon_image_dataset.form import POKEMON_FORMS, DISMISS_FORM, PokemonForm, BasePokemonImage
from pokemon_image_dataset.utils import (
parse_ndex,
verify_sha256_checksum,
)
from .path_dict import PathDict
T = TypeVar('T', bound=BasePokemonImage)
# class DataSourceMeta(type(ABC), type(Generic[T])):
# pass
class DataSource(ABC, Generic[T]):
checksum: str = None
extra_ops = ()
tmp_dir: Path = None
    images: set[T] = set()
def __init__(self, *, tmp_dir: Path):
self.tmp_dir = tmp_dir
def run(self, force=False) -> None:
self.root.mkdir(parents=True, exist_ok=True)
data_path = self.get(force)
self.verify_checksum(data_path)
self.process(data_path)
self.arrange()
self.images = set(self.get_images(self.associate_forms()))
self.post_process()
@property
def root(self) -> Path:
return self.tmp_dir / f'__{self.__class__.__name__}'
@abstractmethod
def get(self, force: bool):
...
def verify_checksum(self, data_path: Path) -> None:
verify_sha256_checksum(data_path, self.checksum)
def process(self, archive) -> None:
...
def arrange(self) -> None:
...
def associate_forms(self) -> list[tuple[PokemonForm, Path]]:
form_path_tuples = []
assigned_forms = self.assign_forms()
        # NOTE: Sorting makes the printed operations easier to follow,
# but is essential for having a deterministic order so that
# the developer can solve issues with chained renames
for filename in sorted(self.get_files()):
if filename.is_symlink():
print('dismissing detected symlink', filename, '=>', filename.readlink())
# filename.unlink()
else:
stem = filename.stem
form = assigned_forms[filename]
found_form: PokemonForm
if form is None:
ndex = self.parse_ndex(stem)
forms = [f for f in POKEMON_FORMS[ndex] if f.name == stem]
assert len(forms) == 1, (
                f'got {len(forms)} matching forms instead of 1 for {filename}'
)
found_form = forms[0]
else:
found_form = form
if found_form is DISMISS_FORM:
print('dismissing', filename)
# filename.unlink()
else:
form_path_tuples.append((
found_form,
filename,
))
# # Avoid unnecessary renames
# if found_form.name != stem:
# rename_to = filename.with_stem(found_form.name)
# print('rename:', filename, rename_to)
# filename.rename(rename_to)
print('form_path_tups', form_path_tuples)
return form_path_tuples
def assign_forms(self) -> PathDict[PokemonForm]:
return PathDict()
@abstractmethod
def get_images(self, associated_forms: list[tuple[PokemonForm, Path]]) -> Collection[T]:
"""Allows subclasses to use their own according BasePokemonImage subclass."""
...
# def replace_image(self, image: T, replacements: Collection[BasePokemonImage]):
# assert all(img.source_file.exists() for img in replacements), "some of the split images' files do not exist"
# # We want the KeyError in case the image is not in the set.
# self.images.remove(image)
# self.images |= set(replacements)
def post_process(self):
...
@abstractmethod
def get_files(self) -> Iterable[Path]:
...
def parse_ndex(self, filename: str) -> int:
return parse_ndex(filename)
```
#### File: pokemon_image_dataset/data_sources/path_dict.py
```python
from pathlib import Path
from typing import Dict, Optional, TypeVar, Generic
T = TypeVar('T')
class PathDict(Generic[T], dict):
"""Keys are strings representing paths.
Item access happens with `Path` instances.
"""
@classmethod
def with_prefix(cls, prefix: str, kwargs: Dict[str, T]):
return cls({
f'{prefix}{key}': value
for key, value in kwargs.items()
})
def __getitem__(self, path: Path) -> Optional[T]:
# assert isinstance(
# path, Path), f'expected key to be a Path but got {type(path).__name__}'
for key, val in self.items():
if path.match(key) or path.with_suffix('').match(key):
return val
return None
```
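A short usage sketch (the keys here are made up): lookups are done with `Path` objects and fall back to glob-style matching with the suffix stripped, returning `None` when nothing matches.

```python
from pathlib import Path

assigned = PathDict.with_prefix('icons/', {
    '25-*': 'cosplay',
    'egg': 'dismiss',
})
print(assigned[Path('icons/25-rock-star.png')])  # cosplay
print(assigned[Path('icons/egg.png')])           # dismiss (matched after stripping the suffix)
print(assigned[Path('icons/1.png')])             # None
```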
#### File: pokemon_image_dataset/data_sources/sprite_set.py
```python
import shutil
from dataclasses import dataclass, field
from pathlib import Path
from typing import Collection, Optional, Union
from pokemon_image_dataset.form import PokemonForm, PokemonImage
from pokemon_image_dataset.utils import NAME_DELIMITER, get_image_frames, whiten_areas
from .archive import RemoteArchiveDataSource
PostProcessorSpec = Union[str, tuple[str, dict]]
@dataclass
class SpriteSetConfig:
glob: str
"""Glob pattern for relevant image files."""
extra: dict[str, str] = field(default_factory=dict)
dest: Optional[str] = None
"""Rename sprite set folder."""
post_process: list[PostProcessorSpec] = field(default_factory=list)
"""Method name or tuples of name and kwargs.
    Methods that get called for a sprite set with
(src: str, conf: SpriteSetConfig, **kwargs)
in the specified order.
"""
class SpriteSetDataSource(RemoteArchiveDataSource[PokemonImage]):
sprite_sets: dict[str, SpriteSetConfig] = {}
"""Keys are folders in the (unpacked) data source."""
def arrange(self):
"""Moves sprite set folders into `self.tmp_dir` and
saves the destinations for `self.get_files`.
"""
for src, conf in self.sprite_sets.items():
root = self.root / src
pattern = conf.glob
extra = conf.extra
dest = self.get_dest(src)
if dest.exists():
print('deleting existing', dest)
shutil.rmtree(dest)
dest.mkdir(parents=True, exist_ok=True)
for file in root.glob(pattern):
shutil.move(file, dest / file.name)
for extra_src, extra_dest in extra.items():
shutil.move(root / extra_src, dest / extra_dest)
shutil.rmtree(self.root)
def get_images(self, associated_forms: list[tuple[PokemonForm, Path]]) -> list[PokemonImage]:
return [
PokemonImage(
data_source=self,
form=form,
source_file=filename,
sprite_set=filename.parent.name,
format=filename.suffix,
)
for form, filename in associated_forms
]
def get_dest(self, sprite_set: str, root: Path = None) -> Path:
if root is None:
root = self.tmp_dir
conf = self.sprite_sets[sprite_set]
return root / (conf.dest or Path(sprite_set).name)
def get_files(self):
for src in self.sprite_sets:
yield from self.get_dest(src).iterdir()
def post_process(self):
for src, conf in self.sprite_sets.items():
for method_spec in conf.post_process:
method_name: str
kwargs = {}
if isinstance(method_spec, tuple):
method_name, kwargs = method_spec
else:
method_name = method_spec
method = getattr(self, method_name)
method(src, conf, **kwargs)
# POST PROCESSORS
def split_gif_frames(self, src: str, conf: SpriteSetConfig):
def split_image(image: PokemonImage) -> list[PokemonImage]:
gif = image.source_file
assert gif.suffix == '.gif', f'expected gif image but got {image.source_file}'
images = []
for i, frame in enumerate(get_image_frames(gif)):
# Ignore single color frames
if frame.colors == 1:
print(f'excluding single color frame {i} from {gif}')
continue
frame_png = gif.with_stem(f'{gif.stem}{NAME_DELIMITER}{i}').with_suffix('.png')
frame.save(filename=frame_png)
images.append(PokemonImage(
data_source=image.data_source,
form=image.form,
source_file=frame_png,
sprite_set=image.sprite_set,
frame=i,
format='.png',
))
return images
# TODO: async
images_to_replace = {image for image in self.images if image.sprite_set == src}
replacements = {
replacement
for image in images_to_replace
for replacement in split_image(image)
}
assert all(img.source_file.exists() for img in replacements), "some of the split images' files do not exist"
self.images = (self.images - images_to_replace) | replacements
def whiten_areas(
self,
src: str,
conf: SpriteSetConfig,
forms: Collection[Union[PokemonForm, tuple[PokemonForm, list[tuple[int, int]]]]] = (),
) -> None:
        forms_and_coords = [
(form, [(0, 0)]) if isinstance(form, PokemonForm) else form
for form in forms
]
dest = self.get_dest(src)
        for form, coords in forms_and_coords:
filename = dest / f'{form.name}.png'
print('whitening area of', filename, 'at', coords)
whiten_areas(filename, coords)
```
#### File: data_sources/veekun/icons.py
```python
from pokemon_image_dataset.data_sources import PathDict
from pokemon_image_dataset.data_sources import SpriteSetConfig as Conf
from pokemon_image_dataset.data_sources import SpriteSetDataSource
from pokemon_image_dataset.form import DISMISS_FORM, Form, get_form
class Icons(SpriteSetDataSource):
url = 'https://veekun.com/static/pokedex/downloads/pokemon-icons.tar.gz'
checksum = 'f9850ce82d8e6e69c163112c47553458fd27805034217a5331a1ae12b2a1c8ac'
sprite_sets = {
'pokemon/icons': Conf(
glob='*.png',
extra={
'female/521.png': '521-female.png',
'female/592.png': '592-female.png',
'female/593.png': '593-female.png',
'female/668.png': '668-female.png',
'female/678.png': '678-female.png',
},
),
}
def assign_forms(self):
return PathDict(**{
'icons/25-cosplay': get_form(25, 'cosplay'),
'icons/25-rock-star': get_form(25, 'cosplay-rock-star'),
'icons/25-belle': get_form(25, 'cosplay-belle'),
'icons/25-pop-star': get_form(25, 'cosplay-pop-star'),
'icons/25-phd': get_form(25, 'cosplay-phd'),
'icons/25-libre': get_form(25, 'cosplay-libre'),
'icons/201': get_form(201, 'a'),
'icons/386-normal': get_form(386, Form.NORMAL),
'icons/412': get_form(412, 'plant'),
'icons/413': get_form(413, 'plant'),
'icons/421': get_form(421, 'overcast'),
'icons/422': get_form(422, 'west'),
'icons/423': get_form(423, 'west'),
'icons/487': get_form(487, 'altered'),
'icons/492': get_form(492, 'land'),
'icons/493-*': DISMISS_FORM, # invalid forms (equal normal)
'icons/550': get_form(550, 'red-striped'),
'icons/555': get_form(555, Form.NORMAL),
'icons/555-standard': get_form(555, Form.NORMAL),
'icons/555-zen': get_form(555, 'zen'),
'icons/585': get_form(585, 'spring'),
'icons/586': get_form(586, 'spring'),
'icons/641': get_form(641, 'incarnate'),
'icons/642': get_form(642, 'incarnate'),
'icons/645': get_form(645, 'incarnate'),
'icons/647': get_form(647, 'ordinary'),
'icons/648': get_form(648, 'aria'),
'icons/649-*': DISMISS_FORM, # invalid forms (equal normal)
'icons/666': get_form(666, 'meadow'),
'icons/669': get_form(669, 'red'),
'icons/670': get_form(670, 'red'),
'icons/670-eternal': DISMISS_FORM, # unknown form
'icons/671': get_form(671, 'red'),
'icons/676': get_form(676, Form.NORMAL),
'icons/676-natural': get_form(676, Form.NORMAL),
'icons/678-male': get_form(678, Form.NORMAL),
'icons/678-female': get_form(678, Form.FEMALE),
'icons/681': get_form(681, 'shield'),
'icons/710-*': DISMISS_FORM,
'icons/711-*': DISMISS_FORM,
'icons/716': get_form(716, 'active'),
'icons/718': get_form(718, '50-percent'),
'icons/720': get_form(720, 'confined'),
'icons/egg': DISMISS_FORM,
})
``` |
{
"source": "jneuendorf/prefix-codes",
"score": 2
} |
#### File: prefix_codes/codecs/arithmetic.py
```python
import itertools
from collections import OrderedDict
from collections.abc import Iterable
from typing import Generic
from tqdm import tqdm
from prefix_codes.codecs.base import T
from prefix_codes.codecs.shannon_fano_elias import ShannonFanoEliasCodec, ModelType
from prefix_codes.typedefs import BitStream, Bit
from prefix_codes.utils import set_bit, write_bits, read_bits_from_string, read_bits
def bit_string(n: int, bits: int = 0) -> str:
# NOTE: Default means as many bits as necessary
# (because specifying less than necessary is impossible)
return format(n, f'0{bits}b')
def leading_zeros(n: int, bits: int) -> int:
try:
return bit_string(n, bits).index('1')
except ValueError:
return bits
def trailing_ones(n: int, bits: int) -> int:
try:
return bits - bit_string(n, bits).rindex('0') - 1
except ValueError:
return bits
def handle_carry(n: int, bits: int, c: int) -> tuple[int, int, BitStream]:
bit_stream: list[Bit] = []
carry = int(bit_string(n, bits)[0])
if carry == 1:
n -= (1 << (bits - 1))
bit_stream.append(1)
c -= 1
if c > 1:
bit_stream.extend([0] * (c - 1))
c = 1
return n, c, bit_stream
class ArithmeticCodec(ShannonFanoEliasCodec, Generic[T]):
"""See 06-ArithmeticCoding.pdf"""
V: int
"""Bits to use for representing probability masses"""
U: int
"""Bits to use for representing interval widths"""
p_V: dict[T, float]
"""Quantized probability masses with V bits"""
c_V: dict[T, float]
"""Quantized cumulative probabilities (cmf/cdf) with V bits"""
def __init__(self, probabilities: OrderedDict[T, float], model: ModelType = 'iid',
prefix_free: bool = False, V: int = 4, U: int = 4):
super().__init__(probabilities, model, prefix_free)
self.V = V
self.U = U
self.quantize_probabilities()
def quantize_probabilities(self):
self.p_V = {
symbol: round(prob * (2 ** self.V))
for symbol, prob in self.probabilities.items()
}
accumulated_ps = list(itertools.accumulate(self.p_V.values(), initial=0))
self.c_V = {
symbol: accumulated_ps[i]
for i, symbol in enumerate(self.probabilities)
}
        assert all(p > 0 for p in self.p_V.values()), 'all probabilities must be > 0. Try a greater precision'
assert sum(self.p_V.values()) <= 2 ** self.V, 'invalid quantization'
def get_num_codeword_bits(self, message: Iterable[T]) -> int:
a = 1 if self.is_prefix_free else 0
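        # NOTE: z_n is not defined in this scope, so calling this method currently raises a NameError.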
return a + z_n - self.U + 1
def encode(self, message: Iterable[T], *, max_length: int = None) -> bytes:
V = self.V
U = self.U
# INIT
A = 2 ** U - 1
B = 0
c = 0
mask = (1 << (U + V)) - 1
# print('init')
# print('A', A)
# print('c', c)
# print('B', B)
bit_stream: list[Bit] = []
# ITERATIVE ENCODING
for symbol in tqdm(message, total=max_length):
# print('loop')
# CALCULATE
A_ast = A * self.p_V[symbol]
B_ast = B + A * self.c_V[symbol]
# print('A*', A_ast)
# print('B*', B_ast)
# DETERMINE NUMBER OF LEADING ZERO BITS (check at most V bits)
delta_z = min(leading_zeros(A_ast, U + V), V)
# print('∆z', delta_z)
# CHECK FOR CARRY BIT
B_ast, c, new_bits = handle_carry(B_ast, U + V + 1, c)
# print('carry?', B_ast, new_bits)
bit_stream.extend(new_bits)
# print('update bitstream', bit_stream)
# INVESTIGATE delta_z LEADING ZERO BITS
if delta_z > 0:
# print('B* binary', bit_string(B_ast, U + V))
B_ast_z_leading_bits = bit_string(B_ast, U + V)[:delta_z]
# print('∆z leading B*', B_ast_z_leading_bits)
n_1 = trailing_ones(int(B_ast_z_leading_bits, base=2), delta_z)
# print('n_1', n_1)
if n_1 < delta_z:
# print('case 1')
# output c outstanding bits
if c > 0:
bit_stream.append(0)
c -= 1
while c > 0:
bit_stream.append(1)
c -= 1
# bit_stream.extend(read_bits_from_string(bit_string(c)))
# print('update bitstream', bit_stream)
# output first ∆z - n_1 - 1 bits of B*
bit_stream.extend(
read_bits_from_string(B_ast_z_leading_bits[:-n_1 - 1])
)
# print('update bitstream', bit_stream)
c = n_1 + 1
elif n_1 == delta_z and c > 0:
# print('case 2')
c += n_1
elif n_1 == delta_z and c == 0:
# print('case 3')
bit_stream.extend(read_bits_from_string(B_ast_z_leading_bits))
# print('update bitstream', bit_stream)
c = 0
# print('update bitstream', bit_stream)
# print('updated c', c)
# UPDATE PARAMETERS
A = A_ast >> (V - delta_z)
B = (B_ast << delta_z) & mask
# print('A', A)
# print('B', B)
# print('--------------------\n')
# CODEWORD TERMINATION
# print('terminate')
a = 1 if self.is_prefix_free else 0
X = U + V - a - 1
if '1' in bit_string(B, U + V)[-X:]:
B += (1 << X) # round up lower interval boundary
# print('B', bin(B))
B, c, new_bits = handle_carry(B, U + V + 1, c)
# print('B', bin(B))
# B_ast = B + (1 << X)
# B_ast, c, new_bits = handle_carry(B_ast, U + V + 1, c)
# print('updated c', c)
bit_stream.extend(new_bits)
# print('update bitstream', bit_stream)
# output all outstanding bits
bit_stream.append(0)
bit_stream.extend([1] * (c - 1))
# print('update bitstream', bit_stream)
B_most_significant_bits = bit_string(B, U + V)[:a + 1]
bit_stream.extend(read_bits_from_string(B_most_significant_bits))
# print('final bitstream', bit_stream)
return write_bits(bit_stream)
def decode(self, byte_stream: bytes, *, max_length: int = None, num_bits: int = None) -> Iterable[T]:
V = self.V
UV = self.U + self.V
# INIT
A = 2 ** self.U - 1
u = int(
''.join(
str(bit) for bit in itertools.islice(read_bits(byte_stream), UV)
),
base=2,
)
# print('init')
# print(A, u)
# ITERATIVE DECODING
for n in range(max_length):
# IDENTIFY NEXT SYMBOL
for symbol in self.probabilities:
U = A * (self.c_V[symbol] + self.p_V[symbol])
# print(f'U({symbol})', U)
if u < U:
# print('output', symbol)
yield symbol
# UPDATE PARAMETERS
A_ast = A * self.p_V[symbol]
# print('A*', A_ast)
u_ast = u - A * self.c_V[symbol]
# print('u*', u_ast)
delta_z = leading_zeros(A_ast, UV)
# print('∆z', delta_z)
u = (u - A * self.c_V[symbol]) << delta_z
A = A_ast >> (V - delta_z)
break
```
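A small worked example of the quantization step (assuming the imported `ShannonFanoEliasCodec` base class stores the passed `OrderedDict` as `self.probabilities`, which the code above relies on): with V = 4, the probability masses become integer counts out of 2^4 = 16 and `c_V` holds their running sums.

```python
from collections import OrderedDict

codec = ArithmeticCodec(
    OrderedDict([('a', 0.5), ('b', 0.25), ('c', 0.25)]),
    V=4, U=4,
)
print(codec.p_V)  # {'a': 8, 'b': 4, 'c': 4}
print(codec.c_V)  # {'a': 0, 'b': 8, 'c': 12}
```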
#### File: prefix_codes/codecs/base.py
```python
from abc import ABC, abstractmethod
from collections.abc import Hashable, Iterable
from math import ceil
from typing import TypeVar, Generic
T = TypeVar('T', bound=Hashable)
META_BYTES = 30
class BaseCodec(ABC, Generic[T]):
@staticmethod
def parse_byte_stream(serialization: bytes) -> tuple[bytes, bytes, int]:
meta = serialization[:META_BYTES]
codec_data_and_message = serialization[META_BYTES:]
num_codec_data_bytes = int.from_bytes(meta[:META_BYTES // 2], byteorder='big')
message_length = int.from_bytes(meta[META_BYTES // 2:], byteorder='big')
codec_data = codec_data_and_message[:num_codec_data_bytes]
enc_message = codec_data_and_message[num_codec_data_bytes:]
return codec_data, enc_message, message_length
@classmethod
@abstractmethod
def decode_byte_stream(cls, serialization: bytes) -> Iterable[T]:
...
@abstractmethod
def encode(self, message: Iterable[T], *, max_length: int = None) -> bytes:
...
@abstractmethod
def decode(self, byte_stream: bytes, *, max_length: int = None) -> Iterable[T]:
...
def serialize(self, message: Iterable[T]) -> bytes:
codec_data = self.serialize_codec_data(message)
message_length = len(list(message))
assert ceil(len(codec_data).bit_length() / 8) <= META_BYTES // 2, (
f'codec data is too large'
)
assert ceil(message_length.bit_length() / 8) <= META_BYTES // 2, (
f'message is too large'
)
return (
len(codec_data).to_bytes(length=META_BYTES // 2, byteorder='big')
+ message_length.to_bytes(length=META_BYTES // 2, byteorder='big')
+ codec_data
+ self.encode(message)
)
@abstractmethod
def serialize_codec_data(self, message: Iterable[T]) -> bytes:
...
```
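A tiny sketch of the container layout that `serialize` writes and `parse_byte_stream` reads back: 15 bytes of codec-data length, 15 bytes of message length, then the codec data followed by the encoded message (the payload bytes here are arbitrary).

```python
meta = (3).to_bytes(15, 'big') + (5).to_bytes(15, 'big')
blob = meta + b'abc' + b'\x01\x02'
print(BaseCodec.parse_byte_stream(blob))
# (b'abc', b'\x01\x02', 5)
```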
#### File: prefix-codes/prefix_codes/utils.py
```python
import itertools
from collections import Counter
from collections.abc import Hashable, Iterable
from typing import TypeVar
from prefix_codes.typedefs import BitStream, Bit
T = TypeVar('T')
H = TypeVar('H', bound=Hashable)
def get_bit(byte: int, i: int) -> Bit:
return (byte >> i) & 1
def set_bit(byte: int, i: int, bit: Bit) -> int:
"""See https://stackoverflow.com/a/12174051/6928824"""
mask = (1 << i)
return (byte & ~mask) | (bit << i)
def get_byte(bit_stream: BitStream) -> int:
byte = 0
for i, bit in enumerate(bit_stream):
assert 0 <= i < 8, 'bit stream is too long'
byte = set_bit(byte, i, bit)
return byte
def read_bits(message: Iterable[int]) -> BitStream:
# return ((byte >> i) & 1 for byte in message for i in range(8))
return (get_bit(byte, i) for byte in message for i in range(8))
def read_bits_from_string(s: str) -> BitStream:
assert all(char in ('0', '1') for char in set(s)), f'Expected bit string but got "{s}"'
return (int(char) for char in s)
def chunk(collection: Iterable[T], n: int) -> Iterable[Iterable[T]]:
return itertools.zip_longest(*[iter(collection)]*n, fillvalue=0)
def write_bits(bit_stream: BitStream) -> bytes:
"""Inverse of `read_bits`."""
return bytes(
get_byte(bits) for bits in chunk(bit_stream, n=8)
)
def get_relative_frequencies(message: Iterable[H]) -> dict[H, float]:
counter = Counter(message)
n = sum(counter.values())
return {
symbol: count / n
for symbol, count in counter.items()
}
``` |
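A few worked calls for the bit helpers above: bits are read least-significant first per byte, and `write_bits` is the inverse of `read_bits`.

```python
print(list(read_bits(b'\x01')))              # [1, 0, 0, 0, 0, 0, 0, 0]
print(write_bits([1, 0, 0, 0, 0, 0, 0, 0]))  # b'\x01'
print(get_relative_frequencies('aab'))       # {'a': 0.666..., 'b': 0.333...}
```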
{
"source": "jneuendorf/price_tracker",
"score": 2
} |
#### File: price_tracker/tracker/admin.py
```python
from django.contrib import admin
from .models import (
HtmlNode,
Page,
PriceParser,
RunResult,
UserAgent,
)
from .models.recipient import CallMeBotRecipient
class CallMeBotRecipientInline(admin.TabularInline):
model = CallMeBotRecipient.pages.through
class RunResultInline(admin.TabularInline):
model = RunResult
class HtmlNodeInline(admin.TabularInline):
model = HtmlNode
@admin.register(Page)
class PageAdmin(admin.ModelAdmin):
inlines = [CallMeBotRecipientInline, RunResultInline]
readonly_fields = ['is_active']
actions = ["run", "run_test"]
def run(self, request, queryset):
for instance in queryset:
instance.run(force=True)
def run_test(self, request, queryset):
for instance in queryset:
instance.run(force=True, test=True)
@admin.register(RunResult)
class RunResultAdmin(admin.ModelAdmin):
readonly_fields = ['created_at']
inlines = [HtmlNodeInline]
@admin.register(HtmlNode)
class HtmlNodeAdmin(admin.ModelAdmin):
readonly_fields = ['run_result']
admin.site.register(PriceParser)
admin.site.register(CallMeBotRecipient)
admin.site.register(UserAgent)
```
#### File: tracker/models/page.py
```python
from datetime import timedelta
import logging
import traceback
from urllib.parse import urlparse
from django.db import models
from django.utils import timezone
import requests
from tracker.util import find_html_nodes, random_referer
from .price_parser import PriceParser
from .user_agent import UserAgent
logger = logging.getLogger(__name__)
class Page(models.Model):
name = models.CharField(max_length=80)
url = models.URLField(max_length=800, unique=True)
css_selector = models.CharField(max_length=200)
price_parser = models.ForeignKey(
PriceParser,
on_delete=models.PROTECT,
)
interval = models.PositiveSmallIntegerField()
interval_unit = models.CharField(
max_length=7,
# Must be datetime.timedelta compatible
choices=[
('minutes', 'minutes'),
('hours', 'hours'),
('days', 'days'),
],
)
is_active = models.BooleanField(blank=True, default=False)
is_archived = models.BooleanField(blank=True, default=False)
def get_last_run_result(self):
return self.run_results.order_by('created_at').last()
def needs_run(self):
last_run = self.get_last_run_result()
return (
last_run is None
or (
(
last_run.created_at
+ timedelta(**{self.interval_unit: self.interval})
)
<= timezone.now()
)
)
# TODO: with django.db.transaction.atomic() ?
def run(self, user_agents=None, force=False, test=False):
# TODO: Declutter cyclic dependency
from .run_result import RunResult
if not force and (self.is_active or not self.needs_run()):
logger.info(f'skipping {self.name}')
return
self.is_active = True
self.save()
try:
response = requests.get(
self.url,
headers={
'Accept': (
'text/html,application/xhtml+xml,application/xml;'
'q=0.9,image/avif,image/webp,image/apng,*/*;'
'q=0.8,application/signed-exchange;v=b3;q=0.9'
),
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-GB,en-US;q=0.9,en;q=0.8',
'Cache-Control': 'max-age=0',
'Referer': random_referer(),
'Sec-Fetch-Dest': 'document',
'Sec-Fetch-Mode': 'navigate',
'Sec-Fetch-Site': 'none',
'Upgrade-Insecure-Requests': '1',
'User-Agent': UserAgent.random(queryset=user_agents),
# 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36',
},
)
html = response.text
html_nodes = find_html_nodes(html, self.css_selector)
if test:
logger.info(
f'found nodes {"".join(str(node) for node in html_nodes)}'
)
else:
run_result = RunResult.objects.create(page=self)
for node in html_nodes:
run_result.html_nodes.create(
html=node.prettify(),
price=self.price_parser.get_price(node),
)
except Exception as e:
logger.error(str(e))
logger.error(traceback.print_exc())
self.is_active = False
self.save()
logger.info(f'successfully ran {self.name}')
def get_price_drop(self):
run_results = self.run_results.order_by('-created_at')
if len(run_results) == 0:
return (-1, -1)
if len(run_results) >= 2:
last, next_to_last = run_results[0:2]
else:
last = next_to_last = run_results[0]
prices_last = last.get_prices()
prices_next_to_last = next_to_last.get_prices()
if len(prices_last) == 1 and len(prices_next_to_last) == 1:
return (prices_next_to_last[0], prices_last[0])
raise ValueError(
'Could not determine if the price has dropped because price is '
f'not definite for either Page(id={last.id}) -> {prices_last} or '
f'Page(id={next_to_last.id}) -> {prices_next_to_last}'
)
def get_readable_url(self):
parsed_url = urlparse(self.url)
return f'{parsed_url.netloc}/…/{parsed_url.path.split("/")[-1]}'
def __str__(self):
short_units = {
'minutes': 'm',
'hours': 'h',
'days': 'd',
}
return (
f'<Page url="{self.get_readable_url()}" '
f'css_selector="{self.css_selector}" '
f'interval="{self.interval}{short_units[self.interval_unit]}">'
)
``` |
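The scheduling in `needs_run` hinges on expanding the unit/value pair into `timedelta` keyword arguments; a quick illustration:

```python
from datetime import timedelta

# Page(interval=6, interval_unit='hours') is due again once
# last_run.created_at + timedelta(**{'hours': 6}) <= timezone.now()
print(timedelta(**{'hours': 6}))  # 6:00:00
```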
{
"source": "jneuendorf/pyllute",
"score": 3
} |
#### File: pyllute/scripts/_utils.py
```python
import json
import os
TESTS_DIR = os.path.join(
os.path.dirname(os.path.dirname(__file__)),
'__tests__',
)
def load_json(testname):
json_file = os.path.join(TESTS_DIR, f'{testname}.json')
# print(json_file)
with open(json_file) as f:
return json.load(f)
def run(data, namespaces, serializers):
namespace_dicts = [
namespace if isinstance(namespace, dict) else vars(namespace)
for namespace in namespaces
]
results = {}
for funcname, args2d in data.items():
print(funcname, '..............')
serializer = serializers.get(funcname)
func_results = []
for args in args2d:
func = _get_func(funcname, namespace_dicts)
try:
result = func(*args)
if callable(serializer):
print(result, type(result))
result = serializer(result)
except Exception as e:
result = {
'__error__': {
'type': type(e).__name__,
'message': (
e.message
if hasattr(e, 'message')
else ', '.join(e.args)
),
},
}
print('encoded caught error', str(e), 'as', str(result))
func_results.append(result)
results[funcname] = func_results
return results
def _get_func(funcname, namespace_dicts):
for namespace_dict in namespace_dicts:
if funcname in namespace_dict:
return namespace_dict[funcname]
raise ValueError(f'Could not find {funcname} in namespaces.')
def save_json(testname, data):
json_file = os.path.join(TESTS_DIR, f'{testname}_expected.json')
# print(json_file)
try:
with open(json_file, 'w') as f:
json.dump(data, f, indent=2)
except TypeError:
print(data)
raise
return json_file
def generate(testname, namespaces, serializer):
filename = save_json(
testname,
run(
load_json(testname),
namespaces,
serializer,
)
)
print('generated', filename)
``` |
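A minimal sketch of `run` with a built-in standing in for a pyllute function (the test data here is made up): each entry maps a function name to a list of argument lists, and raised exceptions are encoded under `__error__`.

```python
data = {'len': [[[1, 2, 3]], [None]]}
print(run(data, namespaces=[{'len': len}], serializers={}))
# returns (besides the progress prints):
# {'len': [3, {'__error__': {'type': 'TypeError',
#                            'message': "object of type 'NoneType' has no len()"}}]}
```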
{
"source": "jneuendorf/python_to_js",
"score": 4
} |
#### File: jneuendorf/python_to_js/f.py
```python
def f(a, b=1, *c, d, e=3, **f):
print(a,b,c,d,e,f)
f(1,2,3,4,5,d='d')
# 1 2 (3, 4, 5) d 3 {}
f(1,b=2,c=(3,4,5),d='d')
# 1 2 () d 3 {'c': (3, 4, 5)}
# function f(args, kwargs) {
# let [a, b=1, ...c] = args
# let {d, e=3, ...f} = kwargs
# // try to take all args from kwargs (except c because rest):
# if (kwargs.hasOwnProperty('a')) a = kwargs.a
# if (kwargs.hasOwnProperty('b')) b = kwargs.b
# // If kwargs has an arg the arg cannot not also be in args because this would
# // be a SyntaxError in python.
# [a, b]
# }
# f([1,2,3,4,5], {d:'d'})
# f([1], {b:2, c: [3,4,5], d:'d'})
```
#### File: src/py/statements.py
```python
import ast
from utils import consume
@consume('targets', 'value')
def _map_assign(babel_node, node, parents):
targets = babel_node['targets']
value = babel_node['value']
if len(targets) == 1:
target = targets[0]
if isinstance(target, ast.Tuple):
raise ValueError('tuple unpacking is not yet supported')
elif isinstance(target, ast.List):
raise ValueError('list unpacking is not yet supported')
else:
return {
'type': 'VariableDeclaration',
'declarations': [
{
'type': 'VariableDeclarator',
'id': target,
'init': value,
}
],
'kind': 'var',
}
else:
raise ValueError('assigning to multiple variables is not yet supported')
@consume('targets', 'value')
def _map_assign_to_class_prop(babel_node, node, parents):
targets = babel_node['targets']
value = babel_node['value']
if len(targets) == 1:
target = targets[0]
# Unpacking is not supported by ES6 => custom implementation
if isinstance(target, ast.Tuple):
raise ValueError('tuple unpacking is not yet supported')
elif isinstance(target, ast.List):
raise ValueError('list unpacking is not yet supported')
else:
return {
'type': 'ClassProperty',
# TODO: How to detect this?
'static': False,
'computed': False,
# TODO: ?
# 'varianace': None,
'key': target,
'value': value,
}
else:
raise ValueError('assigning to multiple variables is not yet supported')
def map_assign(babel_node, node, parents):
if parents and isinstance(parents[0], ast.ClassDef):
return _map_assign_to_class_prop(babel_node, node, parents)
else:
return _map_assign(babel_node, node, parents)
``` |
{
"source": "jneuendorf/raspi_doorbell",
"score": 3
} |
#### File: raspi_doorbell/src/utils.py
```python
from datetime import datetime, time
from argon2 import PasswordHasher, exceptions
password_hasher = PasswordHasher()
def is_time_between(begin_time, end_time, check_time):
"""
https://stackoverflow.com/a/10048290/6928824
"""
if begin_time < end_time:
return check_time >= begin_time and check_time <= end_time
# crosses midnight
else:
return check_time >= begin_time or check_time <= end_time
def do_not_disturb_now(server_config):
do_not_disturb = server_config.do_not_disturb
now = datetime.now().time()
begin = time(*do_not_disturb['begin'])
end = time(*do_not_disturb['end'])
return is_time_between(begin, end, now)
def hash_password(password):
return password_hasher.hash(password)
def verify_password(hash, password):
return password_hasher.verify(hash, password)
VerificationError = exceptions.VerificationError
``` |
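A worked example of the midnight-crossing case handled above:

```python
from datetime import time

print(is_time_between(time(22, 0), time(6, 0), time(23, 30)))  # True
print(is_time_between(time(22, 0), time(6, 0), time(12, 0)))   # False
```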
{
"source": "jneuendorf/what-should-i-eat",
"score": 2
} |
#### File: what-should-i-eat/fuelux_widgets/forms.py
```python
from django import forms
import fuelux_widgets
class FuelUxForm(forms.Form):
# make widgets aware of their names
# (make their name availabel in the render context)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field_name in self.fields:
widget = self.fields[field_name].widget
if isinstance(widget, fuelux_widgets.FuelUxWidget):
widget.set_name(field_name)
```
#### File: fuelux_widgets/templatetags/fuelux_widgets.py
```python
from django import template
from django.template.defaultfilters import stringfilter
from json import dumps
from django.core.serializers.json import DjangoJSONEncoder
register = template.Library()
# @register.filter(name='slice')
@register.filter
@stringfilter
def slice(value, indices):
'''
    Slices the value; the argument is a comma-separated "start,end" pair of indices.
'''
[start, end] = [int(index) for index in indices.split(",")]
return value[start:end]
@register.filter(name='json')
def json(value):
try:
return dumps(value, cls=DjangoJSONEncoder)
except Exception as e:
try:
# assume models
return dumps(list(value.values()), cls=DjangoJSONEncoder)
except Exception as e:
# pass
print(str(e))
return '{}'
```
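A quick sketch of the two filters called directly as plain Python functions (outside a template; note that this `slice` shadows the builtin):

```python
print(slice('hello world', '0,5'))  # hello
print(json({'a': 1}))               # {"a": 1}
```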
#### File: what-should-i-eat/recipe_book/views.py
```python
import random
import re
# from django.forms import formset_factory
from django.shortcuts import get_object_or_404, render
from .forms import AddRecipeForm, RecipeImageFormSet
from .models import Recipe, Tag, Ingredient, IngredientAmount
from . import utils
# HELPER FUNCTIONS
def rand_color():
return "#%02X%02X%02X" % (
random.randint(0, 255),
random.randint(0, 255),
random.randint(0, 255)
)
# VIEW FUNCTIONS
def index(request):
recipes = Recipe.objects.order_by('name')
return render(request, 'recipe_book/index.html', {
'title': 'recipe book',
'recipes': recipes,
})
def add(request):
if request.method == 'POST':
form = AddRecipeForm(request.POST)
recipe_image_formset = RecipeImageFormSet(
request.POST,
request.FILES,
)
if form.is_valid() and recipe_image_formset.is_valid():
# PARSE TAG NAMES AND INSERT NEW TAGS INTO THE DATABASE
existing_tags = set(tag.name for tag in Tag.objects.all())
submitted_tags = (
set(form.cleaned_data["tags"][0:-1].split(","))
if len(form.cleaned_data["tags"]) > 0
else set()
)
diff = submitted_tags - existing_tags
for tag in diff:
Tag.objects.create(
name=tag,
color=rand_color()
)
# PARSE INGREDIENTS AND INSERT NEW ONES INTO THE DATABASE
existing_ingredients = set(
ingredient.name for ingredient in Ingredient.objects.all()
)
submitted_ingredients = set(
re.split("\s*,\s*", form.cleaned_data["ingredients"])
)
# map ingredient (incl. amount) -> ingredient name
submitted_ingredient_names = {
submitted_ingredient: utils.extract_ingredient_name(
submitted_ingredient
)
for submitted_ingredient in submitted_ingredients
}
diff = (
set(submitted_ingredient_names.values()) -
existing_ingredients
)
relevant_ingredients = list(Ingredient.objects.filter(
name__in=submitted_ingredient_names
))
for ingredient in diff:
relevant_ingredients.append(Ingredient.objects.create(
name=ingredient,
))
recipe = Recipe.objects.create(
name=form.cleaned_data["name"],
description=form.cleaned_data["description"],
cooked_last=form.cleaned_data["cooked_last"],
)
recipe.tags = Tag.objects.filter(name__in=submitted_tags)
for ingredient in submitted_ingredients:
name = submitted_ingredient_names[ingredient]
# NOTE: This is difficult because 'name' is a plural
# but 'ingredient' might contain the singular of 'name'.
# Therefore, if replacing the plural does nothing
# (meaning the singular is present in 'ingredient')
# then the plural is truncated more and more
# until a match happens
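                # For example (hypothetical values): name = "tomatoes" and
                # ingredient = "1 tomato" -> replacing the plural changes nothing,
                # so the loop splits on "tomatoe", then on "tomato",
                # which finally yields amount = "1".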
amount = ingredient.replace(name, "").strip()
truncation = 1
while amount == ingredient:
parts = ingredient.split(name[:-truncation])
amount = parts[0].strip()
truncation += 1
IngredientAmount.objects.create(
# ingredient=ingredients.filter(name=ingredient).first(),
ingredient=Ingredient.objects.get(name=name),
# [i for i in relevant_ingredients if i.name == name][0],
recipe=recipe,
amount=amount,
)
recipe.save()
# cleaned data is used when saving the form
for recipe_image_form in recipe_image_formset:
recipe_image_form.instance.recipe = recipe
# recipe_image_form.save()
recipe_image_formset.save()
form = AddRecipeForm()
# if a GET (or any other method) we'll create a blank form
else:
form = AddRecipeForm()
recipe_image_formset = RecipeImageFormSet()
print(recipe_image_formset.initial)
print([form.initial for form in recipe_image_formset])
return render(request, 'recipe_book/add.html', {
'form': form,
'recipe_image_formset': recipe_image_formset,
'title': 'new recipe',
})
def detail(request, recipe_id):
recipe = get_object_or_404(Recipe, pk=recipe_id)
return render(request, 'recipe_book/detail.html', {'recipe': recipe})
def tags(request):
tags = Tag.objects.order_by('name')
return render(request, 'recipe_book/tags.html', {'tags': tags})
``` |
{
"source": "jneuff/pex",
"score": 2
} |
#### File: pex/pex/pex_bootstrapper.py
```python
from __future__ import absolute_import
import os
import sys
from pex import pex_warnings
from pex.common import die
from pex.interpreter import PythonInterpreter
from pex.interpreter_constraints import UnsatisfiableInterpreterConstraintsError
from pex.orderedset import OrderedSet
from pex.pex_info import PexInfo
from pex.tracer import TRACER
from pex.typing import TYPE_CHECKING, cast
from pex.variables import ENV
if TYPE_CHECKING:
from typing import (
Iterable,
Iterator,
List,
MutableSet,
NoReturn,
Optional,
Tuple,
Union,
Callable,
)
InterpreterIdentificationError = Tuple[str, str]
InterpreterOrError = Union[PythonInterpreter, InterpreterIdentificationError]
PathFilter = Callable[[str], bool]
# TODO(<NAME>): Move this to interpreter_constraints.py. As things stand, both pex/bin/pex.py
# and this file use this function. The Pex CLI should not depend on this file which hosts code
# used at PEX runtime.
def iter_compatible_interpreters(
path=None, # type: Optional[str]
valid_basenames=None, # type: Optional[Iterable[str]]
interpreter_constraints=None, # type: Optional[Iterable[str]]
preferred_interpreter=None, # type: Optional[PythonInterpreter]
):
# type: (...) -> Iterator[PythonInterpreter]
"""Find all compatible interpreters on the system within the supplied constraints.
:param path: A PATH-style string with files or directories separated by os.pathsep.
:param valid_basenames: Valid basenames for discovered interpreter binaries. If not specified,
Then all typical names are accepted (i.e.: python, python3, python2.7,
pypy, etc.).
:param interpreter_constraints: Interpreter type and version constraint strings as described in
`--interpreter-constraint`.
:param preferred_interpreter: For testing - an interpreter to prefer amongst all others.
Defaults to the current running interpreter.
Interpreters are searched for in `path` if specified and $PATH if not.
If no interpreters are found and there are no further constraints (neither `valid_basenames` nor
`interpreter_constraints` is specified) then the returned iterator will be empty. However, if
    there are constraints specified, the returned iterator, although empty, will raise
`UnsatisfiableInterpreterConstraintsError` to provide information about any found interpreters
that did not match all the constraints.
"""
_valid_path = None # type: Optional[PathFilter]
if valid_basenames:
_valid_basenames = frozenset(cast("Iterable[str]", valid_basenames))
_valid_path = (
lambda interpreter_path: os.path.basename(interpreter_path) in _valid_basenames
)
def _iter_interpreters():
# type: () -> Iterator[InterpreterOrError]
seen = set()
normalized_paths = (
OrderedSet(os.path.realpath(p) for p in path.split(os.pathsep)) if path else None
)
# Prefer the current interpreter, if valid.
current_interpreter = preferred_interpreter or PythonInterpreter.get()
if not _valid_path or _valid_path(current_interpreter.binary):
if normalized_paths:
candidate_paths = frozenset(
(current_interpreter.binary, os.path.dirname(current_interpreter.binary))
)
candidate_paths_in_path = candidate_paths.intersection(normalized_paths)
if candidate_paths_in_path:
# In case the full path of the current interpreter binary was in the
# `normalized_paths` we're searching, remove it to prevent identifying it again
# just to then skip it as `seen`.
normalized_paths.discard(current_interpreter.binary)
seen.add(current_interpreter)
yield current_interpreter
else:
seen.add(current_interpreter)
yield current_interpreter
for interp in PythonInterpreter.iter_candidates(
paths=normalized_paths, path_filter=_valid_path
):
if interp not in seen:
seen.add(interp)
yield interp
def _valid_interpreter(interp_or_error):
# type: (InterpreterOrError) -> bool
if not isinstance(interp_or_error, PythonInterpreter):
return False
if not interpreter_constraints:
return True
interp = cast(PythonInterpreter, interp_or_error)
if any(
interp.identity.matches(interpreter_constraint)
for interpreter_constraint in interpreter_constraints
):
TRACER.log(
"Constraints on interpreters: {}, Matching Interpreter: {}".format(
interpreter_constraints, interp.binary
),
V=3,
)
return True
return False
candidates = [] # type: List[PythonInterpreter]
failures = [] # type: List[InterpreterIdentificationError]
found = False
for interpreter_or_error in _iter_interpreters():
if isinstance(interpreter_or_error, PythonInterpreter):
interpreter = cast(PythonInterpreter, interpreter_or_error)
candidates.append(interpreter)
if _valid_interpreter(interpreter_or_error):
found = True
yield interpreter
else:
error = cast("InterpreterIdentificationError", interpreter_or_error)
failures.append(error)
if not found and (interpreter_constraints or valid_basenames):
constraints = [] # type: List[str]
if interpreter_constraints:
constraints.append("Version matches {}".format(" or ".join(interpreter_constraints)))
if valid_basenames:
constraints.append("Basename is {}".format(" or ".join(valid_basenames)))
raise UnsatisfiableInterpreterConstraintsError(constraints, candidates, failures)
def _select_path_interpreter(
path=None, # type: Optional[str]
valid_basenames=None, # type: Optional[Tuple[str, ...]]
compatibility_constraints=None, # type: Optional[Iterable[str]]
):
# type: (...) -> Optional[PythonInterpreter]
candidate_interpreters_iter = iter_compatible_interpreters(
path=path,
valid_basenames=valid_basenames,
interpreter_constraints=compatibility_constraints,
)
current_interpreter = PythonInterpreter.get() # type: PythonInterpreter
candidate_interpreters = []
for interpreter in candidate_interpreters_iter:
if current_interpreter == interpreter:
# Always prefer continuing with the current interpreter when possible to avoid re-exec
# overhead.
return current_interpreter
else:
candidate_interpreters.append(interpreter)
if not candidate_interpreters:
return None
# TODO: Allow the selection strategy to be parameterized:
# https://github.com/pantsbuild/pex/issues/430
return PythonInterpreter.latest_release_of_min_compatible_version(candidate_interpreters)
def maybe_reexec_pex(compatibility_constraints=None):
# type: (Optional[Iterable[str]]) -> Union[None, NoReturn]
"""Handle environment overrides for the Python interpreter to use when executing this pex.
This function supports interpreter filtering based on interpreter constraints stored in PEX-INFO
metadata. If PEX_PYTHON is set it attempts to obtain the binary location of the interpreter
specified by PEX_PYTHON. If PEX_PYTHON_PATH is set, it attempts to search the path for a matching
interpreter in accordance with the interpreter constraints. If both variables are present, this
function gives precedence to PEX_PYTHON_PATH and errors out if no compatible interpreters can be
found on said path.
If neither variable is set, we fall back to plain PEX execution using PATH searching or the
currently executing interpreter. If compatibility constraints are used, we match those constraints
against these interpreters.
:param compatibility_constraints: optional list of requirements-style strings that constrain the
Python interpreter to re-exec this pex with.
"""
current_interpreter = PythonInterpreter.get()
target = None # type: Optional[PythonInterpreter]
# NB: Used only for tests.
if "_PEX_EXEC_CHAIN" in os.environ:
flag_or_chain = os.environ.pop("_PEX_EXEC_CHAIN")
pex_exec_chain = [] if flag_or_chain == "1" else flag_or_chain.split(os.pathsep)
pex_exec_chain.append(current_interpreter.binary)
os.environ["_PEX_EXEC_CHAIN"] = os.pathsep.join(pex_exec_chain)
current_interpreter_blessed_env_var = "_PEX_SHOULD_EXIT_BOOTSTRAP_REEXEC"
if os.environ.pop(current_interpreter_blessed_env_var, None):
# We've already been here and selected an interpreter. Continue to execution.
return None
from . import pex
pythonpath = pex.PEX.stash_pythonpath()
if pythonpath is not None:
TRACER.log("Stashed PYTHONPATH of {}".format(pythonpath), V=2)
with TRACER.timed("Selecting runtime interpreter", V=3):
if ENV.PEX_PYTHON and not ENV.PEX_PYTHON_PATH:
# preserve PEX_PYTHON re-exec for backwards compatibility
# TODO: Kill this off completely in favor of PEX_PYTHON_PATH
# https://github.com/pantsbuild/pex/issues/431
TRACER.log(
"Using PEX_PYTHON={} constrained by {}".format(
ENV.PEX_PYTHON, compatibility_constraints
),
V=3,
)
try:
if os.path.isabs(ENV.PEX_PYTHON):
target = _select_path_interpreter(
path=ENV.PEX_PYTHON,
compatibility_constraints=compatibility_constraints,
)
else:
target = _select_path_interpreter(
valid_basenames=(os.path.basename(ENV.PEX_PYTHON),),
compatibility_constraints=compatibility_constraints,
)
except UnsatisfiableInterpreterConstraintsError as e:
die(
e.create_message(
"Failed to find a compatible PEX_PYTHON={pex_python}.".format(
pex_python=ENV.PEX_PYTHON
)
)
)
elif ENV.PEX_PYTHON_PATH or compatibility_constraints:
TRACER.log(
"Using {path} constrained by {constraints}".format(
path="PEX_PYTHON_PATH={}".format(ENV.PEX_PYTHON_PATH)
if ENV.PEX_PYTHON_PATH
else "$PATH",
constraints=compatibility_constraints,
),
V=3,
)
try:
target = _select_path_interpreter(
path=ENV.PEX_PYTHON_PATH, compatibility_constraints=compatibility_constraints
)
except UnsatisfiableInterpreterConstraintsError as e:
die(
e.create_message(
"Failed to find compatible interpreter on path {path}.".format(
path=ENV.PEX_PYTHON_PATH or os.getenv("PATH")
)
)
)
elif pythonpath is None:
TRACER.log(
"Using the current interpreter {} since no constraints have been specified and "
"PYTHONPATH is not set.".format(sys.executable),
V=3,
)
return None
else:
target = current_interpreter
if not target:
# N.B.: This can only happen when PEX_PYTHON_PATH is set and compatibility_constraints is
# not set, but we handle all constraints generally for sanity sake.
constraints = []
if ENV.PEX_PYTHON:
constraints.append("PEX_PYTHON={}".format(ENV.PEX_PYTHON))
if ENV.PEX_PYTHON_PATH:
constraints.append("PEX_PYTHON_PATH={}".format(ENV.PEX_PYTHON_PATH))
if compatibility_constraints:
constraints.extend(
"--interpreter-constraint={}".format(compatibility_constraint)
for compatibility_constraint in compatibility_constraints
)
die(
"Failed to find an appropriate Python interpreter.\n"
"\n"
"Although the current interpreter is {python}, the following constraints exclude it:\n"
" {constraints}".format(python=sys.executable, constraints="\n ".join(constraints))
)
os.environ.pop("PEX_PYTHON", None)
os.environ.pop("PEX_PYTHON_PATH", None)
if pythonpath is None and target == current_interpreter:
TRACER.log(
"Using the current interpreter {} since it matches constraints and "
"PYTHONPATH is not set.".format(sys.executable)
)
return None
target_binary = target.binary
cmdline = [target_binary] + sys.argv
TRACER.log(
"Re-executing: "
"cmdline={cmdline!r}, "
"sys.executable={python!r}, "
"PEX_PYTHON={pex_python!r}, "
"PEX_PYTHON_PATH={pex_python_path!r}, "
"COMPATIBILITY_CONSTRAINTS={compatibility_constraints!r}"
"{pythonpath}".format(
cmdline=" ".join(cmdline),
python=sys.executable,
pex_python=ENV.PEX_PYTHON,
pex_python_path=ENV.PEX_PYTHON_PATH,
compatibility_constraints=compatibility_constraints,
pythonpath=', (stashed) PYTHONPATH="{}"'.format(pythonpath)
if pythonpath is not None
else "",
)
)
# Avoid a re-run through compatibility_constraint checking.
os.environ[current_interpreter_blessed_env_var] = "1"
os.execv(target_binary, cmdline)
def _bootstrap(entry_point):
# type: (str) -> PexInfo
pex_info = PexInfo.from_pex(entry_point) # type: PexInfo
pex_info.update(PexInfo.from_env())
pex_warnings.configure_warnings(pex_info, ENV)
return pex_info
# NB: This helper is used by the PEX bootstrap __main__.py code.
def bootstrap_pex(entry_point):
# type: (str) -> None
pex_info = _bootstrap(entry_point)
maybe_reexec_pex(pex_info.interpreter_constraints)
from . import pex
pex.PEX(entry_point).execute()
# NB: This helper is used by third party libs - namely https://github.com/wickman/lambdex.
# TODO(<NAME>): Kill once https://github.com/wickman/lambdex/issues/5 is resolved.
def is_compressed(entry_point):
# type: (str) -> bool
return os.path.exists(entry_point) and not os.path.exists(
os.path.join(entry_point, PexInfo.PATH)
)
# NB: This helper is used by third party libs like https://github.com/wickman/lambdex and
# https://github.com/kwlzn/pyuwsgi_pex.
def bootstrap_pex_env(entry_point):
# type: (str) -> None
"""Bootstrap the current runtime environment using a given pex."""
pex_info = _bootstrap(entry_point)
from .environment import PEXEnvironment
PEXEnvironment(entry_point, pex_info).activate()
```
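The third-party helpers above are thin wrappers around `PexInfo`; the sketch below shows how a tool like lambdex consumes `bootstrap_pex_env`. The `.pex` path and the bundled `requests` dependency are hypothetical, and the import path mirrors how external libraries import this module.
```python
# A sketch only -- not part of the pex sources above.
from pex.pex_bootstrapper import bootstrap_pex_env

# Activate the dependencies bundled in an existing PEX file into the current
# interpreter, without re-exec'ing or running the PEX's own entry point.
bootstrap_pex_env("/path/to/app.pex")  # hypothetical path

# After activation, distributions bundled in the PEX are importable as usual
# (assuming, for example, that requests was included when the PEX was built).
import requests
```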
#### File: pex/tests/test_pex_info.py
```python
import os.path
import warnings
import pytest
from pex.common import temporary_dir
from pex.orderedset import OrderedSet
from pex.pex_info import PexInfo
from pex.pex_warnings import PEXWarning
from pex.typing import TYPE_CHECKING
from pex.variables import Variables
from pex.version import __version__ as pex_version
if TYPE_CHECKING:
from typing import Dict, List, Text
def test_backwards_incompatible_pex_info():
# type: () -> None
def make_pex_info(requirements):
# type: (List[Text]) -> PexInfo
return PexInfo(info={"requirements": requirements})
# forwards compatibility
pi = make_pex_info(["hello"])
assert pi.requirements == OrderedSet(["hello"])
pi = make_pex_info(["hello==0.1", "world==0.2"])
assert pi.requirements == OrderedSet(["hello==0.1", "world==0.2"])
# malformed
with pytest.raises(ValueError):
make_pex_info("hello") # type: ignore[arg-type]
with pytest.raises(ValueError):
make_pex_info([("hello", False)]) # type: ignore[list-item]
# backwards compatibility
pi = make_pex_info(
[
["hello==0.1", False, None], # type: ignore[list-item]
["world==0.2", False, None], # type: ignore[list-item]
]
)
assert pi.requirements == OrderedSet(["hello==0.1", "world==0.2"])
def assert_same_info(expected, actual):
# type: (PexInfo, PexInfo) -> None
assert expected.dump(sort_keys=True) == actual.dump(sort_keys=True)
def test_from_empty_env():
# type: () -> None
environ = Variables(environ={})
info = {} # type: Dict
assert_same_info(PexInfo(info=info), PexInfo.from_env(env=environ))
def test_from_env():
# type: () -> None
with temporary_dir() as td:
pex_root = os.path.realpath(os.path.join(td, "pex_root"))
environ = dict(
PEX_ROOT=pex_root,
PEX_MODULE="entry:point",
PEX_SCRIPT="script.sh",
PEX_FORCE_LOCAL="true",
PEX_UNZIP="true",
PEX_INHERIT_PATH="prefer",
PEX_IGNORE_ERRORS="true",
PEX_ALWAYS_CACHE="true",
)
info = dict(
pex_root=pex_root,
entry_point="entry:point",
script="script.sh",
zip_safe=False,
unzip=True,
inherit_path=True,
ignore_errors=True,
always_write_cache=True,
)
assert_same_info(PexInfo(info=info), PexInfo.from_env(env=Variables(environ=environ)))
def test_build_properties():
# type: () -> None
assert pex_version == PexInfo.default().build_properties["pex_version"]
def test_merge_split():
# type: () -> None
path_1, path_2 = "/pex/path/1:/pex/path/2", "/pex/path/3:/pex/path/4"
result = PexInfo._merge_split(path_1, path_2)
assert result == ["/pex/path/1", "/pex/path/2", "/pex/path/3", "/pex/path/4"]
path_1, path_2 = "/pex/path/1:", "/pex/path/3:/pex/path/4"
result = PexInfo._merge_split(path_1, path_2)
assert result == ["/pex/path/1", "/pex/path/3", "/pex/path/4"]
path_1, path_2 = "/pex/path/1::/pex/path/2", "/pex/path/3:/pex/path/4"
result = PexInfo._merge_split(path_1, path_2)
assert result == ["/pex/path/1", "/pex/path/2", "/pex/path/3", "/pex/path/4"]
path_1, path_2 = "/pex/path/1::/pex/path/2", "/pex/path/3:/pex/path/4"
result = PexInfo._merge_split(path_1, None)
assert result == ["/pex/path/1", "/pex/path/2"]
result = PexInfo._merge_split(None, path_2)
assert result == ["/pex/path/3", "/pex/path/4"]
def test_pex_root_set_none():
# type: () -> None
pex_info = PexInfo.default()
pex_info.pex_root = None
assert PexInfo.default().pex_root == pex_info.pex_root
assert os.path.expanduser("~/.pex") == pex_info.pex_root
def test_pex_root_set_unwriteable():
# type: () -> None
with temporary_dir() as td:
pex_root = os.path.realpath(os.path.join(td, "pex_root"))
os.mkdir(pex_root, 0o444)
pex_info = PexInfo.default()
pex_info.pex_root = pex_root
with warnings.catch_warnings(record=True) as log:
assert pex_root != pex_info.pex_root
assert 1 == len(log)
message = log[0].message
assert isinstance(message, PEXWarning)
assert pex_root in str(message)
assert pex_info.pex_root in str(message)
``` |
{
"source": "jneug/drang-run",
"score": 3
} |
#### File: drang-run/tests/test_counter.py
```python
import pytest
from drang_run import Counter
def sign(n):
return (n > 0) - (n < 0)
@pytest.mark.parametrize(
"start,stop,step",
[
(1, 10, 1),
(1, 20, 2),
(1, 20, 3),
(100, 40, -1),
(0, 10, 2),
(5, 10, 1),
(10, -20, -1),
(-1, 40, 1),
(-99, 100, 1),
(-10, -50, -1),
(99, 100, 1),
],
)
def test_counter(start, stop, step):
expected = range(start, stop + sign(step), step)
cnt = Counter(start, stop, step)
assert list(cnt) == list(expected)
cnt_rev = Counter(start, stop, step, True)
assert list(cnt_rev) == list(reversed(expected))
@pytest.mark.parametrize(
"start,stop,step",
[
(1, 1, 1),
(1, 1, 2),
(1, 1, -1),
(0, 0, 1),
(0, 0, 10),
(0, 0, -10),
(100, 100, 1),
(100, 100, 2),
(100, 100, -1),
(-100, -100, -1),
(-100, -100, -2),
(-100, -100, 1),
],
)
def test_counter_start_eq_stop(start, stop, step):
assert start == stop
cnt = Counter(start, stop, step)
assert list(cnt) == [start]
cnt_rev = Counter(start, stop, step, True)
assert list(cnt_rev) == [start]
@pytest.mark.parametrize(
"start,stop,step",
[(99, 100, 2), (100, 99, -2), (0, 5, 10), (1, 10, 100), (500, 0, -1000)],
)
def test_counter_step_gt_diff(start, stop, step):
expected = range(start, stop + sign(step), step)
assert len(expected) == 1
cnt = Counter(start, stop, step)
assert list(cnt) == list(expected)
@pytest.mark.parametrize(
"start,stop,step,expected",
[
(1, 10, -1, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
(10, 1, 1, [10, 9, 8, 7, 6, 5, 4, 3, 2, 1]),
(-5, 5, -1, [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5]),
(5, -5, 1, [5, 4, 3, 2, 1, 0, -1, -2, -3, -4, -5]),
],
)
def test_counter_wrong_step(start, stop, step, expected):
cnt = Counter(start, stop, step)
assert list(cnt) == expected
def test_counter_step_zero():
with pytest.raises(ValueError):
Counter(1, 100, 0)
with pytest.raises(ValueError):
Counter(-10, 10, 0)
``` |
{
"source": "jneug/parboil",
"score": 3
} |
#### File: parboil/parboil/console.py
```python
import typing as t
import click
from colorama import Back, Fore, Style
def printd(
msg: str, echo: t.Optional[t.Callable[[str], t.Any]] = click.echo, decor: str = ""
) -> t.Any:
"""Print msg
Prefix with decor if given and passed to echo or returned if echo==None
"""
if callable(echo):
return echo(f"{decor}{msg}")
else:
return f"{decor}{msg}"
def info(msg: str, echo: t.Optional[t.Callable[[str], t.Any]] = click.echo) -> t.Any:
return printd(
msg, echo=echo, decor=f"[{Fore.BLUE}{Style.BRIGHT}i{Style.RESET_ALL}] "
)
def warn(msg: str, echo: t.Optional[t.Callable[[str], t.Any]] = click.echo) -> t.Any:
return printd(
msg, echo=echo, decor=f"[{Fore.YELLOW}{Style.BRIGHT}!{Style.RESET_ALL}] "
)
def error(msg: str, echo: t.Optional[t.Callable[[str], t.Any]] = click.echo) -> t.Any:
return printd(
msg, echo=echo, decor=f"[{Fore.RED}{Style.BRIGHT}X{Style.RESET_ALL}] "
)
def success(msg: str, echo: t.Optional[t.Callable[[str], t.Any]] = click.echo) -> t.Any:
return printd(
msg, echo=echo, decor=f"[{Fore.GREEN}{Style.BRIGHT}✓{Style.RESET_ALL}] "
)
def indent(msg: str, echo: t.Optional[t.Callable[[str], t.Any]] = click.echo) -> t.Any:
return printd(msg, echo=echo, decor=" ")
def question(
msg: str,
default: t.Optional[t.Any] = None,
echo: t.Callable[..., t.Any] = click.prompt,
    color: t.Optional[str] = Fore.BLUE,
) -> t.Any:
msg = printd(msg, echo=None, decor=f"[{color}{Style.BRIGHT}?{Style.RESET_ALL}] ")
if default:
return echo(msg, default=default)
else:
return echo(msg)
```
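A usage sketch for the wrappers above (not part of parboil itself; the import path follows the `parboil/parboil/console.py` layout shown in the file header):
```python
from parboil.console import error, indent, info, question, success, warn

info("Loading template ...")
success("Template loaded.")
warn("Output directory already exists.")
error("Could not write file.")
indent("extra detail shown under the previous line")
# question() delegates to click.prompt and returns the user's answer.
name = question("Project name", default="demo")
```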
#### File: parboil/parboil/logging.py
```python
import logging
import sys
import typing as t
from pathlib import Path
DEV = 15
DEV_NAME = "DEV"
def configure_logging(
loglevel: t.Union[int, str] = logging.CRITICAL,
logfile: t.Optional[t.Union[str, Path]] = None,
debugfile: t.Optional[t.Union[str, Path]] = None,
devfile: t.Optional[t.Union[str, Path]] = None,
) -> None:
# Setup new DEV log level
def logDEV(self, msg, *args, **kwargs):
if self.isEnabledFor(DEV):
self._log(DEV, msg, args, **kwargs)
logging.addLevelName(DEV, DEV_NAME)
setattr(logging, DEV_NAME, DEV)
setattr(logging.getLoggerClass(), DEV_NAME.lower(), logDEV)
logger = logging.getLogger("parboil")
logger.setLevel(logging.DEBUG)
if isinstance(loglevel, str):
loglevel = logging.getLevelName(loglevel)
# Create a file handler for the debugfile
# Logs all messages to the given filepath
if debugfile is not None:
dfile_handler = logging.FileHandler(debugfile)
dfile_handler.setLevel(logging.DEBUG)
logger.addHandler(dfile_handler)
# Log DEV messages to separate file
if devfile is not None:
dev_handler = logging.FileHandler(devfile)
dev_handler.setLevel(DEV)
logger.addHandler(dev_handler)
if logfile is not None:
lfile_handler = logging.FileHandler(logfile)
lfile_handler.setLevel(loglevel)
logger.addHandler(lfile_handler)
else:
# Log to stdout with given loglevel
stream_handler = logging.StreamHandler(stream=sys.stdout)
stream_handler.setLevel(loglevel)
logger.addHandler(stream_handler)
```
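A brief sketch of how `configure_logging` might be called (the file names are made up): once it has run, the custom DEV level is usable both through the injected `logger.dev()` method and by level number.
```python
import logging
from parboil.logging import DEV, configure_logging

# Log INFO and above to a file, and everything to a separate debug file
# (both paths are hypothetical).
configure_logging(loglevel="INFO", logfile="parboil.log", debugfile="parboil-debug.log")

logger = logging.getLogger("parboil")
logger.info("normal message")
logger.dev("developer-only message")      # method added by configure_logging()
logger.log(DEV, "same thing, by level number")
```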
#### File: parboil/tests/test_boil_use.py
```python
from click.testing import CliRunner
from parboil.parboil import boil
def test_boil_use():
runner = CliRunner()
# use without args not allowed
result = runner.invoke(boil, ["use"])
assert result.exit_code == 2
# help message
result = runner.invoke(boil, ["use", "--help"])
assert result.exit_code == 0
assert "Usage: boil use" in result.output
```
#### File: parboil/tests/test_env.py
```python
import shutil, json
from pathlib import Path
import pytest
from click.testing import CliRunner
from parboil.parboil import boil
from parboil.project import Project, ProjectFileNotFoundError, PRJ_FILE, META_FILE
def test_environment(repo_path, tpl_path):
shutil.copytree(tpl_path, repo_path, dirs_exist_ok=True)
prj = Project("environ", repo_path)
prj.setup(load_project=True)
assert "Project" in prj.fields
``` |
{
"source": "jneug/schule-projekte",
"score": 4
} |
#### File: Abiturklassen/Python/binarysearchtree.py
```python
class BinarySearchTree(object):
class _BSTNode(object):
content = None
left = None
right = None
def __init__(self, pContent):
self.content = pContent
self.left = BinarySearchTree()
self.right = BinarySearchTree()
def __init__(self):
self._node = None
def isEmpty(self):
return self._node == None
def insert(self, pContent):
if pContent is not None:
if self.isEmpty():
self._node = self._BSTNode(pContent)
elif pContent < self._node.content:
self._node.left.insert(pContent)
elif pContent > self._node.content:
self._node.right.insert(pContent)
def getLeftTree(self):
if self.isEmpty():
return None
else:
return self._node.left
def getContent(self):
if self.isEmpty():
return None
else:
return self._node.content
def getRightTree(self):
if self.isEmpty():
return None
else:
return self._node.right
def remove(self, pContent):
if self.isEmpty() or pContent is None:
return
        if pContent < self._node.content:
            self._node.left.remove(pContent)
        elif pContent > self._node.content:
            self._node.right.remove(pContent)
        else:
            if self._node.left.isEmpty():
                if self._node.right.isEmpty():
                    self._node = None
                else:
                    self._node = self._getNodeOfRightSuccessor()
            elif self._node.right.isEmpty():
                self._node = self._getNodeOfLeftSuccessor()
else:
if self._getNodeOfRightSuccessor().left.isEmpty():
self._node.content = self._getNodeOfRightSuccessor().content
self._node.right = self._getNodeOfRightSuccessor().right
else:
previous = self._node.right._ancestorOfSmallRight()
smallest = previous._node.left
self._node.content = smallest._node.content
previous.remove(smallest._node.content)
def search(self, pContent):
if self.isEmpty() or pContent is None:
return None
else:
content = self.getContent()
if pContent < content:
return self.getLeftTree().search(pContent)
elif pContent > content:
return self.getRightTree().search(pContent)
elif pContent == content:
return content
else:
return None
def _ancestorOfSmallRight(self):
if self._getNodeOfLeftSuccessor().left.isEmpty():
return self
else:
return self._node.left._ancestorOfSmallRight()
def _getNodeOfLeftSuccessor(self):
return self._node.left._node
def _getNodeOfRightSuccessor(self):
return self._node.right._node
```
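A short usage sketch for the class above (not part of the original file; it assumes the module is importable as `binarysearchtree`):
```python
from binarysearchtree import BinarySearchTree

tree = BinarySearchTree()
for value in [8, 3, 10, 1, 6]:
    tree.insert(value)

print(tree.search(6))    # -> 6
print(tree.search(7))    # -> None (not in the tree)

tree.remove(3)           # 3 has two children; its in-order successor 6 takes its place
print(tree.getLeftTree().getContent())   # -> 6
```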
#### File: Abiturklassen/Python/stack.py
```python
class Stack(object):
class StackNode(object):
def __init__(self, pContent):
self._content = pContent
self._nextNode = None
def setNext(self, pNext):
self._nextNode = pNext
def getNext(self):
return self._nextNode
def getContent(self):
return self._content
def __init__(self):
self._head = None
def isEmpty(self):
return self._head is None
def push(self, pContent):
if pContent is not None:
node = self.StackNode(pContent)
node.setNext(self._head)
self._head = node
def pop(self):
if not self.isEmpty():
self._head = self._head.getNext()
def top(self):
if not self.isEmpty():
return self._head.getContent()
else:
return None
```
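A matching usage sketch for the stack (again assuming the module is importable as `stack`); note that `pop()` only discards the top element, while the content is read with `top()`:
```python
from stack import Stack

s = Stack()
for item in ["a", "b", "c"]:
    s.push(item)

print(s.top())       # -> "c" (last element pushed)
s.pop()              # discards "c"; pop() does not return the content
print(s.top())       # -> "b"
print(s.isEmpty())   # -> False
```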
#### File: Automaten/Python/nka_terme.py
```python
def transition(state, char, stack_char):
new_state = -1
new_stack_chars = ""
if state == 0:
new_state = 1
new_stack_chars = "S#"
elif state == 1:
if stack_char in "0123456789+-*:().":
new_state = 1
new_stack_chars = ""
elif stack_char == "S":
if char in "123456789":
new_state = 1
new_stack_chars = "A"
elif char == "0":
new_state = 1
new_stack_chars = "B"
elif char == "(":
new_state = 1
new_stack_chars = "E)R"
elif stack_char == "A":
if char in "0123456789":
new_state = 1
new_stack_chars = "A"
elif char == ".":
new_state = 1
new_stack_chars = "C"
elif char in "+-:*":
new_state = 1
new_stack_chars = "E"
elif stack_char == "B":
if char == ".":
new_state = 1
new_stack_chars = "C"
elif char in "+-:*":
new_state = 1
new_stack_chars = "E"
elif stack_char == "C":
if char in "0123456789":
new_state = 1
new_stack_chars = "D"
elif stack_char == "D":
if char in "0123456789":
new_state = 1
new_stack_chars = "D"
elif char in "+-:*":
new_state = 1
new_stack_chars = "E"
elif stack_char == "E":
if char in "123456789":
new_state = 1
new_stack_chars = "A"
elif char == "0":
new_state = 1
new_stack_chars = "B"
elif char == "(":
new_state = 1
new_stack_chars = "E)R"
elif stack_char == "R":
if char in "+-:*":
new_state = 1
new_stack_chars = "E"
elif char == "":
new_state = 2
elif stack_char == "#":
new_state = 2
return new_state, new_stack_chars
def scan_word(word):
state = 0
stack = ["#"]
for char in word:
stack_char = stack.pop(0)
state, stack_chars = transition(state, char, stack_char)
for sc in reversed(stack_chars):
stack.insert(0, sc)
if len(stack) > 0:
transition(state, "", stack[0])
return word == "" and state == 2
if __name__ == "__main__":
word = input("Bitte ein Wort eingeben: ")
accepted = scan_word(word)
if accepted:
print("Wort gehört zur Sprache")
else:
print("Wort gehört nicht zur Sprache")
```
#### File: Python/Minensucher/minensucher.py
```python
from random import randint
FELD_BREITE = 15
FELD_HOEHE = 10
ANZAHL_MINEN = randint(
int(FELD_BREITE * FELD_HOEHE * 0.1), int(FELD_BREITE * FELD_HOEHE * 0.2)
)
WIDTH = FELD_BREITE * 20
HEIGHT = FELD_HOEHE * 20
feld = []
def minen_verteilen(anzahl):
for i in range(FELD_BREITE):
feld.append([])
for j in range(FELD_HOEHE):
if anzahl > 0 and randint(0, 10) < 3:
feld[i].append("X")
anzahl -= 1
else:
feld[i].append(0)
def anzahl_anpassen(i, j):
for x in range(3):
for y in range(3):
new_i = i - 1 + x
new_j = j - 1 + y
if new_i >= 0 and new_i < FELD_BREITE and new_j >= 0 and new_j < FELD_HOEHE:
if feld[new_i][new_j] != "X":
feld[new_i][new_j] += 1
def minen_zaehlen():
for i in range(FELD_BREITE):
for j in range(FELD_HOEHE):
cell = feld[i][j]
if cell == "X":
anzahl_anpassen(i, j)
sprites = []
def feld_aufbauen():
for i in range(FELD_BREITE):
for j in range(FELD_HOEHE):
inhalt = feld[i][j]
if inhalt == "X":
bomb_sprite = Actor("bomb")
bomb_sprite.center = (i * 20 + 10, j * 20 + 10)
sprites.append(bomb_sprite)
feld_sprite = Actor("feld")
feld_sprite.topleft = (i * 20, j * 20)
sprites.append(feld_sprite)
minen_verteilen(ANZAHL_MINEN)
minen_zaehlen()
feld_aufbauen()
def draw():
screen.clear()
for i in range(FELD_BREITE):
for j in range(FELD_HOEHE):
inhalt = feld[i][j]
screen.draw.textbox(str(inhalt), Rect((i*20,j*20), (20,20)))
for sprite in sprites:
sprite.draw()
def on_mouse_down(pos, button):
if button == mouse.LEFT:
for sprite in sprites:
if sprite.collidepoint(pos):
sprites.remove(sprite)
i, j = int(pos[0] / 20), int(pos[1] / 20)
if feld[i][j] == 'X':
print("Bombe!")
else:
print(feld[i][j])
```
#### File: Grundlagen/TigerJython/06.2-Baumallee.py
```python
from gturtle import *
def zeichneBaum (durchmesser):
forward(durchmesser)
dot(durchmesser/2)
forward(-durchmesser)
makeTurtle()
durchmesser = 30
abstand = 30
repeat 10:
zeichneBaum(durchmesser)
penUp()
right(90)
forward(abstand)
left(90)
forward(abstand)
penDown()
abstand *= 0.9
durchmesser *= 0.9
hideTurtle()
```
#### File: Grundlagen/TigerJython/08-Drehrad.py
```python
from gturtle import *
def dreieck():
repeat 3:
forward(100)
right(120)
makeTurtle()
runde = 1
while runde <= 6: # neu!
if runde == 1 or runde == 3 or runde == 5:
setPenColor("red")
else:
setPenColor("green")
fillToPoint() # neu !
dreieck()
right(60)
runde = runde + 1
```
#### File: Kryptografie/Python/caesar.py
```python
def ceasear_encode( msg, key ):
code = ""
key = ord(key.upper())-ord("A")
for c in msg.upper():
if c in "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
new_ord = ord(c)+key
if new_ord > ord("Z"):
new_ord -= 26
code += chr(new_ord)
else:
code += c
return code
def ceasear_decode( code, key ):
msg = ""
key = ord(key.upper())-ord("A")
for c in code.upper():
if c in "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
new_ord = ord(c)-key
if new_ord < ord("A"):
new_ord += 26
msg += chr(new_ord)
else:
msg += c
return msg
#print(ceasear_decode(ceasear_encode("HalloWelt", "F"), "F"))
msg = """
MRN BRLQNAQNRC NRWNA PNQNRVBLQAROC
MJAO WDA EXW MNA PNQNRVQJUCDWP
MNB BLQUDNBBNUB JKQJNWPNW, WRLQC
SNMXLQ EXW MNA PNQNRVQJUCDWP MNA
ENABLQUDNBBNUDWPBVNCQXMN.
"""
for key in "<KEY>":
#for key in "J":
print(key + ": " + ceasear_decode(msg, key)[:14])
```
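A roundtrip sketch using the two functions above (the commented-out line in the file performs the same check); note that both functions upper-case their input:
```python
from caesar import ceasear_decode, ceasear_encode

code = ceasear_encode("HalloWelt", "F")   # key "F" shifts by 5 positions
print(code)                               # -> "MFQQTBJQY"
print(ceasear_decode(code, "F"))          # -> "HALLOWELT"
```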
#### File: Python/POP3/connection.py
```python
import socket
import os
class Connection(object):
def __init__(self, pServerIP, pServerPort, encoding='utf8'):
self.encoding = encoding
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.connect((pServerIP, pServerPort))
def send(self, pMessage):
if pMessage[-len(os.linesep):] != os.linesep:
pMessage += os.linesep
self._socket.sendall(bytes(pMessage, self.encoding))
def receive(self):
buf = ['']
while buf[-1] != '\n':
buf.append(self._socket.recv(1).decode(self.encoding))
if buf[-2] == '\r':
return ''.join(buf[:-2])
else:
return ''.join(buf[:-1])
def close(self):
self._socket.close()
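# Usage sketch (hypothetical server address; not part of the original file):
#
#   conn = Connection("pop.example.org", 110)
#   print(conn.receive())      # server greeting, e.g. "+OK ..."
#   conn.send("USER alice")
#   print(conn.receive())
#   conn.close()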
``` |
{
"source": "Jneville0815/covidify",
"score": 4
} |
#### File: src/covidify/list_countries.py
```python
import os
import sys
import click
import covidify
import numpy as np
from covidify.sources import github
from covidify.config import SCRIPT
def get_countries():
print('Getting available countries...')
df = github.get()
df = df[df.confirmed > 0]
countries = sorted(list(set(df.country.values)))
for a,b,c in zip(countries[::3],countries[1::3],countries[2::3]):
print('{:<30}{:<30}{:<}'.format(a,b,c))
print('\n\033[1;31mNUMBER OF COUNTRIES/AREAS INFECTED:\033[0;0m', len(countries))
``` |
{
"source": "JNevrly/cookiecutter-pypackage-poetry",
"score": 2
} |
#### File: {{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/cli.py
```python
import sys
{%- if cookiecutter.use_onacol == 'y' %}
import pkg_resources
{%- endif %}
{%- if cookiecutter.use_classic_aiohttp_setup == 'y' %}
import asyncio
import logging
{%- endif %}
{%- if cookiecutter.command_line_interface|lower == 'click' %}
import click
{%- if cookiecutter.use_onacol == 'y' %}
from onacol import ConfigManager, ConfigValidationError
{%- endif %}
{%- endif %}
{%- if cookiecutter.use_onacol == 'y' %}
DEFAULT_CONFIG_FILE = pkg_resources.resource_filename(
"{{cookiecutter.project_slug}}", "default_config.yaml")
{% endif %}
{% if cookiecutter.use_classic_aiohttp_setup == 'y' %}
logger = logging.getLogger("{{cookiecutter.project_slug}}")
def global_exception_handler(loop, context):
msg = f"{context.get('message', '')} : {context.get('exception', '')} @ " \
f"{context.get('future','')}"
logger.error("Exception caught at global level: %s", msg)
{% endif %}
{% if cookiecutter.command_line_interface|lower == 'click' %}
{%- if cookiecutter.use_onacol == 'y' %}
@click.command(context_settings=dict(
ignore_unknown_options=True,
allow_extra_args=True
))
@click.option("--config", type=click.Path(exists=True), default=None,
help="Path to the configuration file.")
@click.option("--get-config-template", type=click.File("w"), default=None,
help="Write default configuration template to the file.")
@click.pass_context
def main(ctx, config, get_config_template):
"""Console script for {{cookiecutter.project_slug}}."""
# Instantiate config_manager
config_manager = ConfigManager(
DEFAULT_CONFIG_FILE,
env_var_prefix="{{cookiecutter.project_slug}}",
optional_files=[config] if config else []
)
# Generate configuration for the --get-config-template option
# Then finish the application
if get_config_template:
config_manager.generate_config_example(get_config_template)
sys.exit(0)
# Load (implicit) environment variables
config_manager.config_from_env_vars()
# Parse all extra command line options
config_manager.config_from_cli_args(ctx.args)
# Validate the config
try:
config_manager.validate()
except ConfigValidationError as cve:
click.secho("<----------------Configuration problem---------------->",
fg='red')
# Logging is not yet configured at this point.
click.secho(str(cve), fg='red', err=True)
sys.exit(1)
{%- if cookiecutter.use_classic_aiohttp_setup == 'y' %}
# Asyncio loop setup
loop = asyncio.get_event_loop()
loop.set_exception_handler(global_exception_handler)
logging.basicConfig(level=getattr(
logging, config_manager.config['general']['log_level']),
format="%(asctime)s.%(msecs)03d [%(name)s][%(levelname)s] %(message)s",
datefmt="%H:%M:%S",
stream=sys.stdout)
logging.getLogger("aiohttp").setLevel(logging.WARNING)
logging.getLogger("sockjs").setLevel(logging.WARNING)
# Setup your main classes here
try:
click.secho("Running {{cookiecutter.project_slug}} application ..", fg='green')
# Start your app here
loop.run_forever()
except KeyboardInterrupt:
click.secho("<--------------- Shutting down ------------------->",
fg='red')
except Exception as e:
logger.exception(e)
finally:
try:
# Stop and cleanup your app here
loop.run_until_complete(asyncio.sleep(1.0))
loop.close()
except Exception as e:
logger.exception("Error occured during shutdown : %s", e)
click.secho("<--------------- Stopped ------------------->", fg='red')
{%- endif %}
click.echo("Replace this message by putting your code into "
"{{cookiecutter.project_slug}}.cli.main")
click.echo("See click documentation at https://click.palletsprojects.com/")
sys.exit(0)
{% else %}
@click.command()
def main(args=None):
"""Console script for {{cookiecutter.project_slug}}."""
click.echo("Replace this message by putting your code into "
"{{cookiecutter.project_slug}}.cli.main")
click.echo("See click documentation at https://click.palletsprojects.com/")
sys.exit(0)
{%- endif %}
{%- endif %}
if __name__ == "__main__":
main() # pragma: no cover
``` |
{
"source": "jnewbery/bitcoin_for_hackers",
"score": 3
} |
#### File: jnewbery/bitcoin_for_hackers/ledger.py
```python
from collections import OrderedDict
import matplotlib.pyplot as plt
import numpy as np
class Ledger():
def __init__(self, users):
super().__init__()
self.users = users
self.txs = []
def __repr__(self):
return "[{}]".format(",\n ".join([str(tx[0]) for tx in self.txs]))
def list_with_spentness(self):
return "[{}]".format(",\n ".join([str(tx[0]) + ", " + str(tx[1]) for tx in self.txs]))
def add_transaction(self, tx):
if tx.parent is None:
self.txs.append([tx, "unspent"])
else:
for in_tx in self.txs:
if in_tx[0].id == tx.parent:
if in_tx[1] == "unspent":
in_tx[1] = "spent"
self.txs.append([tx, "unspent"])
elif in_tx[1] in ["spent", "invalid double spend"]:
self.txs.append([tx, "invalid double spend"])
return
def get_transactions(self):
return [tx[0] for tx in self.txs]
def get_parent_tx(self, tx):
for in_tx in [tx[0] for tx in self.txs]:
if in_tx.id == tx.parent:
return in_tx
def swap_order(self, a, b):
txs = [tx[0] for tx in self.txs]
txs[a], txs[b] = txs[b], txs[a]
self.txs = []
for tx in txs:
self.add_transaction(tx)
def get_balances(self, allow_doublespends=True):
balances = OrderedDict()
for user in self.users:
balances[user] = 0
if allow_doublespends:
# calculate balances on the fly
for tx in [tx[0] for tx in self.txs]:
if tx.recipient_name in balances:
# Add 1 to the recipient's balance
balances[tx.recipient_name] += 1
if tx.parent is not None:
# Remove 1 from the sender's balance
balances[self.get_parent_tx(tx).recipient_name] -= 1
else:
# calculate balances based on spentness
for tx in self.txs:
if tx[1] == "unspent" and tx[0].recipient_name in balances:
balances[tx[0].recipient_name] += 1
return balances
def draw_balances(self, allow_doublespends=True):
fig, ax = plt.subplots()
ind = np.arange(1, 4)
balances = list(self.get_balances(allow_doublespends).values())
n0, n1, n2 = plt.bar(ind, balances)
n0.set_facecolor('r')
n1.set_facecolor('r')
n2.set_facecolor('r')
ax.set_xticks(ind)
ax.set_xticklabels(self.users)
ax.set_ylim([0, max(balances) + 1])
ax.set_yticks([0, 1, 2])
ax.set_ylabel('Balance')
ax.set_title('Balances')
# ask the canvas to re-draw itself the next time it
# has a chance.
# For most of the GUI backends this adds an event to the queue
# of the GUI frameworks event loop.
fig.canvas.draw()
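# Usage sketch (assumes a transaction object exposing .id, .parent and
# .recipient_name as used above; that class is defined elsewhere in the repo):
#
#   ledger = Ledger(["Alice", "Bob", "Carol"])
#   ledger.add_transaction(coinbase_tx)   # parent is None -> recorded as unspent
#   ledger.add_transaction(spend_tx)      # spends coinbase_tx -> parent marked spent
#   print(ledger.get_balances(allow_doublespends=False))
#   ledger.draw_balances()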
``` |
{
"source": "jnewbery/bitcointools",
"score": 2
} |
#### File: jnewbery/bitcointools/fees.py
```python
from serialize import open_bs, SerializationError
class TxConfirmStats():
"""Tracks buckets of transactions and how long it took for them to confirm in a block.
    From v15.0, fee_estimates.dat files contain 3 TxConfirmStats objects (feeStats, shortStats and longStats).
    decay: TODO
    scale: TODO
    max_periods: TODO
    max_confirms: TODO
    avg: TODO
    txCtAvg: TODO
    confAvg: TODO
    failAvg: TODO"""
def __init__(self, no_buckets):
self.no_buckets = no_buckets
self.decay = 0
self.scale = 0
self.max_periods = 0
self.max_confirms = 0
self.avg = []
self.txCtAvg = []
self.confAvg = []
self.failAvg = []
def __repr__(self):
ret = "Decay: {}\n".format(self.decay)
ret += "Scale: {}\n".format(self.scale)
ret += "Avg Fees: {}\n".format(self.avg)
ret += "Bucket tx counts: {}\n".format(self.txCtAvg)
ret += "confAvg: {}\n".format(self.confAvg)
ret += "failAvg: {}\n".format(self.confAvg)
ret += "max periods: {}\n".format(self.max_periods)
ret += "max confirms: {}\n".format(self.max_confirms)
return ret
def deserialize(self, f):
self.decay = f.deser_double()
self.scale = f.deser_uint32()
avg_size = f.deser_compact_size()
if avg_size != self.no_buckets:
raise SerializationError("Corrupt estimates file. Mismatch in feerate average bucket count")
for _ in range(self.no_buckets):
self.avg.append(f.deser_double())
tx_ct_avg_size = f.deser_compact_size()
if tx_ct_avg_size != self.no_buckets:
raise SerializationError("Corrupt estimates file. Mismatch in tx count bucket count")
for _ in range(self.no_buckets):
self.txCtAvg.append(f.deser_double())
no_block_targets = f.deser_compact_size()
for _ in range(no_block_targets):
conf_avg = []
no_conf_avg = f.deser_compact_size()
if no_conf_avg != self.no_buckets:
raise SerializationError("Corrupt estimates file. Mismatch in feerate conf average bucket count")
for __ in range(self.no_buckets):
conf_avg.append(f.deser_double())
self.confAvg.append(conf_avg)
self.max_periods = len(self.confAvg)
self.max_confirms = self.scale * self.max_periods
no_block_targets = f.deser_compact_size()
for _ in range(no_block_targets):
fail_avg = []
no_fail_avg = f.deser_compact_size()
if no_fail_avg != self.no_buckets:
raise SerializationError("Corrupt estimates file. Mismatch in one of failure average bucket counts")
for __ in range(self.no_buckets):
fail_avg.append(f.deser_double())
self.failAvg.append(fail_avg)
class FeeEstimates():
"""Represents contests of fee_estimates.dat file.
version_required: the version of bitcoind that wrote this fee estimates file.
version_that_wrote: the minimum version of bitcoind that can read this fee estimates file.
file_best_seen_height: the height of the higest block that was processed for this fee estimates file.
file_historical_first and file_historical_best: the spam of bloacks for which bitcoind was tracking
fee estimates when the fee estimate file was written. Used by bitcoind to know what targets it can
successfully evaluate with the data in the file.
fee_stats: a medium range TxConfirmStats object tracking transactions confirmed in up to 48 blocks (granularity: 2 blocks)
short_stats: a short range TxConfirmStats object tracking transactions confirmed in up to 12 blocks (granularity: 1 block)
long_stats: a long range TxConfirmStats object tracking transactions confirmed in up to 1008 blocks (granularity: 24 blocks)
buckets: TODO."""
def __init__(self):
self.version_required = 0
self.version_that_wrote = 0
self.file_best_seen_height = 0
self.file_historical_first = 0
self.file_historical_best = 0
self.buckets = []
self.fee_stats = None
self.short_stats = None
self.long_stats = None
def __repr__(self):
ret = "Version required: {}\n".format(self.version_required)
ret += "Version that wrote: {}\n".format(self.version_that_wrote)
ret += "File best seen height: {}\n".format(self.file_best_seen_height)
ret += "File historical first: {}\n".format(self.file_historical_first)
ret += "File historical best: {}\n".format(self.file_historical_best)
ret += "Buckets: {}\n".format(self.buckets)
ret += "Short Term Fee Stats:\n"
ret += self.short_stats.__repr__()
ret += "Medium Term Fee Stats:\n"
ret += self.fee_stats.__repr__()
ret += "Long Term Fee Stats:\n"
ret += self.long_stats.__repr__()
return ret
def deserialize(self, f):
self.version_required = f.deser_uint32()
self.version_that_wrote = f.deser_uint32()
if self.version_that_wrote < 149900:
raise SerializationError("Cannot read fee_estimates.dat file with version < 149900")
self.file_best_seen_height = f.deser_uint32()
self.file_historical_first = f.deser_uint32()
self.file_historical_best = f.deser_uint32()
if self.file_historical_first > self.file_historical_best or self.file_historical_first > self.file_best_seen_height:
raise SerializationError("Corrupt estimates file. Historical block range for estimates is invalid")
no_buckets = f.deser_compact_size()
if no_buckets <= 1 or no_buckets > 1000:
raise SerializationError("Corrupt estimates file. Must have between 2 and 1000 feerate buckets")
for _ in range(no_buckets):
self.buckets.append(f.deser_double())
# Deserialize the TxConfirmStats parts
self.fee_stats = TxConfirmStats(no_buckets)
self.fee_stats.deserialize(f)
self.short_stats = TxConfirmStats(no_buckets)
self.short_stats.deserialize(f)
self.long_stats = TxConfirmStats(no_buckets)
self.long_stats.deserialize(f)
def dump_fee_estimates(fee_file):
fee_estimates = FeeEstimates()
with open_bs(fee_file, "r") as f:
fee_estimates.deserialize(f)
print(fee_estimates)
```
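A usage sketch for the parser above; the location of `fee_estimates.dat` is an assumption and depends on the local bitcoind data directory and network:
```python
import os
from fees import dump_fee_estimates

# Default mainnet location on Linux; adjust for your own datadir or network.
fee_file = os.path.expanduser("~/.bitcoin/fee_estimates.dat")
dump_fee_estimates(fee_file)
```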
#### File: bitcointools/test/test_base58.py
```python
from binascii import unhexlify
import unittest
import base58
# Test vectors imported from
# https://github.com/bitcoin/bitcoin/blob/b5e4b9b5100ec15217d43edb5f4149439f4b20a5/src/test/data/base58_encode_decode.json
TEST_CASES = [
["", ""],
["61", "2g"],
["626262", "a3gV"],
["636363", "aPEr"],
["73696d706c792061206c6f6e6720737472696e67", "2cFupjhnEsSn59qHXstmK2ffpLv2"],
["<KEY>", "<KEY>"],
["00cf00816e4be7dbc1c3df0c2b0be2b77e4ad99a14111e6c6f", "<KEY>"],
["516b6fcd0f", "ABnLTmg"],
["bf4f89001e670274dd", "3SEo3LWLoPntC"],
["572e4794", "3EFU7m"],
["ecac89cad93923c02321", "EJDM8drfXA6uyA"],
["10c8511e", "Rt5zm"],
["00000000000000000000", "1111111111"]
]
class Base58TestCase(unittest.TestCase):
def test_bytes_to_base58(self):
for input_hex, expected_base58 in TEST_CASES:
input_bin = unhexlify(input_hex)
actual_base58 = base58.bytes_to_base58(input_bin)
self.assertEqual(actual_base58, expected_base58)
# TODO: test base58 decoding
``` |
{
"source": "jnewbery/lightning-integration",
"score": 2
} |
#### File: jnewbery/lightning-integration/eclair.py
```python
from binascii import hexlify
from ephemeral_port_reserve import reserve
from lnaddr import lndecode
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
from utils import TailableProc
import json
import logging
import os
import psutil
import re
import requests
import shutil
import time
def requests_retry_session(
retries=3,
backoff_factor=0.3,
status_forcelist=(500, 502, 504),
session=None,
):
session = session or requests.Session()
retry = Retry(
total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status_forcelist=status_forcelist,
)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
class EclairD(TailableProc):
def __init__(self, lightning_dir, bitcoind, port):
TailableProc.__init__(self, lightning_dir, "eclair({})".format(port))
self.lightning_dir = lightning_dir
self.bitcoind = bitcoind
self.port = port
self.rpc_port = str(reserve())
self.prefix = 'eclair'
self.cmd_line = [
'java',
'-Declair.datadir={}'.format(lightning_dir),
'-Dlogback.configurationFile={}'.format(os.path.join(lightning_dir, 'logback.xml')),
'-jar',
'bin/eclair.jar'
]
if not os.path.exists(lightning_dir):
os.makedirs(lightning_dir)
shutil.copyfile('logback.xml', os.path.join(lightning_dir, "logback.xml"))
# Adapt the config and store it
with open('src/eclair/eclair-core/src/main/resources/reference.conf') as f:
config = f.read()
replacements = [
('chain = "mainnet"', 'chain = "regtest"'),
('enabled = false // disabled by default for security reasons', 'enabled = true'),
('password = ""', 'password = "<PASSWORD>"'),
('9735', str(port)),
('8332', str(self.bitcoind.rpcport)),
('8080', str(self.rpc_port)),
('"test"', '"regtest"'),
('"foo"', '"rpcuser"'),
('"bar"', '"rpcpass"'),
('zmqblock = "tcp://127.0.0.1:29000"', 'zmqblock = "tcp://127.0.0.1:{}"'.format(self.bitcoind.zmqpubrawblock_port)),
('zmqtx = "tcp://127.0.0.1:29000"', 'zmqtx = "tcp://127.0.0.1:{}"'.format(self.bitcoind.zmqpubrawtx_port)),
('use-old-api = false', 'use-old-api = true'),
]
for old, new in replacements:
config = config.replace(old, new)
with open(os.path.join(lightning_dir, "eclair.conf"), "w") as f:
f.write(config)
def start(self):
TailableProc.start(self)
self.wait_for_log("connected to tcp://127.0.0.1:")
# And let's also remember the address
exp = 'initial wallet address=([a-zA-Z0-9]+)'
addr_line = self.wait_for_log(exp)
self.addr = re.search(exp, addr_line).group(1)
self.logger.info("Eclair started (pid: {})".format(self.proc.pid))
def stop(self):
# Java forks internally and detaches its children, use psutil to hunt
# them down and kill them
proc = psutil.Process(self.proc.pid)
processes = [proc] + proc.children(recursive=True)
# Be nice to begin with
for p in processes:
p.terminate()
_, alive = psutil.wait_procs(processes, timeout=3)
# But if they aren't, we can be more persuasive
for p in alive:
p.kill()
psutil.wait_procs(alive, timeout=3)
self.thread.join()
super().save_log()
class EclairNode(object):
displayName = 'eclair'
def __init__(self, lightning_dir, lightning_port, btc, executor=None,
node_id=0):
self.bitcoin = btc
self.executor = executor
self.daemon = EclairD(lightning_dir, self.bitcoin,
port=lightning_port)
self.rpc = EclairRpc(
'http://localhost:{}'.format(self.daemon.rpc_port))
self.logger = logging.getLogger('eclair-node({})'.format(lightning_port))
def peers(self):
return [p['nodeId'] for p in self.rpc.peers()]
def id(self):
info = self.rpc._call("getinfo", {})
return info['nodeId']
def openchannel(self, node_id, host, port, satoshis):
r = self.rpc._call('open', {"nodeId": node_id, "fundingSatoshis": satoshis, "pushMsat": 0})
return r
def getaddress(self):
return self.daemon.addr
def addfunds(self, bitcoind, satoshis):
addr = self.getaddress()
bitcoind.rpc.sendtoaddress(addr, float(satoshis) / 10**8)
# Eclair seems to grab funds from the block, so give it a
# chance to see it
time.sleep(1)
bitcoind.rpc.generate(1)
def ping(self):
""" Simple liveness test to see if the node is up and running
Returns true if the node is reachable via RPC, false otherwise.
"""
try:
self.rpc.help()
return True
except:
return False
def check_channel(self, remote):
""" Make sure that we have an active channel with remote
"""
self_id = self.id()
remote_id = remote.id()
for c in self.rpc.channels():
channel = self.rpc.channel(c)
if channel['nodeId'] == remote_id:
self.logger.debug("Channel {} -> {} state: {}".format(self_id, remote_id, channel['state']))
return channel['state'] == 'NORMAL'
self.logger.warning("Channel {} -> {} not found".format(self_id, remote_id))
return False
def getchannels(self):
channels = []
for c in self.rpc._call('channels', {}):
channels.append((c['a'], c['b']))
channels.append((c['b'], c['a']))
return channels
def getnodes(self):
return set([n['nodeId'] for n in self.rpc.allnodes()])
def invoice(self, amount):
req = self.rpc._call("createinvoice", {"amountMsat": amount, "description": "invoice1"})
logging.debug(req)
return req['serialized']
def send(self, req):
details = self.parse_invoice(req)
payment_hash = details['paymentHash']
payment_id = self.rpc._call("payinvoice", {"invoice": req})
for i in range(100):
result = self.rpc._call('getsentinfo', {'paymentHash': payment_hash, 'id': payment_id})[0]
if result['status'] == 'SUCCEEDED':
break
time.sleep(1)
if 'failures' in result:
raise ValueError("Failed to send payment: {}".format(result))
else:
return result['preimage']
def parse_invoice(self, invoice):
return self.rpc._call('parseinvoice', {'invoice': invoice})
def connect(self, host, port, node_id):
return self.rpc._call('connect', {'nodeId': node_id, 'host': host, 'port': port})
def block_sync(self, blockhash):
time.sleep(1)
def info(self):
r = self.rpc._call('getinfo', {})
return {
'id': r['nodeId'],
'blockheight': r['blockHeight'],
}
def restart(self):
self.daemon.stop()
time.sleep(5)
self.daemon.start()
time.sleep(1)
def stop(self):
self.daemon.stop()
def start(self):
self.daemon.start()
def check_route(self, node_id, amount):
try:
r = self.rpc._call("findroutetonode", {"nodeId": node_id, "amountMsat": amount})
except ValueError as e:
if (str(e).find("command failed: route not found") > 0):
return False
raise
return True
class EclairRpc(object):
def __init__(self, url):
self.url = url
# self.session = requests_retry_session(retries=10, session=requests.Session())
def _call(self, method, params):
#headers = {'Content-type': 'multipart/form-data'}
headers = {}
logging.info("Calling {} with params={}".format(method, json.dumps(params, indent=4, sort_keys=True)))
url = "{}/{}".format(self.url, method)
with requests_retry_session(retries=10, session=requests.Session()) as s:
reply = s.post(url, data=params, headers=headers, auth=('user', 'rpcpass'))
if reply.status_code != 200:
raise ValueError("Server returned an unknown error: {} ({})".format(
reply.status_code, reply.text))
logging.debug("Method {} returned {}".format(method, json.dumps(reply.json(), indent=4, sort_keys=True)))
if 'error' in reply.json():
raise ValueError('Error calling {}: {}'.format(
method, reply.json()))
else:
return reply.json()
def peers(self):
return self._call('peers', {})
def channels(self):
return [c['channelId'] for c in self._call('channels', {})]
def channel(self, cid):
return self._call('channel', {'channelId': cid})
def allnodes(self):
return self._call('allnodes', {})
def help(self):
return self._call('getinfo', {})
```
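`requests_retry_session` above is a generic helper; a minimal sketch of using it on its own follows (the URL and credentials mirror the hard-coded test values but are otherwise hypothetical):
```python
from eclair import requests_retry_session

# Retry transient HTTP failures (500/502/504) with exponential backoff.
session = requests_retry_session(retries=5, backoff_factor=0.5)
response = session.post("http://localhost:8080/getinfo", data={}, auth=("user", "rpcpass"))
print(response.status_code)
```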
#### File: jnewbery/lightning-integration/ptarmd.py
```python
from utils import TailableProc
import json
import logging
import os
import time
import subprocess
import re
import socket
class PtarmD(TailableProc):
def __init__(self, lightning_dir, bitcoin_dir, port=9735):
TailableProc.__init__(self, lightning_dir, 'ptarmd({}).format(port)')
self.lightning_dir = lightning_dir
self.port = port
self.cmd_line = [
'bin/ptarmd',
'-d', lightning_dir,
'-p', str(port),
'-c', '{}/bitcoin.conf'.format(bitcoin_dir),
'--network', 'regtest',
'--rpcport', str(port+1234),
]
self.prefix = 'ptarmd'
if not os.path.exists(lightning_dir):
os.makedirs(lightning_dir)
def start(self):
TailableProc.start(self)
self.wait_for_log("start ptarmigan node.", offset=100)
time.sleep(3)
logging.info("PtarmD started")
def stop(self):
TailableProc.stop(self)
logging.info("PtarmD stopped")
class PtarmNode(object):
displayName = 'ptarmigan'
def __init__(self, lightning_dir, lightning_port, btc, executor=None,
node_id=0):
self.bitcoin = btc
self.executor = executor
self.daemon = PtarmD(
lightning_dir,
btc.bitcoin_dir,
port=lightning_port
)
self.rpc = PtarmRpc('127.0.0.1', lightning_port+1234)
self.myid = None
self.node_id = node_id
self.bitcoind = None
self.txid = None
self.vout = None
self.peer_host = None
self.peer_port = None
self.peer_node_id = None
self.push_sat = 0
self.feerate_per_kw = 12*1000
self.logger = self.daemon.logger
def peers(self):
r = self.rpc.getinfo()
return [p['node_id'] for p in r['peers']]
def getinfo(self):
raise NotImplementedError()
def id(self):
if not self.myid:
self.myid = self.rpc.getinfo()['node_id']
return self.myid
def openchannel(self, node_id, host, port, satoshis):
# Make sure we have a connection already
if node_id not in self.peers():
raise ValueError("Must connect to node before opening a channel")
return self.rpc.fundchannel(
node_id,
self.peer_host,
self.peer_port,
self.txid,
self.vout,
satoshis,
self.push_sat,
self.feerate_per_kw
)
def getaddress(self):
raise NotImplementedError()
def addfunds(self, bitcoind, satoshis):
# ptarmd uses bitcoind's wallet.
self.bitcoind = bitcoind
addr = bitcoind.rpc.getnewaddress('', 'p2sh-segwit')
self.txid = bitcoind.rpc.sendtoaddress(addr, float(satoshis) / 10**8)
listunspent = bitcoind.rpc.listunspent(0, 1, [addr])
self.vout = listunspent[0]['vout']
# Lock vout to not be used for other transactions.
assert bitcoind.rpc.lockunspent(
False,
[{"txid": self.txid, "vout": self.vout}]
)
time.sleep(1)
bitcoind.rpc.generatetoaddress(1, addr)
def ping(self):
""" Simple liveness test to see if the node is up and running
Returns true if the node is reachable via RPC, false otherwise.
"""
try:
self.rpc.getinfo()
return True
except Exception:
return False
def check_channel(self, remote):
""" Make sure that we have an active channel with remote
"""
remote_id = remote.id()
self_id = self.id()
for p in self.rpc.getinfo()['peers']:
if 'node_id' not in p:
continue
if remote.id() == p['node_id']:
state = p['status']
self.logger.debug("Channel {} -> {} state: {}".format(
self_id,
remote_id, state
))
return state == 'normal operation'
self.logger.warning("Channel {} -> {} not found".format(
self_id,
remote_id
))
return False
def getchannels(self):
proc = subprocess.run(
['{}/bin/showdb'.format(os.getcwd()), '-c'],
stdout=subprocess.PIPE,
cwd=self.daemon.lightning_dir
)
decoder = json.JSONDecoder()
objs, _ = decoder.raw_decode(proc.stdout.decode("UTF-8"))
result = []
if 'channel_announcement_list' in objs:
for c in objs['channel_announcement_list']:
if c['type'] != 'channel_announcement':
continue
result.append((c['node1'], c['node2']))
result.append((c['node2'], c['node1']))
return set(result)
def getnodes(self):
""" Get nodes on channels
"""
nodes = set()
# Get a list of node ids from `node_announcement`s. but it
# always includes my node id even if my node has no relevant
# channels.
proc = subprocess.run(
['{}/bin/showdb'.format(os.getcwd()), '-n'],
stdout=subprocess.PIPE,
cwd=self.daemon.lightning_dir
)
objs, _ = json.JSONDecoder().raw_decode(proc.stdout.decode("UTF-8"))
if 'node_announcement_list' not in objs:
return set()
nodes = set([n['node'] for n in objs['node_announcement_list']])
# Get a list of `channel_announcement`s,
# and discard my node id from `nodes` if it has no relevant channels.
proc = subprocess.run(
['{}/bin/showdb'.format(os.getcwd()), '-c'],
stdout=subprocess.PIPE,
cwd=self.daemon.lightning_dir
)
objs, _ = json.JSONDecoder().raw_decode(proc.stdout.decode("UTF-8"))
if 'channel_announcement_list' not in objs:
return set()
for c in objs['channel_announcement_list']:
if c['type'] != 'channel_announcement':
continue
if c['node1'] == self.id():
# found
break
if c['node2'] == self.id():
# found
break
else:
# not found
nodes.discard(self.id())
return nodes
def invoice(self, amount):
r = self.rpc.invoice(amount)
return r['bolt11']
def send(self, req):
self.rpc.pay(req) # Will raise on error, but no useful info
# Grab the preimage from listpayment
preimage = None
for i in range(5):
r = self.rpc.listpayment()
for pay in r:
if pay['invoice'] == req:
if ('preimage' in pay) and (len(pay['preimage']) != 0):
preimage = pay['preimage']
break
if preimage is None:
time.sleep(1)
continue
break
if preimage is None:
raise ValueError(
"Could not found preimage from listpayment"
)
return preimage
def connect(self, host, port, node_id):
self.peer_host = host
self.peer_port = port
self.peer_node_id = node_id
initial_routing_sync = 1
return self.rpc.connect(node_id, host, port, initial_routing_sync)
def info(self):
r = self.rpc.getinfo()
return {
'id': r['node_id'],
'blockheight': r['block_count'],
}
def block_sync(self, blockhash):
time.sleep(1)
def restart(self):
self.daemon.stop()
time.sleep(5)
self.daemon.start()
time.sleep(1)
def stop(self):
self.daemon.stop()
def start(self):
self.daemon.start()
def check_route(self, node_id, amount):
proc = subprocess.run([
'{}/bin/routing'.format(os.getcwd()),
'-s',
self.id(),
'-r',
node_id,
'-a',
str(amount)
], stdout=subprocess.PIPE, cwd=self.daemon.lightning_dir)
return proc.returncode == 0
class TcpSocketRpc(object):
# The code of this class was copied a lot from `lightning.py`
# - https://github.com/ElementsProject/lightning/blob/master/contrib/pylightning/lightning/lightning.py
def __init__(self, host, port, executor=None, logger=logging):
self.host = host
self.port = port
self.decoder = json.JSONDecoder()
self.executor = executor
self.logger = logger
@staticmethod
def _writeobj(sock, obj):
s = json.dumps(obj)
sock.sendall(bytearray(s, 'UTF-8'))
def _readobj(self, sock):
buff = b''
while True:
try:
b = sock.recv(1024)
buff += b
if len(b) == 0:
return {'error': 'Connection to RPC server lost.'}
# Convert late to UTF-8 so glyphs split across recvs do not
# impact us
objs, _ = self.decoder.raw_decode(buff.decode("UTF-8"))
return objs
except ValueError:
# Probably didn't read enough
pass
def __getattr__(self, name):
"""Intercept any call that is not explicitly defined and call @call
We might still want to define the actual methods in the subclasses for
documentation purposes.
"""
name = name.replace('_', '-')
def wrapper(**kwargs):
return self.call(name, payload=kwargs)
return wrapper
def call(self, method, payload=None):
self.logger.debug("Calling %s with payload %r", method, payload)
if payload is None:
payload = {}
# Filter out arguments that are None
payload = [v for v in payload if v is not None]
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.host, self.port))
self._writeobj(sock, {
"method": method,
"params": payload,
"id": 0
})
resp = self._readobj(sock)
sock.close()
self.logger.debug("Received response for %s call: %r", method, resp)
if "error" in resp:
raise ValueError(
"RPC call failed: {}, method: {}, payload: {}".format(
resp["error"],
method,
payload
))
elif "result" not in resp:
raise ValueError("Malformed response, \"result\" missing.")
return resp["result"]
class PtarmRpc(TcpSocketRpc):
def invoice(self, msatoshi):
payload = [msatoshi]
return self.call("invoice", payload)
def getinfo(self):
return self.call("getinfo")
def listpayment(self):
return self.call("listpayment")
def pay(self, bolt11, msatoshi=None, description=None, riskfactor=None):
payload = [bolt11, 0]
return self.call("routepay", payload)
def connect(self, peer_id, host=None, port=None, initial_routing_sync=None):
payload = [peer_id, '127.0.0.1', port, initial_routing_sync]
return self.call("connect", payload)
def fundchannel(self, peer_id, peer_host, peer_port, txid, txindex,
funding_sat, push_sat, feerate_per_kw):
payload = [
peer_id,
peer_host,
peer_port,
txid,
txindex,
funding_sat,
push_sat,
feerate_per_kw
]
return self.call("fund", payload)
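# Usage sketch (the RPC port is hypothetical; PtarmD above passes --rpcport port+1234):
#
#   rpc = PtarmRpc("127.0.0.1", 10735)
#   print(rpc.getinfo()["node_id"])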
``` |
{
"source": "jnewbery/SheetSync",
"score": 3
} |
#### File: SheetSync/tests/test_multi_keys.py
```python
import sheetsync
import time, os
CLIENT_ID = os.environ['SHEETSYNC_CLIENT_ID']
CLIENT_SECRET = os.environ['SHEETSYNC_CLIENT_SECRET']
TEMPLATE_DOC = "0AsrRHMfAlOZrdFlLLWlzM2dhZ0tyS1k5RUxmVGU3cEE"
TESTS_FOLDER_KEY = os.environ.get("SHEETSYNC_FOLDER_KEY")
target = None
"""
def setup_function(function):
global target
print ('setup_function: Create test spreadsheet.')
# Copy the template spreadsheet into the prescribed folder.
target = sheetsync.Sheet(GOOGLE_U,
GOOGLE_P,
title = ("test_%s" % int(time.time())),
folder_key = TESTS_FOLDER,
template_key = TEMPLATE_DOC,
sheet_name = "TEST",
header_row_ix=2,
key_column_headers = ["Initials"],
formula_ref_row_ix=1)
def teardown_function(function):
print ('teardown_function Delete test spreadsheet')
gdc = target._doc_client_pool[GOOGLE_U]
target_rsrc = gdc.get_resource_by_id(target.document_key)
gdc.Delete(target_rsrc)
def test_date_keys():
print ('TODO: Test dates as keys.')
assert True
def test_tuple_keys():
print ('TODO: Test dates as keys.')
assert True
def test_integers_keys():
print ('TODO: Test dates as keys.')
assert True
def test_tuple_mix_keys():
print ('TODO: Test dates as keys.')
assert True
"""
``` |
{
"source": "jnewbigin/pkcs7_detached",
"score": 3
} |
#### File: jnewbigin/pkcs7_detached/ec2_example.py
```python
import requests
from pkcs7_detached import verify_detached_signature, aws_certificates
import json
from pprint import pprint
def main():
print("Verifying ec2 instance identity document")
r = requests.get("http://169.254.169.254/latest/dynamic/instance-identity/document")
identity_document = r.text
r = requests.get("http://169.254.169.254/latest/dynamic/instance-identity/pkcs7")
pkcs7 = r.text
if verify_detached_signature(
identity_document, pkcs7, aws_certificates.PUBLIC_REGIONS
):
print("Verified")
identity = json.loads(identity_document)
pprint(identity)
else:
print("Identity is not valid")
if __name__ == "__main__":
main()
``` |